content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import torch
import torch.nn as nn
class FocalLoss(nn.Module):
    """Binary focal loss computed from predicted probabilities.

    Down-weights well-classified examples by the modulating factor
    (1 - p_t)^gamma so training focuses on hard examples.
    """

    def __init__(self, gamma=2, eps=1e-7, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma                 # focusing exponent
        self.eps = eps                     # numerical floor inside log()
        self.size_average = size_average   # mean-reduce when True

    def forward(self, prob, labels):
        # Probability assigned to the true class of each element.
        pt = prob * labels + (1 - prob) * (1 - labels)
        focal = -((1.0 - pt) ** self.gamma) * torch.log(pt + self.eps)
        return torch.mean(focal) if self.size_average else focal
|
nilq/baby-python
|
python
|
# Build one flat tuple holding product names and their prices in
# alternating slots, then print a tabular price listing.
Tabela = ('Lapís', 1.75,
          'Borracha', 2,
          'Caderno', 15.90,
          'Estojo', 25,
          'Transferidor', 4.20,
          'Compasso', 9.99,
          'Mochila', 120.32,
          'Canetas', 22.30,
          'Livro', 34.90)

print('=' * 45)
print(f'{"TABELA DE PREÇOS":^40}')
print('=' * 45)

# Walk the tuple two items at a time: even slots hold names,
# odd slots hold the matching prices.
for nome, preco in zip(Tabela[::2], Tabela[1::2]):
    print(f'{nome:.<30}', end=' ')   # name left-aligned, dot-padded to 30
    print(f'R${preco:>7.2f}')        # price right-aligned, 2 decimal places
|
nilq/baby-python
|
python
|
import maya.cmds as mc
import copy
def setDrivenKeyToRemapValue(animCurve,remapValueNode='',interpType=3,deleteAnimCurve=True,lockPosition=True,lockValue=False):
    '''
    Convert a set driven key setup to a remapValue node.
    Each key on the animCurve node is represented as a widget on the remapValue ramp control.
    Incoming and outgoing curve connections will be replaced with equivalent remapValue connections.
    @param animCurve: The animCurve to convert to a remapValue node
    @type animCurve: str
    @param remapValueNode: Name an existing remapValue node to use instead of creating a new one.
    @type remapValueNode: str
    @param interpType: Default ramp interpolation type.
    @type interpType: int
    @param deleteAnimCurve: Delete animCurve node after disconnection
    @type deleteAnimCurve: bool
    @param lockPosition: Lock ramp widget position values
    @type lockPosition: bool
    @param lockValue: Lock ramp widget float values
    @type lockValue: bool
    '''
    # Checks
    if not mc.objExists(animCurve):
        raise Exception('AnimCurve node "'+animCurve+'" does not exist!!')
    if remapValueNode and not mc.objExists(remapValueNode):
        raise Exception('RemapValue node "'+remapValueNode+'" does not exist!!')
    # Get connections to animCurve (listConnections returns None when
    # nothing is connected)
    inConn = mc.listConnections(animCurve+'.input',s=True,d=False,p=True)
    outConn = mc.listConnections(animCurve+'.output',s=False,d=True,p=True)
    # Get keyframe data
    valList = mc.keyframe(animCurve,q=True,vc=True)
    floatList = mc.keyframe(animCurve,q=True,fc=True)
    # Get min/max input and output values
    # (sorted() copies, replacing the old deepcopy + in-place sort)
    orderValList = sorted(valList)
    orderFloatList = sorted(floatList)
    minVal = orderValList[0]
    maxVal = orderValList[-1]
    minFloat = orderFloatList[0]
    maxFloat = orderFloatList[-1]
    # Create remapValue node
    if not remapValueNode:
        remapValueNode = mc.createNode('remapValue',n=animCurve+'_remapValue')
    # Set Remap attribute values
    mc.setAttr(remapValueNode+'.inputMin',minFloat)
    mc.setAttr(remapValueNode+'.inputMax',maxFloat)
    mc.setAttr(remapValueNode+'.outputMin',minVal)
    mc.setAttr(remapValueNode+'.outputMax',maxVal)
    # Remove existing ramp widgets, highest index first so indices stay valid.
    # BUG FIX: range(...).reverse() is invalid on Python 3 (range objects
    # have no reverse() method); use reversed() instead.
    for i in reversed(range(mc.getAttr(remapValueNode+'.value',s=True))):
        mc.removeMultiInstance(remapValueNode+'.value['+str(i)+']',b=True)
    # Set ramp widgets based on keys; positions/values are normalized to 0-1
    valRange = maxVal - minVal
    floatRange = maxFloat - minFloat
    # Guard against zero-length ranges to avoid division by zero
    if valRange < 0.0001: valRange = 0.0001
    if floatRange < 0.0001: floatRange = 0.0001
    # Iterate through keys
    for i in range(len(valList)):
        val = (valList[i] - minVal)/valRange
        flt = (floatList[i] - minFloat)/floatRange
        mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Position',flt)
        mc.setAttr(remapValueNode+'.value['+str(i)+'].value_FloatValue',val)
        mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Interp',interpType)
        if lockPosition:
            mc.setAttr(remapValueNode+'.value['+str(i)+'].value_Position',l=True)
        if lockValue:
            mc.setAttr(remapValueNode+'.value['+str(i)+'].value_FloatValue',l=True)
    # Replace animCurve connections.
    # BUG FIX: guard against missing connections; the original indexed
    # inConn[0]/outConn[0] unconditionally and raised TypeError when
    # listConnections returned None.
    if inConn:
        mc.connectAttr(inConn[0],remapValueNode+'.inputValue',f=True)
    if outConn:
        mc.connectAttr(remapValueNode+'.outValue',outConn[0],f=True)
    # Delete unused animCurve
    if deleteAnimCurve: mc.delete(animCurve)
    # Return result
    return remapValueNode
|
nilq/baby-python
|
python
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Activation,Flatten,Dropout
from keras.layers import Conv2D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
# Load the image arrays and labels saved by the previous preprocessing
# script (data.npy / target.npy in the working directory).
data=np.load('data.npy')
target=np.load('target.npy')
#loading the saved numpy arrays from the previous code
model=Sequential()
model.add(Conv2D(200,(3,3),input_shape=data.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
#The first CNN layer followed by Relu and MaxPooling layers
model.add(Conv2D(100,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
#The second convolution layer followed by Relu and MaxPooling layers
model.add(Flatten())
model.add(Dropout(0.5))
#Flatten layer to stack the output convolutions from second convolution
#layer; dropout (p=0.5) for regularization
model.add(Dense(50,activation='relu'))
#Dense layer of 50 neurons
model.add(Dense(2,activation='softmax'))
#The final layer with two outputs for two categories
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# Hold out 10% for testing; of the remainder, 20% is used as the
# validation split during fit(). The checkpoint callback keeps only the
# model with the best validation loss per epoch.
train_data,test_data,train_target,test_target=train_test_split(data,target,test_size=0.1)
checkpoint = ModelCheckpoint('model-{epoch:03d}.model',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
history=model.fit(train_data,train_target,epochs=20,callbacks=[checkpoint],validation_split=0.2)
# Plot training/validation loss curves.
plt.plot(history.history['loss'],'r',label='training loss')
plt.plot(history.history['val_loss'],label='validation loss')
plt.xlabel('# epochs')
plt.ylabel('loss')
plt.legend()
# Plot training/validation accuracy curves.
plt.plot(history.history['accuracy'],'r',label='training accuracy')
plt.plot(history.history['val_accuracy'],label='validation accuracy')
plt.xlabel('# epochs')
# NOTE(review): this is the accuracy plot but the y-axis label says
# 'loss' — looks like a copy-paste slip; confirm before changing output.
plt.ylabel('loss')
plt.legend()
|
nilq/baby-python
|
python
|
import zipfile
import os
from time import gmtime, strftime
from helper import utility
from lxml import etree
"""
MIT License
Copyright (c) 2018 Chapin Bryce, Preston Miller
Please share comments and questions at:
https://github.com/PythonForensics/Learning-Python-for-Forensics
or email pyforcookbook@gmail.com
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
def main(filename):
    """
    Confirm that the file carries the OOXML signature and dispatch it
    for metadata processing.
    :param filename: name of the file potentially containing
    embedded metadata.
    :return: A dictionary from get_tags, containing the embedded
    metadata.
    """
    # DOCX, XLSX, and PPTX all share this ZIP-based signature
    signatures = ['504b030414000600']
    # Guard clause: anything that is not an OOXML container is rejected
    if utility.check_header(filename, signatures, 8) is not True:
        raise TypeError
    return get_tags(filename)
def get_tags(filename):
    """
    The get_tags function extracts the office metadata from the
    data object.
    :param filename: the path and name to the data object.
    :return: tags and headers, tags is a dictionary containing
    office metadata and headers are the order of keys for the CSV
    output.
    """
    # Set up CSV headers
    headers = ['Path', 'Name', 'Size', 'Filesystem CTime',
               'Filesystem MTime', 'Title', 'Author(s)', 'Create Date',
               'Modify Date', 'Last Modified By Date', 'Subject', 'Keywords',
               'Description', 'Category', 'Status', 'Revision',
               'Edit Time (Min)', 'Page Count', 'Word Count',
               'Character Count', 'Line Count',
               'Paragraph Count', 'Slide Count', 'Note Count',
               'Hidden Slide Count', 'Company', 'Hyperlink Base']
    # OOXML documents are ZIP archives; return empty tags when the
    # container is not a valid ZIP file.
    try:
        zf = zipfile.ZipFile(filename)
    except zipfile.BadZipfile:
        return {}, headers
    # These two XML files contain the embedded metadata of interest.
    # BUG FIX: the original did `assert Warning(e)` here, which is a
    # no-op (a Warning instance is truthy and asserts vanish under -O);
    # simply fall back to empty tags when the entries are missing.
    try:
        core = etree.fromstring(zf.read('docProps/core.xml'))
        app = etree.fromstring(zf.read('docProps/app.xml'))
    except KeyError:
        return {}, headers
    tags = {}
    tags['Path'] = filename
    tags['Name'] = os.path.basename(filename)
    tags['Size'] = utility.convert_size(
        os.path.getsize(filename))
    # Filesystem timestamps are reported in UTC (gmtime).
    tags['Filesystem CTime'] = strftime('%m/%d/%Y %H:%M:%S',
                                        gmtime(os.path.getctime(filename)))
    tags['Filesystem MTime'] = strftime('%m/%d/%Y %H:%M:%S',
                                        gmtime(os.path.getmtime(filename)))
    # Core tags: substring-match each known element name in the
    # namespaced tag of every child of docProps/core.xml.
    for child in core.iterchildren():
        if 'title' in child.tag:
            tags['Title'] = child.text
        if 'subject' in child.tag:
            tags['Subject'] = child.text
        if 'creator' in child.tag:
            tags['Author(s)'] = child.text
        if 'keywords' in child.tag:
            tags['Keywords'] = child.text
        if 'description' in child.tag:
            tags['Description'] = child.text
        if 'lastModifiedBy' in child.tag:
            tags['Last Modified By Date'] = child.text
        if 'created' in child.tag:
            tags['Create Date'] = child.text
        if 'modified' in child.tag:
            tags['Modify Date'] = child.text
        if 'category' in child.tag:
            tags['Category'] = child.text
        if 'contentStatus' in child.tag:
            tags['Status'] = child.text
        # Revision is only meaningful for word/powerpoint documents.
        if (filename.endswith('.docx') or
                filename.endswith('.pptx')):
            if 'revision' in child.tag:
                tags['Revision'] = child.text
    # App tags: the available statistics depend on the document type.
    for child in app.iterchildren():
        if filename.endswith('.docx'):
            if 'TotalTime' in child.tag:
                tags['Edit Time (Min)'] = child.text
            if 'Pages' in child.tag:
                tags['Page Count'] = child.text
            if 'Words' in child.tag:
                tags['Word Count'] = child.text
            if 'Characters' in child.tag:
                tags['Character Count'] = child.text
            if 'Lines' in child.tag:
                tags['Line Count'] = child.text
            if 'Paragraphs' in child.tag:
                tags['Paragraph Count'] = child.text
            if 'Company' in child.tag:
                tags['Company'] = child.text
            if 'HyperlinkBase' in child.tag:
                tags['Hyperlink Base'] = child.text
        elif filename.endswith('.pptx'):
            if 'TotalTime' in child.tag:
                tags['Edit Time (Min)'] = child.text
            if 'Words' in child.tag:
                tags['Word Count'] = child.text
            if 'Paragraphs' in child.tag:
                tags['Paragraph Count'] = child.text
            if 'Slides' in child.tag:
                tags['Slide Count'] = child.text
            if 'Notes' in child.tag:
                tags['Note Count'] = child.text
            if 'HiddenSlides' in child.tag:
                tags['Hidden Slide Count'] = child.text
            if 'Company' in child.tag:
                tags['Company'] = child.text
            if 'HyperlinkBase' in child.tag:
                tags['Hyperlink Base'] = child.text
        else:
            # XLSX (or anything else): only company-level tags apply.
            if 'Company' in child.tag:
                tags['Company'] = child.text
            if 'HyperlinkBase' in child.tag:
                tags['Hyperlink Base'] = child.text
    return tags, headers
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from app import db
# Association table for the many-to-many link between AgeGroup and
# Function (surrogate id plus the two foreign keys).
age_func = db.Table('age_func',
                    db.Column('id', db.Integer, primary_key=True, autoincrement=True),
                    db.Column('age_id', db.Integer, db.ForeignKey('age_group.id'), nullable=False),
                    db.Column('func_id', db.Integer, db.ForeignKey('function.id'), nullable=False)
                    )
class AgeGroup(db.Model):
    """Age bracket; linked many-to-many to Function and one-to-many to Book."""
    __tablename__ = 'age_group'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(10), nullable=False, unique=True)
    functions = db.relationship('Function',
                                secondary=age_func,
                                backref=db.backref('age_set', lazy='dynamic')
                                )
    books = db.relationship('Book', backref='age_bk', lazy='dynamic')

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return '<AgeGroup: {}>'.format(self.name)

    def model_to_dict(self, query_relation=False):
        """Serialize to a plain dict; include related functions on demand."""
        result = {'id': self.id, 'name': self.name}
        if query_relation:
            related = self.functions or []
            result['functions'] = [func.model_to_dict() for func in related]
        return result

    def save(self):
        """Persist this row and commit immediately."""
        db.session.add(self)
        db.session.commit()
class Function(db.Model):
    """Book purpose/function category; linked many-to-many to AgeGroup."""
    __tablename__ = 'function'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(10), nullable=False, unique=True)
    books = db.relationship('Book', backref='function_set', lazy='dynamic')
    age_groups = db.relationship('AgeGroup',
                                 secondary=age_func,
                                 backref=db.backref('function_ag', lazy='dynamic')
                                 )

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return '<Function: {}>'.format(self.name)

    def model_to_dict(self, query_relation=False):
        """Serialize to a plain dict; include related age groups on demand."""
        payload = {'id': self.id, 'name': self.name}
        if query_relation:
            groups = self.age_groups or []
            payload['age_groups'] = [group.model_to_dict() for group in groups]
        return payload

    def save(self):
        """Persist this row and commit immediately."""
        db.session.add(self)
        db.session.commit()
|
nilq/baby-python
|
python
|
# Tool Imports
from bph.tools.windows.nircmd import BphNirCmd as NirCmd
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.session import BphSession as Session
# Start a BPH session for this project and bring up the template server
# the tools report back to.
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
# Launch calc.exe via NirCmd, waiting 3 seconds after issuing the command.
nircmd = NirCmd()
nircmd.start_process(program=r'calc.exe')
nircmd.execute(delay=3)
# A fresh NirCmd instance then kills the same process.
nircmd = NirCmd()
nircmd.kill_process(program=r'calc.exe')
nircmd.execute(delay=3)
|
nilq/baby-python
|
python
|
from sudachipy import dictionary
from sudachipy import tokenizer
from sudachipy.plugin import oov
from kuro2sudachi.normalizer import SudachiCharNormalizer
import jaconv
import fileinput
import argparse
import json
import os
import re
# Sudachi split mode used when tokenizing candidate words.
mode = tokenizer.Tokenizer.SplitMode.C

# Command-line interface definition.
parser = argparse.ArgumentParser(
    description="convert kuromoji user dict to sudacchi user dict"
)
parser.add_argument("file", help="kuromoji dict file path")
parser.add_argument(
    "-c",
    "--config",
    help="convert config file (json format file)",
)
parser.add_argument("-o", "--out", help="output path")
parser.add_argument(
    "-d",
    "--rewrite_def",
    default=os.path.dirname(os.path.abspath(__file__)) + "/rewrite.def",
    help="rewrite text file path",
)
parser.add_argument(
    "--rm_already_exist",
    action="store_true",
    help="remove words system dict already exist"
)
parser.add_argument("-r", "--sudachi_setting", help="the setting file in JSON format")
parser.add_argument("-s", "--sudachi_dict_type", help="sudachidict type")
parser.add_argument("-m", "--merge_dict", help="A dictionary for split registration of words that are not in the system dictionary. Must be specified as a user dictionary in sudachi's configuration file (json).")
parser.add_argument(
    "--ignore",
    action="store_true",
    help="ignore invalid format line / unsupported pos error / oov error in splitted word",
)

# Fallback part-of-speech mapping used when no --config file is given.
# left_id/right_id/cost follow the sudachi user-dictionary CSV conventions.
default_setting = {
    "固有名詞": {
        "sudachi_pos": "名詞,固有名詞,一般,*,*,*",
        "left_id": 4786,
        "right_id": 4786,
        "cost": 7000,
    },
    "名詞": {
        "sudachi_pos": "名詞,普通名詞,一般,*,*,*",
        "left_id": 5146,
        "right_id": 5146,
        "cost": 7000,
    },
}

# Matches strings made only of katakana; used to validate readings.
p = re.compile("[\u30A1-\u30FC]*")
class Error(Exception):
    """Base class for all converter errors."""
    pass


class UnSupportedPosError(Error):
    """Raised when a part of speech has no entry in the conversion setting."""
    pass


class DictFormatError(Error):
    """Raised for malformed input lines or a missing rewrite.def path."""
    pass


class OOVError(Error):
    """Raised when a split component is out of the system vocabulary."""
    pass
class Converter:
    """Convert kuromoji user-dictionary lines to sudachi user-dictionary
    CSV lines, using a sudachi tokenizer to validate and split entries."""

    def __init__(
        self,
        rewrite_file,
        config=None,
        sudachi_setting=None,
        dict_type="core",
        rm=False,
    ):
        # rewrite_file: path to sudachi's rewrite.def (char normalization).
        if rewrite_file == "":
            raise DictFormatError("rewrite.def file path is required")
        self.tokenizer = dictionary.Dictionary(
            dict_type=dict_type, config_path=sudachi_setting
        ).create()
        # POS conversion table: user-supplied JSON or the module default.
        if config is not None:
            with open(config) as f:
                s = json.load(f)
        else:
            s = default_setting
        self.rewrite = rewrite_file
        self.setting = s
        # rm: drop words the system dictionary already tokenizes as one unit.
        self.rm = rm
        self.normalizer = SudachiCharNormalizer(rewrite_def_path=self.rewrite)

    def convert(self, line: str) -> str:
        """Convert one kuromoji CSV line; return "" when the word is skipped."""
        data = line.split(",")
        try:
            word = data[0]
            # splited = data[1]
            yomi = self.nomlized_yomi(data[2].replace(" ", ""))
            pos = self.pos_convert(data[3].replace(" ", ""))
        except IndexError:
            raise DictFormatError(f"'{line}' is invalid format")
        words = [m.surface() for m in self.tokenizer.tokenize(word, mode)]
        # alrady exists in system dic
        if self.rm and len(words) == 1:
            return ""
        normalized = self.normalizer.rewrite(word)
        unit_div_info = "*,*"
        try:
            # unit_div_mode (e.g. ["A", "B"]) requests split info per mode.
            if (udm := pos.get("unit_div_mode")) != None:
                unit_div_info = self.split(normalized, udm)
        except OOVError as e:
            print(e)
            raise e
        split_mode = pos.get("split_mode", "*")
        return f"{normalized},{pos['left_id']},{pos['right_id']},{pos['cost']},{word},{pos['sudachi_pos']},{yomi},{word},*,{split_mode},{unit_div_info},*"

    def pos_convert(self, pos: str):
        """Look up the sudachi POS settings for a kuromoji POS string."""
        try:
            spos = self.setting[pos]
            return spos
        except KeyError:
            raise UnSupportedPosError(f"{pos} is not supported pos")

    def nomlized_yomi(self, yomi: str) -> str:
        """Convert a reading to katakana; return "" unless it is pure katakana."""
        yomi = jaconv.hira2kata(yomi)
        if p.fullmatch(yomi):
            return yomi
        return ""

    def split_info(self, normalized: str, udm: list[str], mode: any) -> str:
        """Return '/'-joined word ids of *normalized* tokenized with *mode*.

        Returns "*" for numerals; raises OOVError when any component is
        missing from the system dictionary.
        """
        # NOTE(review): the udm argument is unused here — presumably kept
        # for signature symmetry with split(); confirm before removing.
        word_ids = []
        oov = []
        for m in self.tokenizer.tokenize(normalized, mode):
            if ",".join(m.part_of_speech()) == "名詞,数詞,*,*,*,*":
                return "*"
            if m.is_oov() or m.dictionary_id()==-1:
                oov.append(m.surface())
                continue
            word_ids.append(str(m.word_id()))
        if len(oov) > 0:
            raise OOVError(f"split word has out of vocab: {oov} in {normalized}")
        return "/".join(word_ids)

    def split(self, normalized: str, udm: list[str]) -> str:
        """Build the 'A-info,B-info' unit-division field for the CSV line."""
        try:
            unit_div_info = []
            if "A" in udm:
                info = self.split_info(normalized, udm, tokenizer.Tokenizer.SplitMode.A)
                unit_div_info.append(info)
            else:
                unit_div_info.append("*")
            if "B" in udm:
                info = self.split_info(normalized, udm, tokenizer.Tokenizer.SplitMode.B)
                unit_div_info.append(info)
            else:
                unit_div_info.append("*")
            return ",".join(unit_div_info)
        except OOVError as e:
            raise e
def cli() -> str:
    """Entry point: convert a kuromoji user dictionary file into a
    sudachi user dictionary CSV, optionally prepending a merge dict."""
    args = parser.parse_args()
    rewrite = args.rewrite_def
    rm = args.rm_already_exist
    config = args.config
    sudachi_setting = args.sudachi_setting
    sudachi_dict_type = args.sudachi_dict_type
    merge_dict = args.merge_dict
    c = Converter(
        rewrite,
        config,
        sudachi_setting=sudachi_setting,
        dict_type=sudachi_dict_type,
        rm=rm,
    )
    # BUG FIX: the output file is now closed via a context manager
    # (previously it was opened and never closed).
    with open(args.out, "wt") as out:
        # BUG FIX: only read the merge dictionary when one was given;
        # fileinput.input(files=None) would otherwise block reading stdin.
        if merge_dict:
            with fileinput.input(files=merge_dict) as merged:
                for line in merged:
                    line = line.replace("\n", "")
                    out.write(f"{line}\n")
        with fileinput.input(files=args.file) as input:
            for line in input:
                line = line.strip()
                # Skip blank lines and '#' comment lines.
                if line == "":
                    continue
                if line[0] == "#":
                    continue
                converted = ""
                try:
                    converted = c.convert(line)
                    if converted == "":
                        continue
                except (UnSupportedPosError, DictFormatError, OOVError) as e:
                    # --ignore drops bad lines instead of aborting.
                    if args.ignore:
                        continue
                    else:
                        raise e
                out.write(f"{converted}\n")
|
nilq/baby-python
|
python
|
###############################################################################
#
# Copyright (c) 2018, Henrique Morimitsu,
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# #############################################################################
from datetime import datetime
import numpy as np
import os
import os.path as osp
import shutil
import tensorflow as tf
import time
tfe = tf.contrib.eager
class LearningRate(object):
    """ Helper class for managing the learning rate. It currently implements
    only learning rate decay at fixed step numbers.
    Arguments:
    global_step: tfe.Variable: the current step (iteration) number.
    initial_lr: float: initial value of learning rate.
    lr_decay: float: decay value to multiply at each decay step.
    lr_decay_steps: list: the step numbers at which the decay is applied.
    """

    def __init__(self, global_step, initial_lr, lr_decay, lr_decay_steps):
        self.global_step = global_step
        # Stored as a tfe.Variable so it can be checkpointed alongside
        # the network weights.
        self.current_lr = tfe.Variable(initial_lr, dtype=tf.float32, name='lr')
        self.initial_lr = tf.constant(initial_lr, tf.float32)
        self.lr_decay = tf.constant(lr_decay, tf.float32)
        self.lr_decay_steps = lr_decay_steps
        # Tracks the step of the last applied decay so a decay step is
        # never applied twice for the same global_step value.
        self.last_lr_update = tfe.Variable(
            global_step, dtype=tf.int64, name='last_lr_update')

    def get_lr(self):
        """ Returns the current learning rate.
        Note that this call will activate the decay, if global_step is at a
        decay step value.
        Returns:
        tfe.Variable: the learning rate at the current global_step
        """
        if self.global_step > self.last_lr_update and \
                int(self.global_step) in self.lr_decay_steps:
            # In-place update: multiply by the decay factor and record
            # the step at which it happened.
            tf.assign(self.current_lr, self.current_lr * self.lr_decay)
            tf.assign(self.last_lr_update, self.global_step)
        return self.current_lr
def trainer(tr_manager_trainer_queue,
            trainer_tr_manager_queue,
            train_dir, batch_size,
            save_ckpt_interval,
            max_train_iters,
            initial_lr,
            lr_decay,
            lr_decay_steps,
            log_interval,
            backpropagate_losing_policies,
            keep_checkpoint_every_n_hours,
            game_config_string,
            game_manager_module,
            game_manager_kwargs):
    """ Starts the training process. The network parameters will be restored
    from a checkpoint, if it exists.
    Args:
    tr_manager_trainer_queue: Queue: to get training batch samples from
    trainer_manager.
    trainer_tr_manager_queue: Queue: to put checkpoint file names to
    trainer_manager.
    train_dir: string: path to the directory where training files are
    stored.
    batch_size: int: batch size to use during training.
    save_ckpt_interval: int: number of training steps to save a new
    checkpoint.
    max_train_iters: int: number of training steps before concluding.
    initial_lr: float: initial value of learning rate.
    lr_decay: float: decay value to multiply at each decay step.
    lr_decay_steps: list: the step numbers at which the decay is applied.
    log_interval: int: number of steps to print a training log message.
    backpropagate_losing_policies: boolean: if False, ignore policy losses
    coming from the losing player.
    keep_checkpoint_every_n_hours: float: interval in hours at which a
    checkpoint is kept on disk permanently.
    game_config_string: string: a name for the current game.
    game_manager_module: list: a list with two string containing the name
    of the game manager module (file) and the name of the class inside of
    the module.
    game_manager_kwargs: dict: a dictionary of arguments and its respective
    values.
    """
    np.random.seed()
    ckpt_path = game_manager_kwargs['ckpt_path']
    game_manager_kwargs['replace_unloaded_resnet_by_naivenet'] = False
    # Dynamically import the game manager class named in the config.
    gm_module = __import__(game_manager_module[0])
    gm_class = getattr(gm_module, game_manager_module[1])
    game_manager = gm_class(**game_manager_kwargs)
    global_step = tf.train.get_or_create_global_step()
    lr = LearningRate(global_step, initial_lr, lr_decay, lr_decay_steps)
    start_time = time.time()
    net = game_manager.net
    optimizer = tf.train.MomentumOptimizer(
        lr.get_lr(), momentum=0.9, use_nesterov=True)
    # Checkpoint tracks net weights, optimizer state, step and lr together.
    checkpoint = tfe.Checkpoint(
        net=net, optimizer=optimizer, global_step=global_step,
        current_lr=lr.current_lr)
    if ckpt_path is not None:
        print('Loading training params from: ' + ckpt_path)
        checkpoint.restore(ckpt_path)
    ckpt_name = None
    if ckpt_path is not None:
        ckpt_name = osp.split(ckpt_path)[1]
    # Tell the training manager which checkpoint (if any) we started from.
    trainer_tr_manager_queue.put(ckpt_name)
    writer = tf.contrib.summary.create_file_writer(train_dir)
    writer.set_as_default()
    # Running sums over each log window, plus exponential moving averages
    # of the losses; -1.0 marks "EMA not initialized yet".
    total_loss = 0.0
    total_policy_loss = 0.0
    total_value_loss = 0.0
    total_reg_loss = 0.0
    exp_decay = 1.0 - 1.0/log_interval
    exp_moving_loss = -1.0
    exp_moving_policy_loss = -1.0
    exp_moving_value_loss = -1.0
    exp_moving_reg_loss = -1.0
    keep_checkpoint_every_n_seconds = keep_checkpoint_every_n_hours * 3600.0
    last_kept_checkpoint_time = time.time()
    while global_step <= max_train_iters:
        # Workaround for memory leak when using loss in Eager Execution
        # See tensorflow issue #20062
        tf.reset_default_graph()
        with tf.contrib.summary.always_record_summaries():
            # Blocks until the training manager provides the next batch.
            states_batch, policy_batch, value_prior_batch = \
                tr_manager_trainer_queue.get()
            with tf.device(game_manager_kwargs['tf_device']):
                states_batch_tf = tf.constant(states_batch, tf.float32)
                policy_batch_tf = tf.constant(policy_batch, tf.int32)
                value_prior_batch_tf = \
                    tf.constant(value_prior_batch, tf.float32)
                with tfe.GradientTape() as tape:
                    policy_pred, value_pred = \
                        net(states_batch_tf, training=True)
                    policy_loss = tf.losses.sparse_softmax_cross_entropy(
                        policy_batch_tf, policy_pred,
                        reduction=tf.losses.Reduction.NONE)
                    if not backpropagate_losing_policies:
                        # Zero out the policy loss of samples whose value
                        # prior is negative (the losing player's moves).
                        policy_loss = tf.where(
                            tf.less(value_prior_batch_tf, 0.0),
                            tf.zeros_like(policy_loss),
                            policy_loss)
                    policy_loss = tf.reduce_mean(policy_loss)
                    value_loss = tf.square(
                        value_pred[:, 0] - value_prior_batch_tf)
                    value_loss = tf.reduce_mean(value_loss)
                    reg_loss = tf.reduce_sum(net.losses)
                    loss = policy_loss + value_loss + reg_loss
                grads = tape.gradient(loss, net.variables)
                # apply_gradients also increments global_step.
                optimizer.apply_gradients(
                    zip(grads, net.variables),
                    global_step=global_step)
            total_loss += loss
            total_policy_loss += policy_loss
            total_value_loss += value_loss
            total_reg_loss += reg_loss
            if exp_moving_loss < 0.0:
                # First iteration: seed the EMAs with the raw losses.
                exp_moving_loss = loss
                exp_moving_policy_loss = policy_loss
                exp_moving_value_loss = value_loss
                exp_moving_reg_loss = reg_loss
            else:
                exp_moving_loss = \
                    exp_decay * exp_moving_loss + (1.0-exp_decay) * loss
                exp_moving_policy_loss = \
                    exp_decay * exp_moving_policy_loss + \
                    (1.0-exp_decay) * policy_loss
                exp_moving_value_loss = \
                    exp_decay * exp_moving_value_loss + \
                    (1.0-exp_decay) * value_loss
                exp_moving_reg_loss = \
                    exp_decay * exp_moving_reg_loss + \
                    (1.0-exp_decay) * reg_loss
            if int(global_step) % log_interval == 0:
                # Write EMA losses to TensorBoard and print window averages.
                tf.contrib.summary.scalar(
                    'policy_loss', exp_moving_policy_loss,
                    step=global_step)
                tf.contrib.summary.scalar(
                    'value_loss', exp_moving_value_loss, step=global_step)
                tf.contrib.summary.scalar(
                    'regularization_loss', exp_moving_reg_loss,
                    step=global_step)
                tf.contrib.summary.scalar(
                    'total_loss', exp_moving_loss, step=global_step)
                tf.contrib.summary.scalar('lr', lr.get_lr(),
                                          step=global_step)
                total_loss /= log_interval
                total_policy_loss /= log_interval
                total_value_loss /= log_interval
                total_reg_loss /= log_interval
                elapsed_time = time.time() - start_time
                examples_per_second = \
                    (states_batch.shape[0] * float(log_interval)) / \
                    elapsed_time
                print(
                    ('%s: Train iter: %d, loss %.04f, ' +
                     'policy-loss %.04f, value-loss %.04f, ' +
                     'regul-loss %.04f, lr %.1e, ' +
                     '%.01f examples per sec.') %
                    (datetime.now().strftime('%Y_%m_%d_%H_%M_%S'),
                     global_step, total_loss, total_policy_loss,
                     total_value_loss, total_reg_loss,
                     float(lr.get_lr().value()), examples_per_second))
                # NOTE(review): total_reg_loss is not reset here while the
                # other three sums are — looks unintentional; confirm.
                total_loss = 0.0
                total_policy_loss = 0.0
                total_value_loss = 0.0
                start_time = time.time()
            if int(global_step) % save_ckpt_interval == 0:
                ckpt_name = '%s-%d.ckpt' % \
                    (game_config_string, global_step)
                ckpt_path = osp.join(train_dir, ckpt_name)
                checkpoint.save(ckpt_path)
                ckpt_path = tf.train.get_checkpoint_state(train_dir)\
                    .model_checkpoint_path
                # This could be done automatically if tfe.Checkpoint
                # supported the keep_checkpoint_every_n_hours argument
                # like tf.train.Saver does
                ckpt_interval = time.time() - last_kept_checkpoint_time
                if ckpt_interval > keep_checkpoint_every_n_seconds:
                    # Copy the latest checkpoint files with a '-keep'
                    # suffix so periodic snapshots survive cleanup.
                    last_ckpt_files = [f for f in os.listdir(train_dir)
                                       if f.startswith(ckpt_name)]
                    for lcf in last_ckpt_files:
                        shutil.copy(
                            osp.join(train_dir, lcf),
                            osp.join(train_dir, lcf.replace(
                                '.ckpt', '.ckpt-keep')))
                    last_kept_checkpoint_time = time.time()
                print('%s: saved model %s' %
                      (datetime.now().strftime('%Y_%m_%d_%H_%M_%S'),
                       osp.join(train_dir,
                                '%s-%d.ckpt' %
                                (game_config_string, global_step))))
        # Hand the latest checkpoint name back to the manager so workers
        # can pick up the refreshed weights.
        if global_step < max_train_iters:
            ckpt_name = osp.split(ckpt_path)[1]
            trainer_tr_manager_queue.put(ckpt_name)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata; the code is exposed both via find_packages() and as a
# top-level py_module of the same name.
setup(name='dynamicmultithreadedexecutor',
      version='1.0.2',
      description='Dynamic Multi-threaded Executor',
      author='Kevin McCabe',
      author_email='csmp@hotmail.com',
      url='https://github.com/gumpcraca/dynamicmultithreadedexecutor',
      keywords = [],
      packages=find_packages(),
      install_requires=['six','sentinels'],
      py_modules=["dynamicmultithreadedexecutor"],
      classifiers = [],
      )
|
nilq/baby-python
|
python
|
def EscreverArquivoRelatorio(tabelaDados, somaMegaBytes, dadosMedio):
    '''Write the final disk-usage report (relatório.txt).

    tabelaDados: object exposing to_string() (e.g. a pandas DataFrame)
    somaMegaBytes: total space used, in MB
    dadosMedio: average space used per user, in MB
    '''
    # BUG FIX: the original wrote the undefined globals `tabela`,
    # `soma_lista` and `medio_ocupado` instead of the parameters, and
    # the file handle is now closed via a context manager.
    with open('relatório.txt', 'w') as arquivo_final:
        arquivo_final.write('ACME Inc. Uso do espaço em disco pelos usuários')
        arquivo_final.write('\n')
        arquivo_final.write('-' * 70)
        arquivo_final.write('\n')
        arquivo_final.write(tabelaDados.to_string())
        arquivo_final.write('\n')
        arquivo_final.write('\n')
        arquivo_final.write('Espaço total ocupado: {:.2f} MB'.format(somaMegaBytes))
        arquivo_final.write('\n')
        arquivo_final.write('Espaço médio ocupado: {:.2f} MB'.format(dadosMedio))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestPointOfSale(TransactionCase):
    """Tests for the default pricelist chosen by a newly created pos.config."""

    def setUp(self):
        super(TestPointOfSale, self).setUp()

        # ignore pre-existing pricelists for the purpose of this test
        self.env["product.pricelist"].search([]).write({"active": False})

        self.currency = self.env.ref("base.USD")
        self.company1 = self.env["res.company"].create({
            "name": "company 1",
            "currency_id": self.currency.id
        })
        self.company2 = self.env["res.company"].create({
            "name": "company 2",
            "currency_id": self.currency.id
        })
        # A competing pricelist owned by company 2 — the tests verify the
        # pos.config never picks this one.
        self.company2_pricelist = self.env["product.pricelist"].create({
            "name": "company 2 pricelist",
            "currency_id": self.currency.id,
            "company_id": self.company2.id,
            "sequence": 1,  # force this pricelist to be first
        })
        # Run the tests in the context of company 1.
        self.env.user.company_id = self.company1

    def test_default_pricelist_with_company(self):
        """ Verify that the default pricelist belongs to the same company as the config """
        company1_pricelist = self.env["product.pricelist"].create({
            "name": "company 1 pricelist",
            "currency_id": self.currency.id,
            "company_id": self.company1.id,
            "sequence": 2,
        })
        # make sure this doesn't pick the company2 pricelist
        new_config = self.env["pos.config"].create({
            "name": "usd config"
        })
        self.assertEqual(new_config.pricelist_id, company1_pricelist,
                         "POS config incorrectly has pricelist %s" % new_config.pricelist_id.display_name)

    def test_default_pricelist_without_company(self):
        """ Verify that a default pricelist without a company works """
        universal_pricelist = self.env["product.pricelist"].create({
            "name": "universal pricelist",
            "currency_id": self.currency.id,
            "sequence": 2,
        })
        # make sure this doesn't pick the company2 pricelist
        new_config = self.env["pos.config"].create({
            "name": "usd config"
        })
        self.assertEqual(new_config.pricelist_id, universal_pricelist,
                         "POS config incorrectly has pricelist %s" % new_config.pricelist_id.display_name)
|
nilq/baby-python
|
python
|
class TensorflowModelWrapper:
    """Adapter giving a TensorFlow/Keras-style model a torch-like call interface.

    All inference is delegated to the wrapped object's ``predict`` method.
    The model may be supplied at construction time or injected later via
    :meth:`set_model` (backward compatible with the original no-arg ctor).
    """

    def __init__(self, model=None):
        # Optional up-front injection; None means "not configured yet".
        self._model = model

    def set_model(self, model):
        """Store *model* (any object exposing ``predict``) for later inference."""
        self._model = model

    def forward(self, input_):
        """Run inference on *input_*; raises ValueError if no model was set."""
        if self._model is None:
            raise ValueError("No model set: call set_model() first")
        return self._model.predict(input_)

    def __call__(self, *args, **kwargs):
        """Make the wrapper callable, forwarding all arguments to ``predict``."""
        if self._model is None:
            raise ValueError("No model set: call set_model() first")
        return self._model.predict(*args, **kwargs)
|
nilq/baby-python
|
python
|
"""
Partial Entropy Decomposition with the Hcs measure from Ince (2017)
https://arxiv.org/abs/1702.01591
"""
from __future__ import division
import numpy as np
from itertools import combinations
from .pid import BasePID
from .lattice import pid_lattice
from .. import modify_outcomes
from ..algorithms import maxent_dist
from ..multivariate import entropy
from ..utils import flatten, powerset
def h_cs(d, inputs, output=None):
    """
    Compute H_cs, the average of positive pointwise co-information values

    Parameters
    ----------
    d : Distribution
        The distribution to compute i_ccs for.
    inputs : iterable of iterables
        The input variables.
    output : None
        Unused; kept for signature compatibility with other PID measures.

    Returns
    -------
    hcs : float
        The value of H_cs.
    """
    # Re-index the requested variable groupings as 0..n-1 in the coalesced dist.
    var_map = {var: i for i, var in enumerate(inputs)}
    vars = list(sorted(var_map.values()))
    d = d.coalesce(inputs)
    n_variables = d.outcome_length()
    # pairwise marginal maxent
    if n_variables > 2:
        marginals = list(combinations(range(n_variables), 2))
        d = maxent_dist(d, marginals)
    d = modify_outcomes(d, lambda o: tuple(o))
    # calculate pointwise co-information over every non-empty subset of variables
    sub_vars = [var for var in powerset(vars) if var]
    sub_dists = {var: d.marginal(var) for var in sub_vars}
    coinfos = {}
    for e in d.outcomes:
        coinfos[e] = 0.0
        for sub_var in sub_vars:
            # Marginal probability of this outcome restricted to the subset.
            P = sub_dists[sub_var][tuple([e[i] for i in flatten(sub_var)])]
            # Inclusion-exclusion: the sign alternates with the subset size.
            coinfos[e] = coinfos[e] + np.log2(P)*((-1) ** (len(sub_var)))
    # sum positive pointwise terms
    hcs = sum(d[e] * coinfos[e] for e in d.outcomes if coinfos[e] > 0.0)
    return hcs
class PED_CS(BasePID):
    """
    The change in surprisal partial entropy decomposition, as defined by Ince (2017).
    https://arxiv.org/abs/1702.01591
    """
    _name = "H_cs"
    _measure = staticmethod(h_cs)
    _red_string = "H_r"  # label used for redundant-entropy terms
    _pi_string = "H_d"   # label used for partial-entropy atoms

    def __init__(self, dist, inputs=None, **kwargs):
        """
        Parameters
        ----------
        dist : Distribution
            The distribution to compute the decomposition on.
        inputs : iter of iters, None
            The set of variables to include. If None, `dist.rvs` is used.
        **kwargs : dict
            Extra options forwarded to the base-class computation.
        """
        self._dist = dist
        if inputs is None:
            inputs = dist.rvs
        self._kwargs = kwargs
        self._inputs = tuple(map(tuple, inputs))
        self._output = None
        # Redundancy lattice over the inputs; _compute() (from BasePID) fills it in.
        self._lattice = pid_lattice(self._inputs)
        self._total = entropy(self._dist, rvs=self._inputs)
        self._compute()
|
nilq/baby-python
|
python
|
from unittest import TestCase
import numpy as np
from pyfibre.tests.probe_classes.utilities import generate_probe_graph
from pyfibre.tests.dummy_classes import DummyGraphSegment
from pyfibre.tests.probe_classes.objects import ProbeGraphSegment
class TestBaseGraphSegment(TestCase):
    """Exercise JSON (de)serialisation, graph bookkeeping and region extraction
    of a probe graph segment built on the 4-node probe graph."""

    def setUp(self):
        self.graph = generate_probe_graph()
        self.graph_segment = ProbeGraphSegment()

    def test__getstate__(self):
        # to_json must expose the networkx node-link representation under 'graph'.
        status = self.graph_segment.to_json()
        self.assertIn('shape', status)
        self.assertDictEqual(
            status['graph'],
            {'directed': False,
             'graph': {},
             'links': [{'r': 1.4142135623730951,
                        'source': 2, 'target': 3},
                       {'r': 1.4142135623730951,
                        'source': 3, 'target': 4},
                       {'r': 1, 'source': 4, 'target': 5}],
             'multigraph': False,
             'nodes': [{'xy': [0, 0], 'id': 2},
                       {'xy': [1, 1], 'id': 3},
                       {'xy': [2, 2], 'id': 4},
                       {'xy': [2, 3], 'id': 5}]
             }
        )

    def test_deserialise(self):
        # Round-trip: to_json -> from_json -> to_json must be stable.
        status = self.graph_segment.to_json()
        new_graph_segment = DummyGraphSegment.from_json(status)
        status = new_graph_segment.to_json()
        self.assertDictEqual(
            status['graph'],
            {'directed': False,
             'graph': {},
             'links': [{'r': 1.4142135623730951,
                        'source': 2, 'target': 3},
                       {'r': 1.4142135623730951,
                        'source': 3, 'target': 4},
                       {'r': 1, 'source': 4, 'target': 5}],
             'multigraph': False,
             'nodes': [{'xy': [0, 0], 'id': 2},
                       {'xy': [1, 1], 'id': 3},
                       {'xy': [2, 2], 'id': 4},
                       {'xy': [2, 3], 'id': 5}]
             }
        )

    def test_network_init(self):
        self.assertEqual(4, self.graph_segment.number_of_nodes)
        self.assertListEqual(
            [2, 3, 4, 5], self.graph_segment.node_list)
        self.assertEqual(3, self.graph_segment.graph.size())
        self.assertTrue(
            np.allclose(np.array([1, 1]),
                        self.graph_segment.graph.nodes[3]['xy']))
        # Edge weight 'r' is the euclidean node distance.
        self.assertAlmostEqual(
            np.sqrt(2), self.graph_segment.graph.edges[3, 4]['r'])
        self.assertTrue(np.allclose(
            np.array([[0, 0],
                      [1, 1],
                      [2, 2],
                      [2, 3]]),
            self.graph_segment.node_coord))

    def test_network_segment(self):
        segment = self.graph_segment.region
        self.assertEqual(
            (3, 4), self.graph_segment.region.image.shape)
        self.assertEqual(12, segment.area)
        # Without an image attached there is no intensity_image.
        with self.assertRaises(AttributeError):
            _ = segment.intensity_image
        # Disabling dilation/smoothing shrinks the region to the bare nodes.
        self.graph_segment._iterations = 0
        self.graph_segment._area_threshold = 0
        self.graph_segment._sigma = None
        segment = self.graph_segment.region
        self.assertEqual((3, 4), segment.image.shape)
        self.assertEqual(4, segment.area)
        # With an image attached the region gains an intensity_image.
        self.graph_segment.image = np.ones((5, 5)) * 2
        segment = self.graph_segment.region
        self.assertEqual((3, 4), segment.image.shape)
        self.assertEqual((3, 4), segment.intensity_image.shape)

    def test_add_node_edge(self):
        self.graph_segment.add_node(6)
        self.assertEqual(5, self.graph_segment.number_of_nodes)
        self.graph_segment.add_edge(6, 2)
        self.assertEqual(4, self.graph_segment.graph.size())
|
nilq/baby-python
|
python
|
"""Hack route cipher sent by Abraham Lincoln."""
from itertools import combinations
from src.ch03.c1_anagram_generator import split
def get_factors(integer: int) -> list:
    """Get factors of integer.

    Calculate factors of a given integer.

    Args:
        integer (int): Number to get factors of.

    Returns:
        List of integer factors of **integer**, in ascending order.
    """
    factors = set()
    # Every factor pair (i, integer // i) has one member <= sqrt(integer),
    # so scanning up to the square root finds them all. The set absorbs the
    # duplicate produced when integer is a perfect square.
    for candidate in range(1, int(integer ** 0.5) + 1):
        if integer % candidate == 0:
            factors.add(candidate)
            factors.add(integer // candidate)
    return sorted(factors)
def keygen(length: int) -> list:
    """Generate all possible route cipher keys.

    Generates a list of all possible route cipher keys of **length**.

    Args:
        length (int): Length of route cipher key.

    Returns:
        List of lists of integers representing all possible route cipher keys
        of **length**.

    Example:
        >>> from src.ch04.practice.p1_hack_lincoln import keygen
        >>> keygen(2)
        [[-1, -2], [-1, 2], [1, -2], [1, 2]]
    """
    # Local import keeps the module-level import block untouched.
    from itertools import product
    columns = range(1, length + 1)
    # A key is a choice of direction (sign) for each column, i.e. the
    # cartesian product of signs: exactly 2**length keys. (The previous
    # version enumerated combinations of a duplicated [-1, 1]*length list
    # and deduplicated with a set -- far more candidates for the same result.)
    result = [[sign * column for sign, column in zip(signs, columns)]
              for signs in product((-1, 1), repeat=length)]
    result.sort()  # Sort for test consistency.
    return result
def decode_route(keys: list, cipherlist: list) -> list:
    """Decode route cipher.

    Decode **cipherlist** encoded with a route cipher using **keys**.

    Args:
        keys (list): List of signed, integer keys.
        cipherlist (list): List of strings representing encoded message.

    Returns:
        List of strings representing plaintext message.

    Note:
        Assumes vertical encoding route.
    """
    table, message = [], []
    # split() chunks the flat cipherlist into len(keys) columns
    # (helper from src.ch03.c1_anagram_generator -- TODO confirm chunk order).
    split_list = split(cipherlist, len(keys))
    rows = len(split_list[0])
    # Build translation table.
    # NOTE(review): only the *sign* of each key is used; its magnitude is
    # ignored and columns are consumed strictly left to right.
    for key in keys:
        if key < 0:
            # If negative, reverse direction
            split_list[0].reverse()
        table.append(split_list[0])
        del split_list[0]
    # For each column in the table, copy the relevant row.
    for row in range(rows):
        for column in table:
            message.append(column[row])
    return message
def hack_route(ciphertext: str) -> None:
    """Hack route cipher.

    Use :func:`get_factors` to find every viable key length, generate every
    key of that length with :func:`keygen`, and decode the message with
    :func:`decode_route` for each candidate.

    Args:
        ciphertext (str): Message encoded with route cipher.

    Returns:
        None. Prints all possible decoded messages.
    """
    cipherlist = ciphertext.split()
    total = len(cipherlist)
    for key_length in get_factors(total):
        # A 1-column key (whole message) and a total-length key (one word
        # per column) are degenerate routes -- skip them.
        if key_length in (1, total):
            continue
        for candidate in keygen(key_length):
            plaintext = ' '.join(decode_route(candidate, cipherlist))
            print(f'Key: {candidate}\nDecoded message: {plaintext}\n')
def main():
    """Demonstrate hack of Lincoln's route cipher."""
    intro = ('I can do a brute-force hack of a route cipher sent by '
             'Abraham Lincoln,\nand I do a better job than he did in that dumb '
             'zombie movie.')
    note = ('\nNote: I only hack the route cipher. I leave the '
            'word-transposition\ncipher to you and your biochemical brain.\n')
    print(intro)
    print(note)
    ciphertext = """THIS OFF DETAINED ASCERTAIN WAYLAND CORRESPONDENTS OF AT
    WHY AND IF FILLS IT YOU GET THEY NEPTUNE THE TRIBUNE PLEASE ARE THEM CAN
    UP"""
    print(f'Hacking: {ciphertext}\n')
    hack_route(ciphertext)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from django.core.files.storage import FileSystemStorage
class MediaStorage(FileSystemStorage):
    """Plain filesystem storage; distinguished from the others by role only."""
    pass
class ZarrStorage(FileSystemStorage):
    """Plain filesystem storage; distinguished from the others by role only."""
    pass
class FilesStorage(FileSystemStorage):
    """Plain filesystem storage; distinguished from the others by role only."""
    pass
class LocalStorage():
    """Registry grouping the local storage backends by role.

    NOTE(review): the attributes hold the backend *classes*, not instances --
    callers presumably instantiate them; confirm against usage.
    """
    media = MediaStorage
    zarr = ZarrStorage
    files = FilesStorage
|
nilq/baby-python
|
python
|
from django.urls import path, include
from . import views
# URL namespace used when reversing these routes (e.g. 'blog:criar-post').
app_name = 'blog'

urlpatterns = [
    path('', views.Home.as_view()),
    # Post creation and detail pages under /posts/.
    path('posts/', include([
        path('create/', views.CriarPost.as_view(), name='criar-post'),
        path('<slug:titulo>/', views.VerPost.as_view(), name="ver-post"),
    ])),
    # Posts filtered by tag name under /tags/.
    path('tags/', include([
        path('<str:nome>/', views.VerPostsTag.as_view(), name="ver-posts-tag")
    ]))
]
|
nilq/baby-python
|
python
|
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential, Model
from keras.layers import \
Dense, Activation, Conv2D, MaxPool2D, Dropout, Flatten, Input, Reshape, LSTM, Embedding, RepeatVector,\
TimeDistributed, Bidirectional, Concatenate, Lambda, SpatialDropout1D, Softmax
from keras.optimizers import Adam
from tensorflow.python.client import device_lib
from keras.utils import multi_gpu_model
import tensorflow as tf
from sklearn import datasets
from tqdm import tqdm
import math, sys, os, random
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from keras.layers import Input, Conv2D, Conv2DTranspose, Dense, Reshape, MaxPooling2D, UpSampling2D, Flatten, Cropping2D
from keras.models import Model, Sequential
from keras.engine.topology import Layer
from keras.utils import to_categorical
import numpy as np
from tensorboardX import SummaryWriter
import util
INDEX_FROM = 3
CHECK = 5
def generate_seq(model : Model, seed, size, temperature=1.0):
    """Autoregressively sample a token sequence of length *size* from *model*.

    The first ``len(seed)`` tokens are fixed; each subsequent token is sampled
    from the model's output (assumed to be per-position logits -- TODO confirm)
    via util.sample_logits at the given temperature.
    """
    ls = seed.shape[0]
    # Due to the way Keras RNNs work, we feed the model the
    # whole sequence each time, constantly sampling the next word.
    # It's a little bit inefficient, but that doesn't matter much when generating
    tokens = np.concatenate([seed, np.zeros(size - ls)])
    for i in range(ls, size):
        probs = model.predict(tokens[None,:])
        # Extract the (i-1)-th output vector (prediction for position i)
        # and sample the next token index from it.
        next_token = util.sample_logits(probs[0, i-1, :], temperature=temperature)
        tokens[i] = next_token
    return [int(t) for t in tokens]
def sparse_loss(y_true, y_pred):
    """Sparse categorical cross-entropy on raw logits (the model's final Dense is linear)."""
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
def go(options):
    """Train a word-level LSTM language model on the selected corpus and
    periodically print temperature-controlled samples from it.

    *options* is the argparse namespace built in the __main__ block.
    """
    slength = options.max_length
    top_words = options.top_words
    lstm_hidden = options.lstm_capacity
    print('devices', device_lib.list_local_devices())
    tbw = SummaryWriter(log_dir=options.tb_dir)
    # Negative seed means "pick one at random but print it for reproducibility".
    if options.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
        np.random.seed(seed)
    else:
        np.random.seed(options.seed)
    # --- Corpus loading; each branch must define x (batched id sequences)
    # --- and decode() (ids -> readable text).
    if options.task == 'file':
        dir = options.data_dir
        x, x_vocab_len, x_word_to_ix, x_ix_to_word = \
            util.load_sentences(options.data_dir, vocab_size=top_words, limit=options.limit)
        # Finding the length of the longest sequence
        x_max_len = max([len(sentence) for sentence in x])
        print('max sequence length ', x_max_len)
        print(len(x_ix_to_word), 'distinct words')
        x = util.batch_pad(x, options.batch)

        def decode(seq):
            return ' '.join(x_ix_to_word[id] for id in seq)
    elif options.task == 'europarl':
        dir = options.data_dir
        x, x_vocab_len, x_word_to_ix, x_ix_to_word, _, _, _, _ = \
            util.load_data(dir+os.sep+'europarl-v8.fi-en.en', dir+os.sep+'europarl-v8.fi-en.fi', vocab_size=top_words, limit=options.limit)
        # Finding the length of the longest sequence
        x_max_len = max([len(sentence) for sentence in x])
        print('max sequence length ', x_max_len)
        print(len(x_ix_to_word), 'distinct words')
        x = util.batch_pad(x, options.batch)

        def decode(seq):
            return ' '.join(x_ix_to_word[id] for id in seq)
    elif options.task == 'imdb':
        # Load only training sequences
        (x, _), _ = imdb.load_data(num_words=top_words)
        # rm start symbol
        x = [l[1:] for l in x]
        # x = sequence.pad_sequences(x, maxlen=slength+1, padding='post', truncating='post')
        # x = x[:, 1:] # rm start symbol
        x = util.batch_pad(x, options.batch)
        word_to_id = keras.datasets.imdb.get_word_index()
        word_to_id = {k: (v + INDEX_FROM) for k, v in word_to_id.items()}
        word_to_id["<PAD>"] = 0
        word_to_id["<START>"] = 1
        word_to_id["<UNK>"] = 2
        word_to_id["???"] = 3
        id_to_word = {value: key for key, value in word_to_id.items()}

        def decode(seq):
            return ' '.join(id_to_word[id] for id in seq)
    else:
        raise Exception('Task {} not recognized.'.format(options.task))
    print('Data Loaded.')
    print(sum([b.shape[0] for b in x]), ' sentences loaded')
    # for i in range(3):
    #     print(x[i, :])
    #     print(decode(x[i, :]))
    ## Define model: embedding -> LSTM stack -> per-step logits over the vocab.
    input = Input(shape=(None, ))
    embedding = Embedding(top_words, lstm_hidden, input_length=None)
    embedded = embedding(input)
    decoder_lstm = LSTM(lstm_hidden, return_sequences=True)
    h = decoder_lstm(embedded)
    if options.extra is not None:
        for _ in range(options.extra):
            h = LSTM(lstm_hidden, return_sequences=True)(h)
    # Linear activation: raw logits, paired with from_logits=True in sparse_loss.
    fromhidden = Dense(top_words, activation='linear')
    out = TimeDistributed(fromhidden)(h)
    model = Model(input, out)
    opt = keras.optimizers.Adam(lr=options.lr)
    lss = sparse_loss
    model.compile(opt, lss)
    model.summary()
    epochs = 0
    instances_seen = 0
    while epochs < options.epochs:
        for batch in tqdm(x):
            n, l = batch.shape
            batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1)  # prepend start symbol
            batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1)  # append pad symbol
            loss = model.train_on_batch(batch_shifted, batch_out[:, :, None])
            instances_seen += n
            tbw.add_scalar('lm/batch-loss', float(loss), instances_seen)
        epochs += options.out_every
        # Show samples for some sentences from random batches
        for temp in [0.0, 0.7, 1, 1.3, 1.5]:
            print('### TEMP ', temp)
            for i in range(CHECK):
                b = random.choice(x)
                if b.shape[1] > 20:
                    seed = b[0,:20]
                else:
                    seed = b[0, :]
                seed = np.insert(seed, 0, 1)
                gen = generate_seq(model, seed, 60, temperature=temp)
                print('*** [', decode(seed), '] ', decode(gen[len(seed):]))
if __name__ == "__main__":
    ## Parse the command line options and launch training.
    parser = ArgumentParser()
    parser.add_argument("-e", "--epochs",
                        dest="epochs",
                        help="Number of epochs.",
                        default=150, type=int)
    parser.add_argument("-E", "--embedding-size",
                        dest="embedding_size",
                        help="Size of the word embeddings on the input layer.",
                        default=300, type=int)
    parser.add_argument("-o", "--output-every",
                        dest="out_every",
                        help="Output every n epochs.",
                        default=1, type=int)
    parser.add_argument("-l", "--learn-rate",
                        dest="lr",
                        help="Learning rate",
                        default=0.00001, type=float)
    parser.add_argument("-b", "--batch-size",
                        dest="batch",
                        help="Batch size",
                        default=32, type=int)
    parser.add_argument("-t", "--task",
                        dest="task",
                        help="Task",
                        default='imdb', type=str)
    parser.add_argument("-D", "--data-directory",
                        dest="data_dir",
                        help="Data directory",
                        default='./data', type=str)
    parser.add_argument("-L", "--lstm-hidden-size",
                        dest="lstm_capacity",
                        help="LSTM capacity",
                        default=256, type=int)
    parser.add_argument("-m", "--max_length",
                        dest="max_length",
                        help="Max length",
                        default=None, type=int)
    parser.add_argument("-w", "--top_words",
                        dest="top_words",
                        help="Top words",
                        default=10000, type=int)
    parser.add_argument("-I", "--limit",
                        dest="limit",
                        help="Character cap for the corpus",
                        default=None, type=int)
    parser.add_argument("-T", "--tb-directory",
                        dest="tb_dir",
                        help="Tensorboard directory",
                        default='./runs/lm', type=str)
    parser.add_argument("-r", "--random-seed",
                        dest="seed",
                        help="RNG seed. Negative for random",
                        default=1, type=int)
    parser.add_argument("-x", "--extra-layers",
                        dest="extra",
                        help="Number of extra LSTM layers",
                        default=None, type=int)
    options = parser.parse_args()
    print('OPTIONS', options)
    go(options)
|
nilq/baby-python
|
python
|
import asyncio
import ffmpeg
# Reason: Following export method in __init__.py from Effective Python 2nd Edition item 85
from asynccpu import ProcessTaskPoolExecutor # type: ignore
# Reason: Following export method in __init__.py from Effective Python 2nd Edition item 85
from asyncffmpeg import FFmpegCoroutineFactory, StreamSpec # type: ignore
async def create_stream_spec_copy() -> StreamSpec:
    """Build a stream spec that remuxes input.mp4 to output1.mp4 without re-encoding."""
    return ffmpeg.output(ffmpeg.input("input.mp4"), "output1.mp4", c="copy")
async def create_stream_spec_filter() -> StreamSpec:
    """Build a stream spec that scales input.mp4 to width 768 (height keeps aspect)."""
    scaled = ffmpeg.filter(ffmpeg.input("input.mp4"), "scale", 768, -1)
    return ffmpeg.output(scaled, "output2.mp4")
async def main() -> None:
    """Run both encodings concurrently on a 3-worker process pool."""
    coroutine = FFmpegCoroutineFactory.create()
    factories = [create_stream_spec_copy, create_stream_spec_filter]
    with ProcessTaskPoolExecutor(max_workers=3, cancel_tasks_when_shutdown=True) as executor:
        tasks = [
            executor.create_process_task(coroutine.execute, factory)
            for factory in factories
        ]
        await asyncio.gather(*tasks)


if __name__ == "__main__":
    asyncio.run(main())
|
nilq/baby-python
|
python
|
import argparse
import csv
import inspect
import os
import re
import warnings
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from time import time
import pandas as pd
warnings.filterwarnings("ignore")
REPO = Path(__file__).resolve().parents[2]
@contextmanager
def timer(name):
    """Context manager that prints the wall-clock duration of the enclosed block.

    The 'done' line is emitted even if the block raises (try/finally), so
    aborted runs still report their elapsed time; the original skipped it
    on exceptions.
    """
    t0 = time()
    print(f'[{name}] start')
    try:
        yield
    finally:
        print(f'[{name}] done in {time() - t0:.0f} s')
def get_arguments(description):
    """Parse the command line for a feature script: a single --force/-f flag."""
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.add_argument(
        '--force', '-f', action='store_true', help='Overwrite existing files')
    return arg_parser.parse_args()
def get_features(namespace):
    """Yield an instance of every concrete Feature subclass found in *namespace*.

    Non-class entries and abstract classes are skipped. (The original made a
    pointless shallow copy of the namespace dict and iterated unused keys.)
    """
    for value in namespace.values():
        if inspect.isclass(value) and issubclass(value, Feature) and not inspect.isabstract(value):
            yield value()
def generate_features(namespace, overwrite):
    """Build and save every feature in *namespace*; cached ones are skipped unless *overwrite*."""
    for feature in get_features(namespace):
        if feature.data_path.exists() and not overwrite:
            # Pickle already on disk and no --force: leave it untouched.
            print(feature.name, 'was skipped')
        else:
            feature.run().save()
class Feature(metaclass=ABCMeta):
    """Abstract base for a cached, named feature table.

    Subclasses implement :meth:`create_features` to fill ``self.data``;
    :meth:`run` applies the optional column prefix/suffix and :meth:`save`
    pickles the result to ``<dir>/<name>.pkl``.
    """
    prefix = ""
    suffix = ""
    dir = REPO / "data" / "processed"

    def __init__(self):
        # Derive the feature name from the class name:
        # ALL-CAPS -> lowercase, CamelCase -> snake_case.
        if self.__class__.__name__.isupper():
            self.name = self.__class__.__name__.lower()
        else:
            self.name = re.sub("([A-Z])", lambda x: "_" + x.group(1).lower(), self.__class__.__name__).lstrip('_')
        # To be populated by the subclass in create_features().
        self.data = pd.DataFrame()
        self.data_path = self.dir / f"{self.name}.pkl"

    def run(self):
        """Build the features, then apply the optional column prefix/suffix."""
        with timer(self.name):
            self.create_features()
            prefix = self.prefix + '_' if self.prefix else ''
            suffix = '_' + self.suffix if self.suffix else ''
            self.data.columns = prefix + self.data.columns + suffix
        return self

    @abstractmethod
    def create_features(self):
        raise NotImplementedError

    def save(self):
        # Avoid LightGBMError: "Do not support special JSON characters in feature name."
        self.data.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in self.data.columns]
        self.data.to_pickle(str(self.data_path))

    def load(self):
        self.data = pd.read_pickle(str(self.data_path))
def create_memo(col_name, desc):
    """Append a (column, description) row to the shared feature memo CSV, once per column."""
    file_path = Feature.dir / "_features_memo.csv"
    # hydra moves the current directory to its log path, so reset it to this file's directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    file_path.touch()
    with open(file_path, "r+") as f:
        lines = f.readlines()
        lines = [line.strip() for line in lines]
        # Skip if this column is already documented.
        col = [line for line in lines if line.split(",")[0] == col_name]
        if len(col) != 0:
            return
        # File pointer is at EOF after readlines(), so this appends.
        writer = csv.writer(f)
        writer.writerow([col_name, desc])
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
    """One-hot encode every object-dtype column of *df* with pd.get_dummies.

    Returns the encoded frame and the list of newly created dummy columns.
    drop_first is deliberately left False: highly correlated dummies are
    pruned later during correlation analysis.
    """
    before = list(df.columns)
    object_cols = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [col for col in df.columns if col not in before]
    return df, added
|
nilq/baby-python
|
python
|
# -*- coding: utf8 -*-
from ..core.Oracle import Oracle
from ..utils.ColorString import ColorString
from ..utils.utils import *
from .Config import Config
import argparse
import re
import os
def install_jdk():
    """Install the Oracle JDK via the Oracle helper."""
    Oracle.install_jdk()
def uninstall_jdk():
    """Uninstall the Oracle JDK via the Oracle helper."""
    Oracle.execute_uninstall_jdk()
def rsync_server_core_data():
    '''Migrate the Minecraft server's core data to another location or host (used during server migration).'''
    # Command-line parsing; argparse docs: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser(description='use rsync command to sync the minecraft server core data to other location or host')
    # Tool
    parser.add_argument('-s', metavar='source', dest='source', help='Specified the source file or dir to sync')
    parser.add_argument('-d', metavar='destination', dest='destination', help='Specified the destination dir to sync')
    parser.add_argument('-y', '--yes', default=False, action='store_true', help='ask yes when require user select')
    args = parser.parse_args()
    source = args.source
    destination = args.destination
    ftp_server_base_dir_name = os.path.basename(Config.game_ftp_server_base_dir())
    server_core_data_dir_name = os.path.basename(Config.game_ftp_server_core_data_backup_dir())
    server_core_data_dir_path = os.path.join(os.path.expanduser('~'),"%s/%s" % (ftp_server_base_dir_name, server_core_data_dir_name))
    # Default the source to the local core-data backup dir when none was given;
    # a directory source becomes a glob so its *contents* are synced.
    if not source and os.path.exists(server_core_data_dir_path):
        source = server_core_data_dir_path
        if os.path.isdir(server_core_data_dir_path):
            source += '/*'

    def check_args(source, destination):
        # Both ends are mandatory; destination may be a local path or a remote host.
        if not destination or not source:
            print(ColorString.warn('You should provide both source and destination argument for this command, destination can be a (local dir/file) remote host (example: ubuntu@mc.jokerhub.cn)'))
            exit(-1)

    def execute_sync(source, destination, test = True):
        # Build and run the rsync command; test=True appends -n (dry run).
        check_args(source, destination)
        pattern = re.compile(r'\w+@\w+')
        dest = destination.strip()
        source = source.strip()
        match = re.match(pattern, dest)
        # A bare user@host destination gets the remote target dir appended.
        if match:
            ftp_server_base_dir_name = os.path.basename(Config.game_ftp_server_base_dir())
            sync_file_dir_name = os.path.basename(source)
            if not os.path.exists(source):
                # Source is a glob (dir/*): name the remote subdir after the parent dir.
                segments = list(os.path.split(source))[0:-1]
                sync_file_dir_name = os.path.basename(os.path.join(*segments))
                dest += ':~/%s/%s' % (ftp_server_base_dir_name,sync_file_dir_name)
            else:
                dest += ':~/%s/' % (ftp_server_base_dir_name)
        rsync_cmd = 'rsync -zarvh %s %s ' % (source, dest)
        rsync_cmd += "--exclude 'plugins/dynmap/*'"
        if test:
            rsync_cmd += ' -n'
        os.system(rsync_cmd)
        if test:
            print('\ncommand: %s' % ColorString.confirm(rsync_cmd))
            print(ColorString.hint("Run in Fake Mode!"))

    # Dry run first so the user can inspect the command, then ask for confirmation.
    check_args(source = source, destination = destination)
    execute_sync(source = source, destination = destination, test = True)
    confirm = ['Y','y','Yes','yes']
    cancel = ['N','n','No','no']
    while True:
        a = hint(ColorString.confirm('\nAre you confirm want to execute this operation? [%s] ' % ('/'.join(confirm) + '|' + '/'.join(cancel))))
        if a in confirm:
            execute_sync(source=source, destination = destination, test = False)
            break
        elif a in cancel:
            break
        else:
            print(ColorString.warn('Your input is invalid, Please try again!'))
|
nilq/baby-python
|
python
|
import unittest
import numpy as np
from nptest import nptest
class Test_ShapeBaseTests(unittest.TestCase):
    """Print-based smoke tests for numpy shape-manipulation routines
    (atleast_*, stacking, splitting, kron, tile, apply_*); results are
    inspected visually rather than asserted."""

    def test_atleast_1d(self):
        a = np.atleast_1d(1.0)
        print(a)
        print("**************")
        x = np.arange(9.0).reshape(3,3)
        b = np.atleast_1d(x)
        print(b)
        print("**************")
        c = np.atleast_1d(1, [3,4])
        print(c)

    def test_atleast_2d(self):
        a = np.atleast_2d(1.0)
        print(a)
        print("**************")
        x = np.arange(9.0).reshape(3,3)
        b = np.atleast_2d(x)
        print(b)
        print("**************")
        c = np.atleast_2d(1, [3,4], [5.6])
        print(c)

    def test_atleast_3d(self):
        a = np.atleast_3d(1.0)
        print(a)
        print("**************")
        x = np.arange(9.0).reshape(3,3)
        b = np.atleast_3d(x)
        print(b)
        print("**************")
        c = np.atleast_3d([1,2], [[3,4]], [[5,6]])
        print(c)
        for arr in c:
            print(arr, arr.shape)

    def test_vstack_1(self):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.vstack((a,b))
        print(c)

    def test_vstack_2(self):
        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.vstack((a,b))
        print(c)

    def test_hstack_1(self):
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        c = np.hstack((a,b))
        print(c)

    def test_hstack_2(self):
        a = np.array([[1],[2],[3]])
        b = np.array([[2],[3],[4]])
        c = np.hstack((a,b))
        print(c)

    def test_stack_1(self):
        a = np.array([[1],[2],[3]])
        b = np.array([[2],[3],[4]])
        c = np.stack((a,b), axis=0)
        print(c)
        print("**************")
        d = np.stack((a,b), axis=1)
        print(d)
        print("**************")
        e = np.stack((a,b), axis=2)
        print(e)

    def test_block_1(self):
        A = np.eye(2) * 2
        B = np.eye(3) * 3
        C = np.block([[A, np.zeros((2, 3))], [np.ones((3, 2)), B]])
        print(C)

    def test_block_2(self):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.block([a, b, 10])  # hstack([a, b, 10])
        print(c)
        print("**************")
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.block([[a], [b]])  # vstack([a, b])
        print(c)

    def test_expand_dims_1(self):
        a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).reshape(2,-1, 2)
        b = np.expand_dims(a, axis=0)
        print(b)
        print("**************")
        c = np.expand_dims(a, axis=1)
        print(c)
        print("**************")
        d = np.expand_dims(a, axis=2)
        print(d)

    def test_column_stack_1(self):
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        c = np.column_stack((a, b))
        print(c)
        print("**************")
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.column_stack([a, b])
        print(c)

    def test_row_stack_1(self):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.row_stack((a,b))
        print(c)

    def test_dstack_1(self):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.dstack((a,b))
        print(c)
        print("**************")
        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.dstack((a,b))
        print(c)

    def test_array_split_1(self):
        x = np.arange(8.0)
        y = np.array_split(x, 3)
        print(y)
        print("**************")
        x = np.arange(7.0)
        y = np.array_split(x, 3)
        print(y)

    def test_array_split_2(self):
        x = np.arange(16.0).reshape(2,8,1)
        y = np.array_split(x, 3, axis=0)
        print(y)
        print("**************")
        x = np.arange(16.0).reshape(2,8,1)
        y = np.array_split(x, 3, axis=1)
        print(y)
        print("**************")
        x = np.arange(16.0).reshape(2,8,1)
        y = np.array_split(x, 3, axis=2)
        print(y)

    def test_split_1(self):
        x = np.arange(9.0)
        y = np.split(x, 3)
        print(y)
        print("**************")
        x = np.arange(8.0)
        y = np.split(x, [3,5,6,10])
        print(y)

    def test_split_2(self):
        x = np.arange(16.0).reshape(8,2,1)
        y = np.split(x, [2,3], axis=0)
        print(y)
        print("**************")
        x = np.arange(16.0).reshape(8,2,1)
        y = np.split(x, [2,3], axis=1)
        print(y)
        print("**************")
        x = np.arange(16.0).reshape(8,2,1)
        y = np.split(x, [2,3], axis=2)
        print(y)

    def test_hsplit_1(self):
        x = np.arange(16).reshape(4,4)
        y = np.hsplit(x, 2)
        print(y)
        print("**************")
        x = np.arange(16).reshape(4,4)
        y = np.hsplit(x, [3,6])
        print(y)

    def test_hsplit_2(self):
        x = np.arange(8).reshape(2,2,2)
        y = np.hsplit(x, 2)
        print(y)
        print("**************")
        x = np.arange(8).reshape(2,2,2)
        y = np.hsplit(x, [3,6])
        print(y)

    def test_vsplit_1(self):
        x = np.arange(16).reshape(4,4)
        y = np.vsplit(x, 2)
        print(y)
        print("**************")
        x = np.arange(16).reshape(4,4)
        y = np.vsplit(x, [3,6])
        print(y)

    def test_vsplit_2(self):
        x = np.arange(8).reshape(2,2,2)
        y = np.vsplit(x, 2)
        print(y)
        print("**************")
        x = np.arange(8).reshape(2,2,2)
        y = np.vsplit(x, [3,6])
        print(y)

    def test_dsplit_1(self):
        x = np.arange(16).reshape(2,2,4)
        y = np.dsplit(x, 2)
        print(y)
        print("**************")
        x = np.arange(16).reshape(2,2,4)
        y = np.dsplit(x, [3,6])
        print(y)

    def test_kron_1(self):
        a = np.kron([1,10,100], [5,6,7])
        print(a)
        print("*******")
        b = np.kron([5,6,7], [1,10,100])
        print(b)
        print("*******")
        x = np.array([[2,3],[4,5]])
        y = np.array([[5,6],[7,8]])
        c = np.kron(x,y)
        print(c)
        print(c.shape)
        print("*******")
        c = np.kron(np.eye(2, dtype=np.int32), np.ones((2,2), dtype=np.int32))
        print(c)
        print(c.shape)
        print("*******")
        x = np.array([[[2,3,3],[4,5,3]]])
        y = np.array([[[5,6,6,6],[7,8,6,6]]])
        c = np.kron(x,y)
        print(c)
        print(c.shape)
        print("*******")
        d = np.kron(np.ones((5,7,9, 11), dtype=np.int32), np.ones((3,4, 6, 8), dtype=np.int32))
        print(d.shape)

    def test_kron_2(self):
        a = np.arange(100).reshape((2,5,2,5))
        b = np.arange(24).reshape((2,3,4))
        c = np.kron(a,b)
        print(c.shape)
        d = c.sum()
        print(d)

    def test_tile_1(self):
        a = np.array([0, 1, 2])
        b = np.tile(a, 2)
        print(b)
        print("**************")
        c = np.tile(a, (2,2))
        print(c)
        print("**************")
        d = np.tile(a, (2,1,2))
        print(d)
        e = np.arange(100).reshape((2,5,2,5))
        f = np.tile(e, (2,1,2))
        print(f.shape)

    def test_tile_2(self):
        a = np.array([[1, 2], [3, 4]])
        b = np.tile(a, 2)
        print(b)
        print("**************")
        c = np.tile(a, (2, 1))
        print(c)
        print("**************")
        d = np.array([1,2,3,4])
        e = np.tile(d,(4,1))
        print(e)

    def test_apply_along_axis_1(self):
        def my_func(a):
            # Average first and last element of a 1-D array
            return (a[0] + a[-1]) * 0.5
        def my_func2(a):
            # Scale the first element of a 1-D array
            return (a[0] * 10)
        b = np.array([[1,2,3], [4,5,6], [7,8,9]])
        c = np.apply_along_axis(my_func2, 0, b)
        print(c)
        d = np.apply_along_axis(my_func, 1, b);
        print(d)
        print(b)

    def test_apply_along_axis_2(self):
        b = np.array([[[8,1,7], [4,3,9], [5,2,6]]])
        c = np.apply_along_axis(sorted, 1, b)
        print(c)
        c = np.apply_along_axis(sorted, 0, b[:,0,0])
        print(c)
        c = np.apply_along_axis(sorted, 0, b[0,:,0])
        print(c)
        c = np.apply_along_axis(sorted, 0, b[0,0,:])
        print(c)

    def test_apply_along_axis_3(self):
        b = np.array([[1,2,3], [4,5,6], [7,8,9]])
        # NOTE(review): the first np.diag result is immediately overwritten.
        c = np.diag(b)
        c = np.apply_along_axis(np.diag, 1, b)
        print(c)

    def test_apply_over_axes_1(self):
        a = np.arange(24).reshape(2,3,4)
        # print(a)
        # Sum over axes 0 and 2. The result has same number of dimensions as the original array:
        b = np.apply_over_axes(np.sum, a, [0,2])
        print(b)
        print("");
        print("*******")
        print("");
        # Tuple axis arguments to ufuncs are equivalent:
        c = np.sum(a, axis=(0,2), keepdims=True)
        print(c)
        print("");
        print("*******")
        print("");
        d = np.sum(a, axis=0, keepdims=True)
        print(d)
        print("");
        print("*******")
        print("");
        e = np.sum(a, axis=2, keepdims=True)
        print(e)
if __name__ == '__main__':
    # Run the smoke tests when executed directly.
    unittest.main()
|
nilq/baby-python
|
python
|
import os
from behave import *
from copy import deepcopy
from lxml import etree
import tempfile
import uuid
import logging
from pds_doi_service.core.entities.exceptions import InputFormatException, CriticalDOIException
from pds_doi_service.core.util.doi_xml_differ import DOIDiffer
from pds_doi_service.core.actions.draft import DOICoreActionDraft
from pds_doi_service.core.actions.reserve import DOICoreActionReserve
from pds_doi_service.core.actions.release import DOICoreActionRelease
from pds_doi_service.core.outputs.osti.osti_web_client import DOIOstiWebClient
from pds_doi_service.core.util.config_parser import DOIConfigUtil
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global flag to submit the DOI to OSTI or not after it has been built.
g_submit_flag = True
# NOTE(review): the line above is immediately overridden -- submission is
# effectively disabled; the pair acts as a manual dev toggle.
g_submit_flag = False
def get_temporary_output_filename(extension='xml'):
    """Return a unique path in the system temp directory with the given extension."""
    unique_name = f'{uuid.uuid4()}.{extension}'
    return os.path.join(tempfile.gettempdir(), unique_name)
def save_doi_to_temporary_file(doi_label):
    """Write *doi_label* to a fresh temp file and return that file's path."""
    # Persist the label so later validation steps can diff it against a reference.
    output_path = get_temporary_output_filename()
    with open(output_path, "w+") as handle:
        handle.write(doi_label + "\n")
    return output_path
def replace_lidvid_in_file(input_file, lid, extension='csv'):
    """Copy *input_file*, substituting the '{{random_lid}}' placeholder.

    Returns the path of the rewritten temporary copy.
    """
    out_path = get_temporary_output_filename(extension=extension)
    with open(input_file, 'r') as src, open(out_path, 'w') as dst:
        for line in src:
            dst.write(line.replace('{{random_lid}}', lid))
    return out_path
def draft_action_run(node_value, input_value, lid=None):
    """Run the 'draft' action on *input_value* and save the DOI label.

    When *lid* is given, the input is first copied with the placeholder
    LID substituted.  Returns the path of the saved label file.
    """
    action = DOICoreActionDraft(db_name='doi_temp.db')
    logger.info(f"input_value {input_value}")
    if lid:
        input_value = replace_lidvid_in_file(input_value, lid, extension='xml')
    doi_label = action.run(input=input_value,
                           node=node_value,
                           submitter='my_user@my_node.gov', force=True)
    # Persist the label so the next step can compare it to a reference.
    logger.info(f"success input_value {input_value}")
    return save_doi_to_temporary_file(doi_label)
def reserve_action_run(node_value, input_value, lid=None):
    """Run the 'reserve' action (dry-run) and save the DOI label to a file."""
    logger.info(f"when node_value,input_value {node_value,input_value}")
    action = DOICoreActionReserve(db_name='doi_temp.db')
    if lid:
        input_value = replace_lidvid_in_file(input_value, lid, extension='csv')
    doi_label = action.run(
        input=input_value,
        node=node_value, submitter='my_user@my_node.gov',
        dry_run=True, force=True)
    return save_doi_to_temporary_file(doi_label)
def release_action_run(node_value, input_value):
    """Run the 'release' action on *input_value* and save the released label.

    Returns the path of the saved label file.  Exceptions propagate to the
    caller (submit_osti_record catches CriticalDOIException).
    """
    # Fix: the original wrapped this body in a try/except whose handler only
    # re-raised; the no-op handler has been removed.
    release_action = DOICoreActionRelease(db_name='doi_temp.db')
    released_doi_str = release_action.run(input=input_value, node=node_value,
                                          submitter='my_user@my_node.gov', force=True)
    return save_doi_to_temporary_file(released_doi_str)
def file_output_compare(output_file, ref_output_value):
    """Compare two OSTI XML labels produced by 'draft' or 'reserve'.

    Asserts that no fields differ between *output_file* and the reference
    label *ref_output_value*, and returns 1 on success.
    """
    logger.info(f"output_file,ref_output_value {output_file},{ref_output_value}")
    o_fields_differ_list, o_values_differ_list, o_record_index_differ_list = \
        DOIDiffer.doi_xml_differ(ref_output_value, output_file)

    logger.info(f'different fields are {o_fields_differ_list}')
    logger.info(f'o_fields_differ_list {o_fields_differ_list}')
    logger.info(f'o_values_differ_list {o_values_differ_list}')
    logger.info(f'o_record_index_differ_list {o_record_index_differ_list}')

    # Bug fix: `len(...) is 0` compared object identity, not value; use ==.
    assert len(o_fields_differ_list) == 0
    return 1
@given('a valid input at {input_value}')
def given_valid_action_input(context, input_value):
    """Record the step's input path on the behave context for later steps."""
    logger.info(f"given {input_value}")
    # Stash for the following 'when' steps.
    context.input_value = input_value
@given('an invalid PDS4 label at {input_value}')
def given_invalid_pds4(context, input_value):
    """Record the path of an (intentionally invalid) PDS4 label."""
    logger.info(f'an invalid reserve PDS4 label at input_value {input_value}')
    # Stash for the following 'when' steps.
    context.input_value = input_value
@given('random new lid')
def given_random_new_lid(context):
    """Generate a unique PDS logical identifier for this scenario."""
    context.random_lid = 'urn:nasa:pds:' + str(uuid.uuid4())
@when('create draft DOI for node {node_value}')
def when_create_draft_impl(context, node_value):
    """Draft a DOI for the stored input, collecting the output file.

    On CriticalDOIException the message is saved on the context so a
    'then' step can assert on it.
    """
    logger.info(f"when create DOI draft ")
    logger.info(f"input_value {context.input_value}")
    if not hasattr(context, 'output_files'):
        context.output_files = []
    lid = getattr(context, 'random_lid', None)
    try:
        context.output_files.append(
            draft_action_run(node_value, context.input_value, lid=lid))
    except CriticalDOIException as e:
        logger.info(str(e))
        context.exception_msg = str(e)
@then('a reading error report is generated for {input_value}')
def step_an_error_report_is_generated_impl(context, input_value):
    """Verify a previous step captured the expected read-failure message."""
    assert hasattr(context, 'exception_msg')
    expected = f'Error reading file {input_value}'
    assert context.exception_msg == expected
@when('reserve DOI in OSTI format at {node_value}')
def step_when_reserve_doi_in_osti_format_impl(context, node_value):
    """Reserve a DOI for the stored input; record any failure message."""
    input_value = context.input_value
    logger.info(f"when context {context}")
    logger.info(f"when input_value {input_value}")

    if not hasattr(context, 'output_files'):
        context.output_files = []
    try:
        context.output_files.append(reserve_action_run(node_value, input_value))
    except InputFormatException as e:
        # Save the error message to context.exception_msg so the function
        # step_an_error_report_is_generated_impl has something to check.
        logger.info(f"Expecting InputFormatException from input_value {input_value}")
        context.exception_msg = str(e)
        logger.error(e)
    except CriticalDOIException as e:
        logger.info(f"CRITICAL {e}")
        logger.info(f"Expecting CriticalDOIException from input_value {input_value}")
        logger.info(str(e))
        # Save the message for the 'then' step as well.
        context.exception_msg = str(e)
    logger.info(f"context.failed {context.failed}")
@then('OSTI DOI label is created from {input_value} for node {node_value}')
def step_then_osti_doi_label_is_created_impl(context, node_value, input_value):
    """Reserve a DOI label for *input_value*, collecting the output file."""
    logger.info(f"when context {context}")
    logger.info(f"when input_value {input_value}")

    if not hasattr(context, 'output_files'):
        context.output_files = []
    lid = getattr(context, 'random_lid', None)
    try:
        context.output_files.append(
            reserve_action_run(node_value, input_value, lid=lid))
    except InputFormatException as e:
        logger.error(e)
    except CriticalDOIException as e:
        logger.info(f"CRITICAL {e}")
        logger.info(f"Expecting CriticalDOIException from input_value {input_value}")
    logger.info(f"context.failed {context.failed}")
@then(u'The OSTI DOI is submitted to the OSTI server')
def step_doi_label_is_submitted_impl(context):
    """Flip the reserved label's record statuses to 'Reserved' and, when the
    module-level g_submit_flag is True, post the payload to OSTI."""
    doi_config_util = DOIConfigUtil()
    m_config = doi_config_util.get_config()

    # Fetch the content of payload_filename into memory and change the status
    # from status="reserved_not_submitted" to status="Reserved".
    payload_doc = etree.parse(context.output_files[0])
    payload_root = payload_doc.getroot()

    # Make a new root with modified 'status' attribute to 'Reserved'.
    # NOTE(review): payload_root.iter() walks *all* descendants, so any nested
    # <record> elements would be copied too — presumably records only occur at
    # the top level of the reserve output; confirm against the label format.
    out_root = etree.Element("records")
    for element in payload_root.iter():
        if element.tag == 'record':
            new_element = deepcopy(element)
            new_element.attrib['status'] = 'Reserved'
            out_root.append(new_element)
    etree.indent(out_root, space=" ")

    # The payload is now ready to be submitted to OSTI.
    if g_submit_flag:
        doi, response_str = DOIOstiWebClient().submit_content(
            payload=etree.tostring(out_root)
        )
    else:
        logger.info(f"g_submit_flag is False")
@when('reference record is drafted for node {node_value} from {input_subdir}')
def when_reference_is_drafted_from_impl(context, node_value, input_subdir):
    """Draft a DOI from a subdirectory of the reference transaction dir."""
    source_dir = os.path.join(context.transaction_dir, input_subdir)
    if not hasattr(context, 'output_files'):
        context.output_files = []
    context.output_files.append(
        draft_action_run(node_value, source_dir,
                         lid=getattr(context, 'random_lid', None)))
@given('reference transactions in {transaction_dir}')
def given_reference_dir_impl(context, transaction_dir):
    """Remember where the reference transaction data lives."""
    context.transaction_dir = transaction_dir
@when('reference record is reserved for node {node_value} with {input_value}')
def step_reference_is_reserved_at_input_impl(context, node_value, input_value):
    """Reserve a DOI from a file under the reference transaction dir."""
    source = os.path.join(context.transaction_dir, input_value)
    if not hasattr(context, 'output_files'):
        context.output_files = []
    context.output_files.append(
        reserve_action_run(node_value, source,
                           lid=getattr(context, 'random_lid', None)))
@then('produced osti record is similar to reference osti {ref_output_value}')
def step_produced_osti_record_is_similiar_to_reference_osti_impl(context, ref_output_value):
    """Diff the first produced label against the reference label."""
    if hasattr(context, 'transaction_dir'):
        # Reference paths are relative to the transaction directory.
        ref_output_value = os.path.join(context.transaction_dir, ref_output_value)
        logger.info(f"context.transaction_dir {context.transaction_dir}")
    logger.info(f"context.output_files {context.output_files}")
    logger.info(f"ref_output_value {ref_output_value}")
    file_output_compare(context.output_files[0], ref_output_value)
@when('submit osti record for {node_value}')
def submit_osti_record(context, node_value):
    """Release the most recently produced label, capturing any error."""
    try:
        released = release_action_run(node_value, context.output_files[-1])
        context.output_files[-1] = released
        logger.info(f'record in file {context.output_files[-1]} submitted from output index {len(context.output_files)}')
    except CriticalDOIException as e:
        context.exception_msg = str(e)
@then('lidvid already submitted exception is raised')
def step_lidvid_already_submitted_exception_is_raised(context):
    """Check the duplicate-submission error names the first reserved DOI."""
    assert hasattr(context, 'exception_msg')
    logger.info(f'grab first created doi from file {context.output_files}')
    # Pull the DOI assigned by the first reserve step out of its label.
    reserved_xml = etree.parse(context.output_files[0])
    reserved_doi = reserved_xml.xpath('/records/record/doi')[0].text
    expected = f'There is already a DOI {reserved_doi} submitted for this lidvid {context.random_lid}::1.0 (status=Pending). You cannot submit a new DOI for the same lidvid.'
    logger.info(f'expected message {expected}')
    logger.info(f'found msg is {context.exception_msg}')
    assert context.exception_msg == expected
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Watches the blockchain for new blocks
# Request chain height from grin core every x seconds.
# If the height increased request each block from grin core.
# Adds them to the database.
# This keeps a record of each block *as we see it* (before any chain reorgs).
import sys
import traceback
import requests
import json
import atexit
from time import sleep
from datetime import datetime
import pymysql
import sqlalchemy
from grinlib import lib
from grinlib import grin
from grinbase.model.blocks import Blocks
# Name used for this process's logger and its config-file section.
PROCESS = "blockWatcher"
# Populated in main() via lib.get_logger() / lib.get_config().
LOGGER = None
CONFIG = None
# Number of blocks inserted per DB commit while catching up with the chain.
BATCHSZ = 100
def main():
    """Poll grin core for new blocks and persist them to the pool database.

    Runs forever: each pass asks the node for its current chain height and
    inserts every block between the last recorded height and the tip,
    committing in batches of BATCHSZ while catching up.
    """
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB; tear it down cleanly on exit.
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get Config
    check_interval = float(CONFIG[PROCESS]["check_interval"])

    # Find the height of the latest block; block until the node reports one.
    current_height = grin.blocking_get_current_height()
    while current_height <= 0:
        LOGGER.warn("Waiting for first block height")
        sleep(10)
        current_height = grin.blocking_get_current_height()
    LOGGER.warn("current_height = {}".format(current_height))

    # Resume after the most recent block already in the DB, but never
    # backfill more than 1500 blocks behind the current tip.
    latest_block = Blocks.get_latest()
    if latest_block is None:
        last_height = current_height - 1500
    else:
        last_height = latest_block.height
    LOGGER.warn("last_height = {}".format(last_height))
    height = last_height + 1
    height = max(current_height - 1500, height)
    LOGGER.warn("Starting at block height: {}".format(height))

    while True:
        try:
            latest = grin.blocking_get_current_height()
            LOGGER.warn("latest: {}, height: {}".format(latest, height))
            while latest >= height:
                response = grin.blocking_get_block_by_height(height)
                LOGGER.warn("New Block: {} at {}".format(response["header"]["hash"],
                                                         response["header"]["height"]))
                try:
                    # NOTE(review): the [:-1] slice plus the "+00:0" literal in
                    # the format string assumes timestamps shaped like
                    # "....+00:00" with one trailing char stripped — confirm
                    # against the node's actual timestamp format.
                    new_block = Blocks(hash = response["header"]["hash"],
                                       version = response["header"]["version"],
                                       height = response["header"]["height"],
                                       previous = response["header"]["previous"],
                                       timestamp = datetime.strptime(response["header"]["timestamp"][:-1], "%Y-%m-%dT%H:%M:%S+00:0"),
                                       output_root = response["header"]["output_root"],
                                       range_proof_root = response["header"]["range_proof_root"],
                                       kernel_root = response["header"]["kernel_root"],
                                       nonce = response["header"]["nonce"],
                                       edge_bits = response["header"]["edge_bits"],
                                       total_difficulty = response["header"]["total_difficulty"],
                                       secondary_scaling = response["header"]["secondary_scaling"],
                                       num_inputs = len(response["inputs"]),
                                       num_outputs = len(response["outputs"]),
                                       num_kernels = len(response["kernels"]),
                                       fee = sum(k["fee"] for k in response["kernels"]),
                                       lock_height = response["kernels"][0]["lock_height"] if(len(response["kernels"])>0) else 0,
                                       total_kernel_offset = response["header"]["total_kernel_offset"],
                                       state = "new")
                    # Batch inserts when catching up; commit every BATCHSZ
                    # blocks, or every block once near the chain tip.
                    database.db.getSession().add(new_block)
                    if( (height % BATCHSZ == 0) or (height >= (latest-10)) ):
                        database.db.getSession().commit()
                    height = height + 1
                except (sqlalchemy.exc.IntegrityError, pymysql.err.IntegrityError):
                    # Duplicate block (e.g. after a restart): roll back and
                    # resume just past the latest block actually stored.
                    LOGGER.warn("Attempted to re-add block: {}".format(response["header"]["height"]))
                    database.db.getSession().rollback()
                    latest_block = Blocks.get_latest()
                    height = latest_block.height + 1
                    sleep(check_interval)
            sys.stdout.flush()
            sleep(check_interval)
        except Exception as e:
            # Catch-all so a transient node/DB failure never kills the watcher.
            LOGGER.exception("Something went wrong: {}".format(repr(e)))
            database.db.getSession().rollback()
            sys.stdout.flush()
            sleep(check_interval)

    # Should never get here, but....
    LOGGER.warn("=== Completed {}".format(PROCESS))
# Start the block watcher when executed as a script.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 19:05:19 2018
@author: myoussef
"""
import ringity
import unittest
import numpy as np
import networkx as nx
class TestConversions(unittest.TestCase):
    """Round-trip checks for ringity's dict <-> ddict converters."""

    def _assert_roundtrip_stable(self, graph):
        # dict -> ddict -> dict -> ddict must reproduce the first ddict.
        edge_dict = dict(graph.edges)
        first = ringity.methods.dict2ddict(edge_dict)
        flattened = ringity.methods.ddict2dict(first)
        second = ringity.methods.dict2ddict(flattened)
        self.assertEqual(first, second)

    def test_ddict2dict2ddict_unweighted(self):
        self._assert_roundtrip_stable(nx.erdos_renyi_graph(100, 0.17))

    def test_ddict2dict2ddict_weighted(self):
        graph = nx.erdos_renyi_graph(100, 0.17)
        for (u, v) in graph.edges():
            graph[u][v]['weight'] = np.random.uniform(-1, 1)
        self._assert_roundtrip_stable(graph)
# Run the round-trip tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
# Canned replies for the magic-8-ball command.
ballresponse = [
    'Yes', 'No', 'Take a wild guess...', 'Very doubtful',
    'Sure', 'Without a doubt', 'Most likely', 'Might be possible',
    "You'll be the judge", 'no... (╯°□°)╯︵ ┻━┻', 'no... baka',
    'senpai, pls no ;-;'
]

# Decorative "owo" faces.  Bug fix: the original list was missing commas
# after "®w®", "ЮwЮ" and "DwD", which silently fused the adjacent faces via
# implicit string-literal concatenation (e.g. "®w®✧w✧").
owos = [
    "✪w✪", "¤w¤", "∅w∅", "⊗w⊗", "⊕w⊕", "∞w∞", "∆w∆", "θwθ", "δwδ", "①w①", "②w②", "③w③", "④w④", "⑤w⑤", "⑥w⑥", "⑦w⑦", "⑧w⑧", "⑨w⑨",
    "⑩w⑩", "⑴w⑴", "⑵w⑵", "⑶w⑶", "⑷w⑷", "⑸w⑸", "⑹w⑹", "⑺w⑺", "⑻w⑻", "⑼w⑼", "⑽w⑽", "●w●", "○w○",
    "■w■", "□w□", "★w★", "☆w☆", "◆w◆", "◇w◇", "▷w◁", "◐w◐", "◑w◑", "◐w◑", "◐w◑", "♀w♀", "♂w♂", "♡w♡", "❖w❖", "✞w✞", "©w©", "®w®",
    "✧w✧", "✦w✦", "✩w✩", "✫w✫", "✬w✬", "✭w✭", "✮w✮", "✯w✯", "✰w✰", "✱w✱", "✲w✲", "✵w✵", "✶w✶", "✷w✷", ">w0",
    "✸w✸", "※w※", "↻w↻", "σwσ", "✹w✹", "✺w✺", "✻w✻", "✼w✼", "✽w✽", "✾w✾", "✿w✿", "❀w❀", "❁w❁", "❂w❂", "❃w❃", "❅w❅",
    "❆w❆", "❈w❈", "❉w❉", "❊w❊", "❋w❋", "❍w❍", "❏w❏", "❐w❐", "❑w❑", "❒w❒", "◈w◈", "◉w◉", "◊w◊", "○w○", "ФwФ", "фwф", "юwю", "ЮwЮ",
    "#w#", "@w@", "0w0", ";w;", "¢w¢", "×w×", "°w°", "OwO", "owo", "uwu", "UwU", "QwQ", "ОмО", "ОпО", "ОшО", "OnO", "ДwД", "ЖwЖ", "XwX", "qwq", "dwd", "DwD", "ИwИ", "ーwー"
]

# Activity strings for a random "playing ..." status.
randomPlayings = [
    "with OwOs", "with a ball of String", "innocent", "in her burrow!", "with her friends", "in the fields"
]
|
nilq/baby-python
|
python
|
import unicodedata
from collections import defaultdict
from itertools import zip_longest
from .porter import Stemmer
def _normalize(s):
return unicodedata.normalize("NFKD", s)
def _check_type(s):
if not isinstance(s, str):
raise TypeError("expected str or unicode, got %s" % type(s).__name__)
def levenshtein_distance(s1, s2):
    """Return the Levenshtein (edit) distance between two strings.

    The minimum number of single-character insertions, deletions and
    substitutions needed to turn *s1* into *s2*.
    """
    # Inlined type validation (same check/message as the _check_type helper).
    for arg in (s1, s2):
        if not isinstance(arg, str):
            raise TypeError("expected str or unicode, got %s" % type(arg).__name__)

    if s1 == s2:
        return 0
    if not s1:
        return len(s2)
    if not s2:
        return len(s1)

    # Rolling single-row dynamic program: keep only the previous row.
    previous = list(range(len(s2) + 1))
    for i, ch1 in enumerate(s1, start=1):
        current = [i] + [0] * len(s2)
        for j, ch2 in enumerate(s2, start=1):
            substitution = previous[j - 1] + (ch1 != ch2)
            deletion = previous[j] + 1
            insertion = current[j - 1] + 1
            current[j] = min(substitution, deletion, insertion)
        previous = current
    return previous[-1]
def _jaro_winkler(s1, s2, long_tolerance, winklerize):
    """Shared Jaro / Jaro-Winkler implementation behind the public wrappers.

    Returns a similarity weight in [0.0, 1.0].  When *winklerize* is true,
    similar strings (weight > 0.7) get a boost for a common prefix, and
    *long_tolerance* optionally adds a further boost for long strings.
    """
    _check_type(s1)
    _check_type(s2)

    s1_len = len(s1)
    s2_len = len(s2)

    if not s1_len or not s2_len:
        return 0.0

    min_len = min(s1_len, s2_len)
    # Characters match only within half the longer length (minus one).
    search_range = max(s1_len, s2_len)
    search_range = (search_range // 2) - 1
    if search_range < 0:
        search_range = 0

    s1_flags = [False] * s1_len
    s2_flags = [False] * s2_len

    # looking only within search range, count & flag matched pairs
    common_chars = 0
    for i, s1_ch in enumerate(s1):
        low = max(0, i - search_range)
        hi = min(i + search_range, s2_len - 1)
        for j in range(low, hi + 1):
            if not s2_flags[j] and s2[j] == s1_ch:
                s1_flags[i] = s2_flags[j] = True
                common_chars += 1
                break

    # short circuit if no characters match
    if not common_chars:
        return 0.0

    # count transpositions: walk the flagged characters of both strings in
    # order and count positions where they disagree (half-transpositions).
    k = trans_count = 0
    for i, s1_f in enumerate(s1_flags):
        if s1_f:
            for j in range(k, s2_len):
                if s2_flags[j]:
                    k = j + 1
                    break
            if s1[i] != s2[j]:
                trans_count += 1
    trans_count //= 2

    # adjust for similarities in nonmatched characters
    common_chars = float(common_chars)
    weight = (
        (
            common_chars / s1_len
            + common_chars / s2_len
            + (common_chars - trans_count) / common_chars
        )
    ) / 3

    # winkler modification: continue to boost if strings are similar
    if winklerize and weight > 0.7:
        # adjust for up to first 4 chars in common
        j = min(min_len, 4)
        i = 0
        while i < j and s1[i] == s2[i] and s1[i]:
            i += 1
        if i:
            weight += i * 0.1 * (1.0 - weight)

        # optionally adjust for long strings
        # after agreeing beginning chars, at least two or more must agree and
        # agreed characters must be > half of remaining characters
        if (
            long_tolerance
            and min_len > 4
            and common_chars > i + 1
            and 2 * common_chars >= min_len + i
        ):
            weight += (1.0 - weight) * (
                float(common_chars - i - 1) / float(s1_len + s2_len - i * 2 + 2)
            )

    return weight
def jaro_similarity(s1, s2):
    """Return the plain Jaro similarity of *s1* and *s2* (0.0 to 1.0)."""
    score = _jaro_winkler(s1, s2, False, False)  # noqa
    return score
def jaro_winkler_similarity(s1, s2, long_tolerance=False):
    """Return the Jaro-Winkler similarity (Jaro plus common-prefix boost)."""
    score = _jaro_winkler(s1, s2, long_tolerance, True)  # noqa
    return score
def damerau_levenshtein_distance(s1, s2):
    """Return the (unrestricted) Damerau-Levenshtein distance.

    Like Levenshtein distance, but a transposition of two adjacent
    characters also counts as a single edit.
    """
    for arg in (s1, s2):
        if not isinstance(arg, str):
            raise TypeError("expected str or unicode, got %s" % type(arg).__name__)

    len1, len2 = len(s1), len(s2)
    infinite = len1 + len2

    # da[ch]: last (1-based) row index where *ch* occurred in s1.
    da = defaultdict(int)

    # (len1+2) x (len2+2) score matrix with an 'infinite' sentinel border.
    score = [[0] * (len2 + 2) for _ in range(len1 + 2)]
    score[0][0] = infinite
    for i in range(len1 + 1):
        score[i + 1][0] = infinite
        score[i + 1][1] = i
    for j in range(len2 + 1):
        score[0][j + 1] = infinite
        score[1][j + 1] = j

    for i in range(1, len1 + 1):
        db = 0  # last column in this row where the characters matched
        for j in range(1, len2 + 1):
            i1 = da[s2[j - 1]]
            j1 = db
            if s1[i - 1] == s2[j - 1]:
                cost = 0
                db = j
            else:
                cost = 1
            score[i + 1][j + 1] = min(
                score[i][j] + cost,                               # substitution
                score[i + 1][j] + 1,                              # insertion
                score[i][j + 1] + 1,                              # deletion
                score[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1),  # transposition
            )
        da[s1[i - 1]] = i

    return score[len1 + 1][len2 + 1]
def soundex(s):
    """Return the 4-character American Soundex code for *s* (e.g. 'R163').

    Empty input yields the empty string.
    """
    if not isinstance(s, str):
        raise TypeError("expected str or unicode, got %s" % type(s).__name__)
    if not s:
        return ""

    # NFKD-normalize (inlined _normalize helper) and uppercase.
    s = unicodedata.normalize("NFKD", s).upper()

    groups = (
        ("BFPV", "1"),
        ("CGJKQSXZ", "2"),
        ("DT", "3"),
        ("L", "4"),
        ("MN", "5"),
        ("R", "6"),
    )

    def digit_for(letter):
        # Soundex digit for a letter, or None for vowels/H/W/others.
        for letters, code in groups:
            if letter in letters:
                return code
        return None

    result = [s[0]]
    count = 1
    # The would-be digit of the first letter suppresses an immediate repeat.
    last = digit_for(s[0])

    for letter in s[1:]:
        code = digit_for(letter)
        if code is not None:
            if code != last:
                result.append(code)
                count += 1
            last = code
        elif letter != "H" and letter != "W":
            # Vowels (and other uncoded letters except H/W) reset the run.
            last = None
        if count == 4:
            break

    result += "0" * (4 - count)
    return "".join(result)
def hamming_distance(s1, s2):
    """Return the Hamming distance, extended to unequal-length strings.

    Differing lengths contribute the length difference plus the number of
    mismatches over the shared prefix.
    """
    for arg in (s1, s2):
        if not isinstance(arg, str):
            raise TypeError("expected str or unicode, got %s" % type(arg).__name__)

    # Order so that `longer` is at least as long as `shorter`.
    longer, shorter = (s1, s2) if len(s1) >= len(s2) else (s2, s1)
    distance = len(longer) - len(shorter)
    distance += sum(1 for a, b in zip(shorter, longer) if a != b)
    return distance
def nysiis(s):
    """Return the NYSIIS phonetic key for *s*.

    Implements the classic New York State Identification and Intelligence
    System steps: prefix/suffix rewrites, per-character translation, and
    trailing-character cleanup.  Empty input yields the empty string.
    """
    _check_type(s)

    if not s:
        return ""

    s = s.upper()
    key = []

    # step 1 - prefixes
    if s.startswith("MAC"):
        s = "MCC" + s[3:]
    elif s.startswith("KN"):
        s = s[1:]
    elif s.startswith("K"):
        s = "C" + s[1:]
    elif s.startswith(("PH", "PF")):
        s = "FF" + s[2:]
    elif s.startswith("SCH"):
        s = "SSS" + s[3:]

    # step 2 - suffixes
    if s.endswith(("IE", "EE")):
        s = s[:-2] + "Y"
    elif s.endswith(("DT", "RT", "RD", "NT", "ND")):
        s = s[:-2] + "D"

    # step 3 - first character of key comes from name
    key.append(s[0])

    # step 4 - translate remaining chars
    i = 1
    len_s = len(s)
    while i < len_s:
        ch = s[i]
        if ch == "E" and i + 1 < len_s and s[i + 1] == "V":
            ch = "AF"
            i += 1
        elif ch in "AEIOU":
            ch = "A"
        elif ch == "Q":
            ch = "G"
        elif ch == "Z":
            ch = "S"
        elif ch == "M":
            ch = "N"
        elif ch == "K":
            if i + 1 < len(s) and s[i + 1] == "N":
                ch = "N"
            else:
                ch = "C"
        elif ch == "S" and s[i + 1 : i + 3] == "CH":
            ch = "SS"
            i += 2
        elif ch == "P" and i + 1 < len(s) and s[i + 1] == "H":
            ch = "F"
            i += 1
        elif ch == "H" and (
            s[i - 1] not in "AEIOU"
            or (i + 1 < len(s) and s[i + 1] not in "AEIOU")
            or (i + 1 == len(s))
        ):
            # H next to a non-vowel (or at the end) echoes its neighbor.
            if s[i - 1] in "AEIOU":
                ch = "A"
            else:
                ch = s[i - 1]
        elif ch == "W" and s[i - 1] in "AEIOU":
            ch = s[i - 1]

        # Append unless it would repeat the key's last character.
        if ch[-1] != key[-1][-1]:
            key.append(ch)

        i += 1

    key = "".join(key)

    # step 5 - remove trailing S
    if key.endswith("S") and key != "S":
        key = key[:-1]

    # step 6 - replace AY w/ Y
    if key.endswith("AY"):
        key = key[:-2] + "Y"

    # step 7 - remove trailing A
    if key.endswith("A") and key != "A":
        key = key[:-1]

    # step 8 was already done
    return key
def match_rating_codex(s):
    """Return the Match Rating Approach codex for *s*.

    Keeps a leading vowel and all consonants that do not immediately
    repeat; a codex longer than six characters is reduced to its first
    three plus last three characters.
    """
    if not isinstance(s, str):
        raise TypeError("expected str or unicode, got %s" % type(s).__name__)

    s = s.upper()
    codex = []
    prev = None
    for i, c in enumerate(s):
        # Keep: a starting vowel, or a consonant not preceded by the same
        # consonant.  (Operator precedence means the `c != " "` clause only
        # guards the leading-vowel case, as in the original.)
        if c != " " and (i == 0 and c in "AEIOU") or (c not in "AEIOU" and c != prev):
            codex.append(c)
        prev = c

    if len(codex) > 6:
        # just use first/last 3
        return "".join(codex[:3] + codex[-3:])
    return "".join(codex)
def match_rating_comparison(s1, s2):
    """Return True/False for an MRA codex match of *s1* and *s2*, or None.

    None means the comparison is undecidable (codex lengths differ by 3+).
    """
    codex1 = match_rating_codex(s1)
    codex2 = match_rating_codex(s2)
    len1, len2 = len(codex1), len(codex2)

    # length differs by 3 or more, no result
    if abs(len1 - len2) >= 3:
        return None

    # Minimum rating from the summed codex lengths.
    lensum = len1 + len2
    if lensum <= 4:
        min_rating = 5
    elif lensum <= 7:
        min_rating = 4
    elif lensum <= 11:
        min_rating = 3
    else:
        min_rating = 2

    # Drop positions where both codices already agree (left to right).
    res1, res2 = [], []
    for c1, c2 in zip_longest(codex1, codex2):
        if c1 != c2:
            if c1:
                res1.append(c1)
            if c2:
                res2.append(c2)

    # Compare the remainders right to left, counting unmatched characters.
    unmatched_count1 = unmatched_count2 = 0
    for c1, c2 in zip_longest(reversed(res1), reversed(res2)):
        if c1 != c2:
            if c1:
                unmatched_count1 += 1
            if c2:
                unmatched_count2 += 1

    return (6 - max(unmatched_count1, unmatched_count2)) >= min_rating
def metaphone(s):
    """Return the (original) Metaphone phonetic encoding of *s*, uppercased.

    Walks the lowercased, NFKD-normalized input one character at a time,
    emitting phonetic codes per the classic Metaphone rules; '0' stands for
    the 'th' sound and 'x' for 'sh'/'ch'-like sounds.
    """
    _check_type(s)

    result = []

    s = _normalize(s.lower())

    # skip first character if s starts with these
    if s.startswith(("kn", "gn", "pn", "wr", "ae")):
        s = s[1:]

    i = 0
    while i < len(s):
        c = s[i]
        # "*****" is a sentinel for "past the end" lookahead.
        next = s[i + 1] if i < len(s) - 1 else "*****"
        nextnext = s[i + 2] if i < len(s) - 2 else "*****"

        # skip doubles except for cc
        if c == next and c != "c":
            i += 1
            continue

        if c in "aeiou":
            # Vowels are kept only at the start of a word.
            if i == 0 or s[i - 1] == " ":
                result.append(c)
        elif c == "b":
            # NOTE(review): the sentinel makes `next` always truthy, so this
            # condition appears to always hold — the "silent mb" rule may
            # never actually drop the b; confirm against upstream Metaphone.
            if (not (i != 0 and s[i - 1] == "m")) or next:
                result.append("b")
        elif c == "c":
            if next == "i" and nextnext == "a" or next == "h":
                result.append("x")
                i += 1
            elif next in "iey":
                result.append("s")
                i += 1
            else:
                result.append("k")
        elif c == "d":
            if next == "g" and nextnext in "iey":
                result.append("j")
                i += 2
            else:
                result.append("t")
        elif c in "fjlmnr":
            result.append(c)
        elif c == "g":
            if next in "iey":
                result.append("j")
            elif next == "h" and nextnext and nextnext not in "aeiou":
                i += 1
            elif next == "n" and not nextnext:
                i += 1
            else:
                result.append("k")
        elif c == "h":
            if i == 0 or next in "aeiou" or s[i - 1] not in "aeiou":
                result.append("h")
        elif c == "k":
            # k after c is silent (the c already produced 'k').
            if i == 0 or s[i - 1] != "c":
                result.append("k")
        elif c == "p":
            if next == "h":
                result.append("f")
                i += 1
            else:
                result.append("p")
        elif c == "q":
            result.append("k")
        elif c == "s":
            if next == "h":
                result.append("x")
                i += 1
            elif next == "i" and nextnext in "oa":
                result.append("x")
                i += 2
            else:
                result.append("s")
        elif c == "t":
            if next == "i" and nextnext in "oa":
                result.append("x")
            elif next == "h":
                result.append("0")
                i += 1
            elif next != "c" or nextnext != "h":
                result.append("t")
        elif c == "v":
            result.append("f")
        elif c == "w":
            if i == 0 and next == "h":
                i += 1
                result.append("w")
            elif next in "aeiou":
                result.append("w")
        elif c == "x":
            if i == 0:
                if next == "h" or (next == "i" and nextnext in "oa"):
                    result.append("x")
                else:
                    result.append("s")
            else:
                result.append("k")
                result.append("s")
        elif c == "y":
            if next in "aeiou":
                result.append("y")
        elif c == "z":
            result.append("s")
        elif c == " ":
            # Collapse runs of spaces to a single separator.
            if len(result) > 0 and result[-1] != " ":
                result.append(" ")

        i += 1

    return "".join(result).upper()
def porter_stem(s):
    """Return the Porter stem of *s* (delegates to the bundled Stemmer)."""
    _check_type(s)
    stemmer = Stemmer(s)
    return stemmer.stem()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
def findstem(arr):
    """Return the longest substring common to every word in *arr*.

    Candidates are substrings of the first word; the first longest one (in
    left-to-right scan order) that occurs in every other word wins.

    Fixes over the original: an empty list now returns "" instead of
    raising IndexError, and a single-word list returns the word itself
    instead of "".
    """
    if not arr:
        return ""
    reference = arr[0]
    if len(arr) == 1:
        # The common stem of one word is the word itself.
        return reference

    best = ""
    length = len(reference)
    for i in range(length):
        for j in range(i + 1, length + 1):
            candidate = reference[i:j]
            # Only strictly longer candidates can improve the result.
            if len(candidate) <= len(best):
                continue
            if all(candidate in word for word in arr[1:]):
                best = candidate
    return best
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#
# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset
"""Helper script to convert the NYU Depth v2 dataset Matlab file into a set of PNG and JPEG images.
Receives 3 Files from argparse:
<h5_file> - Contains the original images, depths maps, and scene types
<train_test_split> - contains two numpy arrays with the index of the
images based on the split to train and test sets.
<out_folder> - Name of the folder to save the original and depth images.
Every image in the DB will have its twin B&W image that indicates the depth
in the image. The images will be read, converted by the convert_image function,
and finally saved to paths based on the train/test split and scene types.
"""
from __future__ import print_function
import h5py
import numpy as np
import os
import scipy.io
import sys
import cv2
from tqdm import tqdm
def convert_image(index, depth_map, img, output_folder):
    """Save one RGB image and its depth map into *output_folder*.

    :param index: int, image index used in the output file names
    :param depth_map: numpy array, image depth - 2D array.
    :param img: numpy array, the original RGB image - 3D array.
    :param output_folder: path to save the image in.

    The depth map is scaled by 25.0 and written as PNG; the RGB image is
    written as JPEG with a black border (7 px top/left, 6 px bottom/right).
    """
    # Scale depth for storage.  (An explicit min/max normalization was
    # disabled upstream:
    # normalized_depth = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX))
    cv2.imwrite("%s/%05d_depth.png" % (output_folder, index), depth_map * 25.0)

    # OpenCV expects BGR channel order, so flip RGB -> BGR.
    bgr = img[:, :, ::-1]
    framed = np.zeros(bgr.shape, dtype=np.uint8)
    framed[7:framed.shape[0] - 6, 7:framed.shape[1] - 6, :] = \
        bgr[7:bgr.shape[0] - 6, 7:bgr.shape[1] - 6, :]
    cv2.imwrite("%s/%05d.jpg" % (output_folder, index), framed)
if __name__ == "__main__":
    # Check if got all needed input for argparse
    if len(sys.argv) != 4:
        print("usage: %s <h5_file> <train_test_split> <out_folder>" % sys.argv[0], file=sys.stderr)
        sys.exit(0)

    # load arguments to variables
    h5_file = h5py.File(sys.argv[1], "r")
    train_test = scipy.io.loadmat(sys.argv[2])  # h5py is not able to open that file. but scipy is
    out_folder = sys.argv[3]

    # Extract images *indexes* for train and test data sets
    test_images = set([int(x) for x in train_test["testNdxs"]])
    train_images = set([int(x) for x in train_test["trainNdxs"]])
    print("%d training images" % len(train_images))
    print("%d test images" % len(test_images))

    # Grayscale depth maps, one per image.
    depth = h5_file['depths']

    print("Reading", sys.argv[1])
    images = h5_file['images']  # (num_channels, height, width)

    # Extract all sceneTypes per image - "office", "classroom", etc.
    scenes = [u''.join(chr(c[0]) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]

    for i, image in tqdm(enumerate(images), desc="Processing images", total=len(images)):
        # The split file stores 1-based (Matlab-style) indexes.
        idx = int(i) + 1
        if idx in train_images:
            train_test = "train"
        else:
            assert idx in test_images, "index %d neither found in training set nor in test set" % idx
            train_test = "test"

        # Create path to save image in, grouped by split and scene type.
        folder = "%s/%s/%s" % (out_folder, train_test, scenes[i])
        if not os.path.exists(folder):
            os.makedirs(folder)

        # Arrays are stored transposed; .T restores height x width order.
        convert_image(i, depth[i, :, :].T, image.T, folder)

    print("Finished")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Last modified: Tue, 23 Jan 2018 23:39:11 +0900
#
# try import libsbml
try:
from libsbml import ASTNode
from libsbml import AST_PLUS
from libsbml import AST_MINUS
from libsbml import AST_TIMES
from libsbml import formulaToString
from libsbml import readSBMLFromFile
except ImportError:
from libsbml import ASTNode
from libsbml import AST_PLUS
from libsbml import AST_MINUS
from libsbml import AST_TIMES
from libsbml import formulaToString
from libsbml import readSBMLFromFile
class Converter():
def __init__(self, filepath="", sbmldocument=None):
    """Build a converter from an SBML file path or a parsed SBMLDocument.

    :param filepath: path to an SBML file (used when *sbmldocument* is None)
    :param sbmldocument: an already-parsed libsbml SBMLDocument
    """
    self.filepath = filepath
    self.clear_objects()
    # try SBMLDocument at first, and then SBML file
    if sbmldocument is not None:
        self.sbmldocument = sbmldocument
    elif filepath != "":  # bug fix: was `filepath is not ""` (identity test)
        self.sbmldocument = readSBMLFromFile(filepath)
    # NOTE(review): if neither argument is given, self.sbmldocument is never
    # set and the next line raises AttributeError — same as the original.
    self.update_sbmldocument(self.sbmldocument)
def clear_objects(self):
    """Reset every derived-model dictionary to an empty state."""
    # pars / icdict / varspecs / functions / funcargs all start empty.
    for attr in ("pars", "icdict", "varspecs", "functions", "funcargs"):
        setattr(self, attr, {})
def update_sbmlfile(self, filepath=""):
    """Parse *filepath* and rebuild all derived objects from it."""
    if filepath != "":  # bug fix: was `filepath is not ""` (identity test)
        self.filepath = filepath
    # NOTE(review): this parses the *argument* (possibly ""), not
    # self.filepath — preserved as-is; confirm the intended behavior when
    # called with no argument.
    self.sbmldocument = readSBMLFromFile(filepath)
    self.update_sbmldocument(self.sbmldocument)
def update_sbmldocument(self, sbmldocument):
    """Rebuild pars/icdict/varspecs/functions from *sbmldocument*."""
    if sbmldocument is None:
        # Nothing to rebuild from.
        return
    self.sbmlmodel = sbmldocument.getModel()
    self.filepath = ""
    self.clear_objects()
    self.generate_pars(self.sbmlmodel)
    self.generate_icdict(self.sbmlmodel)
    self.generate_varspecs(self.sbmlmodel)
    self.generate_functions(self.sbmlmodel)
def generate_pars(self, model):
# global parameters
for p in model.getListOfParameters():
self.pars[p.getId()] = p.getValue()
# local parameters
for r in model.getListOfReactions():
k = r.getKineticLaw()
for p in r.getKineticLaw().getListOfParameters():
# we assume there is no conflict on parameter id
assert p.getId() not in self.pars, "Please rename your parameter id so that there is no conflict between local and global parameters."
self.pars[p.getId()] = p.getValue()
# compartments
for p in model.getListOfCompartments():
self.pars[p.getId()] = p.getSize()
def generate_icdict(self, model):
for s in model.getListOfSpecies():
if s.isSetInitialConcentration():
self.icdict[s.getId()] = s.getInitialConcentration()
elif s.isSetInitialAmount():
self.icdict[s.getId()] = s.getInitialAmount()
def is_species_reactant_of(self, species, reaction):
for sr in reaction.getListOfReactants():
if sr.getSpecies() == species.getId():
return True
return False
def is_species_product_of(self, species, reaction):
for sr in reaction.getListOfProducts():
if sr.getSpecies() == species.getId():
return True
return False
def add_ast_as_reactant(self, ast, r):
if ast is None: # if there is no parent, return -1 * v1.
root = ASTNode(AST_TIMES)
l = ASTNode()
l.setValue(-1.0)
root.addChild(l)
root.addChild(r.getKineticLaw().getMath().deepCopy())
else:
root = ASTNode(AST_MINUS)
root.addChild(ast)
root.addChild(r.getKineticLaw().getMath().deepCopy())
return root
def add_ast_as_product(self, ast, r):
if ast is None: # if there is no parent, return v1.
root = r.getKineticLaw().getMath().deepCopy()
else:
root = ASTNode(AST_PLUS)
root.addChild(ast)
root.addChild(r.getKineticLaw().getMath().deepCopy())
return root
def generate_varspecs(self, model):
# Generate Rate equation for all variable Species (ex. dx/dt = v1 - v2 + v3).
for s in model.getListOfSpecies():
#if s.isSetBoundaryCondition() or s.isSetConstant:
# continue
root = None
for r in model.getListOfReactions():
if self.is_species_reactant_of(s, r):
root = self.add_ast_as_reactant(root, r)
if self.is_species_product_of(s, r):
root = self.add_ast_as_product(root, r)
if root is not None:
self.varspecs[s.getId()] = formulaToString(root)
def generate_functions(self, model):
# global parameters
for f in model.getListOfFunctionDefinitions():
ast = f.getMath()
idx = ast.getNumChildren() - 1
ast_func = ast.getChild(idx) # most right child is the function
self.functions[f.getId()] = formulaToString(ast_func)
arglist = []
for i in range(ast.getNumChildren() - 1):
child = ast.getChild(i)
arglist.append(child.getName())
self.funcargs[f.getId()] = arglist
|
nilq/baby-python
|
python
|
import threading
import Pyro4
class NameServerInThread(threading.Thread):
    """Run a Pyro4 name server inside a worker thread.

    start() launches the server; shutdown() asks its daemon loop to exit,
    which unblocks run().
    """

    def __init__(self):
        super(NameServerInThread, self).__init__()
        # Daemon handle; set once the name server has been started.
        self.name_server_daemon = None

    @staticmethod
    def is_name_server_started():
        """Return True if a Pyro4 name server is already reachable."""
        try:
            Pyro4.locateNS()
            return True
        except Exception:  # locateNS raises when no name server responds
            return False

    def name_server_msg_loop(self):
        """Start the name server and serve requests until shutdown()."""
        ns_uri, daemon, broadcast_server = Pyro4.naming.startNS()
        self.name_server_daemon = daemon
        # Single formatted string keeps this valid on both Python 2 and 3
        # (the original used a Python-2-only print statement).
        print("%s %s %s" % (ns_uri, daemon, broadcast_server))
        try:
            daemon.requestLoop()
        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            daemon.close()
            if broadcast_server is not None:
                broadcast_server.close()

    def run(self):
        # BUG FIX: the original raised a plain string, which is itself a
        # TypeError at runtime; raise a real exception instead.
        if self.is_name_server_started():
            raise RuntimeError("Name server running")
        self.name_server_msg_loop()
        print("NS shut down.")

    def shutdown(self):
        """Ask the daemon's request loop to exit; unblocks run()."""
        self.name_server_daemon.shutdown()
|
nilq/baby-python
|
python
|
'''
Wrapper for bert embeddings
'''
import numpy as np
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel
class BertEmbeddings:
    """Embed whole token sequences with a pretrained BERT model.

    Each input text (a list of tokens) is mapped to a single pooled vector;
    batches larger than ``max_batch_size`` are processed in chunks.
    """

    def __init__(self, model_name='bert-base-uncased', cache_dir=None,
                 max_seq_length=64, max_batch_size=64, stats_count=False):
        """Load tokenizer and model, moving the model to GPU when available.

        :param model_name: pretrained model identifier
        :param cache_dir: optional local cache directory for the weights
        :param max_seq_length: sequences are truncated/padded to
            ``max_seq_length - 2`` token ids
        :param max_batch_size: largest batch sent through the model at once
        :param stats_count: when True, track '[UNK]' token statistics
        """
        self.max_seq_length = max_seq_length
        self.max_batch_size = max_batch_size
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print('BertEmbeddings DEVICE: ', self.device)
        self.tokenizer = BertTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
        self.model = BertModel.from_pretrained(model_name, cache_dir=cache_dir)
        self.model.to(self.device)
        self.model.eval()
        # Debug statistics. BUG FIX: counters are now initialised
        # unconditionally so get_unk_ratio() cannot raise AttributeError
        # when stats_count is False.
        self.stats_count = stats_count
        self.unks = 0
        self.total_toks = 0

    def get_unk_ratio(self):
        """Return the fraction of tokens mapped to '[UNK]' so far."""
        return float(self.unks) / self.total_toks

    def is_context_sensitive(self):
        return False

    def is_seq_embedder(self):
        '''
        This embedder embeds the entire text sequence into a single vector
        (not one vector per word).
        '''
        return True

    def size(self):
        return -1

    def units(self):
        return -1

    def __contains__(self, w):
        # WordPiece can tokenize any string, so every word is "contained".
        return True

    def tokenize_text(self, text):
        """Tokenize, truncate and zero-pad one text to max_seq_length-2 ids.

        Returns (token_ids, segment_ids, attention_mask), equal-length lists.
        NOTE(review): no [CLS]/[SEP] markers are added here -- confirm this
        is intentional for the model in use.
        """
        tokenized_text = self.tokenizer.tokenize(' '.join(text))
        if len(tokenized_text) > self.max_seq_length - 2:
            tokenized_text = tokenized_text[:self.max_seq_length - 2]
        if self.stats_count:
            self.unks += tokenized_text.count('[UNK]')
            self.total_toks += len(tokenized_text)
        # Convert tokens to vocabulary indices, then pad ids and mask.
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        mask_ids = [1] * len(indexed_tokens)
        indexed_tokens.extend([0] * ((self.max_seq_length - 2) - len(indexed_tokens)))
        mask_ids.extend([0] * ((self.max_seq_length - 2) - len(mask_ids)))
        segments_ids = [0] * len(indexed_tokens)
        return indexed_tokens, segments_ids, mask_ids

    def represent_text_batch(self, text_batch):
        """Encode a list of token lists, chunking by max_batch_size."""
        represented_num = 0
        encoded_instances = []
        while represented_num < len(text_batch):
            n = min(self.max_batch_size, len(text_batch) - represented_num)
            encoded_n = self.represent_small_text_batch(
                text_batch[represented_num:represented_num + n])
            encoded_instances.append(encoded_n)
            represented_num += n
        if len(encoded_instances) > 1:
            return np.concatenate(encoded_instances, axis=0)
        return encoded_instances[0]

    def represent_small_text_batch(self, text_batch):
        """Run one batch through BERT; return pooled vectors as a numpy array."""
        indexed_tokens_batch, segments_ids_batch, mask_ids_batch = zip(
            *[self.tokenize_text(text) for text in text_batch])
        tokens_tensor = torch.tensor(indexed_tokens_batch, device=self.device)
        segments_tensor = torch.tensor(segments_ids_batch, device=self.device)
        masks_tensor = torch.tensor(mask_ids_batch, device=self.device)
        encoded_words, encoded_text = self.model(
            tokens_tensor, segments_tensor, attention_mask=masks_tensor,
            output_all_encoded_layers=False)
        return encoded_text.detach().cpu().numpy()
if __name__ == '__main__':
    # Smoke test: embed one pre-tokenised sentence and print the result shape.
    # BUG FIX: the original called bert.represent_text(...), which exists only
    # as commented-out code, and passed a raw string where the API expects a
    # batch of token lists.
    bert = BertEmbeddings()
    embeddings = bert.represent_text_batch([['This', 'is', 'a', 'test', 'yes']])
    print(embeddings.shape)
|
nilq/baby-python
|
python
|
"""Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Tests for source_match.py
"""
import unittest
import create_node
import source_match
DEFAULT_TEXT = 'default'
class TextPlaceholderTest(unittest.TestCase):
    """Unit tests for source_match.TextPlaceholder."""

    def _verify_round_trip(self, placeholder, text, expected=None):
        """Match `text` and check both the match and the regenerated source."""
        expected = text if expected is None else expected
        self.assertEqual(placeholder.Match(None, text), expected)
        self.assertEqual(placeholder.GetSource(None), expected)

    def testMatchSimpleText(self):
        self._verify_round_trip(
            source_match.TextPlaceholder('.*', DEFAULT_TEXT), 'to match')

    def testPartialMatchEnd(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'def \(', DEFAULT_TEXT),
            'def (foo', expected='def (')

    def testMatchWithoutMatchingReturnsDefault(self):
        placeholder = source_match.TextPlaceholder('.*', DEFAULT_TEXT)
        self.assertEqual(placeholder.GetSource(None), DEFAULT_TEXT)

    def testCantMatchThrowsError(self):
        placeholder = source_match.TextPlaceholder('doesnt match', DEFAULT_TEXT)
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            placeholder.Match(None, 'to match')

    def testMatchWhitespace(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'\s*'), ' \t \n ')

    def testWhitespaceMatchesLineContinuations(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'\s*'), ' \t \n \\\n \\\n ')

    def testWhitespaceMatchesComments(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'\s*'), ' \t # abc\n ')

    def testMultipleStatementsSeparatedBySemicolon(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'pdb\npdb'), 'pdb;pdb')

    def testCommentAfterExpectedLinebreak(self):
        self._verify_round_trip(
            source_match.TextPlaceholder(r'pdb\n'), 'pdb # A comment\n')
class FieldPlaceholderTest(unittest.TestCase):
    """Unit tests for source_match.FieldPlaceholder."""

    def testMatchSimpleField(self):
        name_node = create_node.Name('foobar')
        field = source_match.FieldPlaceholder('id')
        self.assertEqual(field.Match(name_node, 'foobar'), 'foobar')
        self.assertEqual(field.GetSource(name_node), 'foobar')

    def testPartialMatch(self):
        name_node = create_node.Name('bar')
        field = source_match.FieldPlaceholder(
            'id', before_placeholder=source_match.TextPlaceholder('foo'))
        self.assertEqual(field.Match(name_node, 'foobarbaz'), 'foobar')
        self.assertEqual(field.GetSource(name_node), 'foobar')

    def testBeforePlaceholder(self):
        name_node = create_node.Name('bar')
        field = source_match.FieldPlaceholder(
            'id',
            before_placeholder=source_match.TextPlaceholder('before '))
        self.assertEqual(field.Match(name_node, 'before bar'), 'before bar')
        self.assertEqual(field.GetSource(name_node), 'before bar')

    def testCantMatchThrowsError(self):
        bad_node = create_node.Name('doesnt_match')
        field = source_match.FieldPlaceholder('id')
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            field.Match(bad_node, 'to match')

    def testRaisesErrorIfFieldIsList(self):
        func_node = create_node.FunctionDef('function_name')
        field = source_match.FieldPlaceholder('body')
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            field.Match(func_node, 'invalid_match')

    def testChangingValueChangesOutput(self):
        name_node = create_node.Name('bar')
        field = source_match.FieldPlaceholder(
            'id', before_placeholder=source_match.TextPlaceholder('foo'))
        self.assertEqual(field.Match(name_node, 'foobarbaz'), 'foobar')
        # Mutating the node must be reflected in the regenerated source.
        name_node.id = 'hello'
        self.assertEqual(field.GetSource(name_node), 'foohello')

    def testWithoutMatch(self):
        name_node = create_node.Name('bar')
        field = source_match.FieldPlaceholder('id')
        self.assertEqual(field.GetSource(name_node), 'bar')
class ListFieldPlaceholderTest(unittest.TestCase):
    """Unit tests for source_match.ListFieldPlaceholder."""

    def _function_with_body(self, *names):
        """Build a FunctionDef whose body is one Expr(Name) per given name."""
        stmts = [create_node.Expr(create_node.Name(n)) for n in names]
        return create_node.FunctionDef('function_name', body=stmts)

    def testMatchSimpleField(self):
        func_node = self._function_with_body('foobar')
        placeholder = source_match.ListFieldPlaceholder('body')
        self.assertEqual(placeholder.Match(func_node, 'foobar\n'), 'foobar\n')
        self.assertEqual(placeholder.GetSource(func_node), 'foobar\n')

    def testMultipleListItems(self):
        func_node = self._function_with_body('foobar', 'baz')
        placeholder = source_match.ListFieldPlaceholder('body')
        self.assertEqual(placeholder.Match(func_node, 'foobar\nbaz\n'),
                         'foobar\nbaz\n')
        self.assertEqual(placeholder.GetSource(func_node), 'foobar\nbaz\n')

    def testMultipleListItemsBeginningAndEnd(self):
        func_node = self._function_with_body('foobar', 'baz')
        placeholder = source_match.ListFieldPlaceholder(
            'body',
            before_placeholder=source_match.TextPlaceholder('z'),
            after_placeholder=source_match.TextPlaceholder('zz'))
        self.assertEqual(placeholder.Match(func_node, 'zfoobar\nzzzbaz\nzz'),
                         'zfoobar\nzzzbaz\nzz')
        self.assertEqual(placeholder.GetSource(func_node), 'zfoobar\nzzzbaz\nzz')

    def testMatchRaisesErrorIfFieldIsNotList(self):
        name_node = create_node.Name('bar')
        placeholder = source_match.ListFieldPlaceholder(
            'id', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
            exclude_first_before=True)
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            placeholder.Match(name_node, 'foobar\nbaz')

    def testMatchRaisesErrorIfFieldDoesntMatch(self):
        func_node = self._function_with_body('foobar')
        placeholder = source_match.ListFieldPlaceholder(
            'body', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
            exclude_first_before=True)
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            placeholder.Match(func_node, 'no match here')

    def testMatchRaisesErrorIfSeparatorDoesntMatch(self):
        func_node = self._function_with_body('foobar', 'baz')
        placeholder = source_match.ListFieldPlaceholder(
            'body', before_placeholder=source_match.TextPlaceholder('\n', '\n'),
            exclude_first_before=True)
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            placeholder.Match(func_node, 'foobarbaz')

    # TODO: Re-enable this after adding indent information to matchers
    @unittest.expectedFailure
    def testListDefaults(self):
        func_node = self._function_with_body('foobar', 'baz')
        # Module() may attach parent links to func_node; kept for parity.
        module_node = create_node.Module(func_node)
        placeholder = source_match.ListFieldPlaceholder(
            'body', before_placeholder=source_match.TextPlaceholder('', ', '),
            exclude_first_before=True)
        self.assertEqual(placeholder.GetSource(func_node), ' foobar\n, baz\n')
# Tests for source_match.BodyPlaceholder: matching a statement body, including
# blank lines and comment-only lines inside the body.
# NOTE(review): indentation was lost in this copy of the file, including
# inside the triple-quoted string literals below, so the code is kept
# byte-identical rather than reformatted.
class BodyPlaceholderTest(unittest.TestCase):
def testMatchSimpleField(self):
body_node = create_node.Expr(create_node.Name('foobar'))
node = create_node.Module(body_node)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n')
self.assertEqual(matched_text, 'foobar\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n')
# A blank line between two statements must survive the match round-trip.
def testMatchFieldAddsEmptySyntaxFreeLine(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.Module(body_node_foobar, body_node_a)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n\na\n')
self.assertEqual(matched_text, 'foobar\n\na\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n\na\n')
# A comment-only line between two statements must also survive.
def testMatchFieldAddsEmptySyntaxFreeLineWithComment(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.Module(body_node_foobar, body_node_a)
placeholder = source_match.BodyPlaceholder('body')
matched_text = placeholder.Match(node, 'foobar\n#blah\na\n')
self.assertEqual(matched_text, 'foobar\n#blah\na\n')
test_output = placeholder.GetSource(node)
self.assertEqual(test_output, 'foobar\n#blah\na\n')
# Matching must stop at the end of the function body: the trailing comment
# and the statement after the body are not part of the matched text.
def testDoesntMatchAfterEndOfBody(self):
body_node_foobar = create_node.Expr(create_node.Name('foobar'))
body_node_a = create_node.Expr(create_node.Name('a'))
node = create_node.FunctionDef('a', body=[body_node_foobar, body_node_a])
matcher = source_match.GetMatcher(node)
text_to_match = """def a():
foobar
#blah
a
# end comment
c
"""
matched_text = matcher.Match(text_to_match)
expected_match = """def a():
foobar
#blah
a
"""
self.assertEqual(matched_text, expected_match)
class TestDefaultSourceMatcher(unittest.TestCase):
    """Unit tests for source_match.DefaultSourceMatcher."""

    def _two_statement_function(self):
        """FunctionDef 'function_name' whose body is `foobar` then `baz`."""
        stmts = [create_node.Expr(create_node.Name('foobar')),
                 create_node.Expr(create_node.Name('baz'))]
        return create_node.FunctionDef('function_name', body=stmts)

    def testInvalidExpectedPartsType(self):
        name_node = create_node.Name('bar')
        with self.assertRaises(ValueError):
            source_match.DefaultSourceMatcher(name_node, ['blah'])

    def testBasicTextMatch(self):
        matcher = source_match.DefaultSourceMatcher(
            None, [source_match.TextPlaceholder('blah', DEFAULT_TEXT)])
        matcher.Match('blah')
        self.assertEqual(matcher.GetSource(), 'blah')

    def testRaisesErrorIfNoTextMatch(self):
        matcher = source_match.DefaultSourceMatcher(
            None, [source_match.TextPlaceholder('blah', DEFAULT_TEXT)])
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            matcher.Match('bla')

    def testBasicFieldMatch(self):
        name_node = create_node.Name('bar')
        matcher = source_match.DefaultSourceMatcher(
            name_node, [source_match.FieldPlaceholder('id')])
        matcher.Match('bar')
        self.assertEqual(matcher.GetSource(), 'bar')

    def testRaisesErrorIfNoFieldMatch(self):
        name_node = create_node.Name('bar')
        matcher = source_match.DefaultSourceMatcher(
            name_node, [source_match.FieldPlaceholder('id')])
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            matcher.Match('ba')

    def testBasicFieldMatchWhenChangedFieldValue(self):
        name_node = create_node.Name('bar')
        matcher = source_match.DefaultSourceMatcher(
            name_node, [source_match.FieldPlaceholder('id')])
        matcher.Match('bar')
        name_node.id = 'foo'
        self.assertEqual(matcher.GetSource(), 'foo')

    def testBasicListMatch(self):
        matcher = source_match.DefaultSourceMatcher(
            self._two_statement_function(),
            [source_match.ListFieldPlaceholder('body')])
        matcher.Match('foobar\nbaz\n')
        self.assertEqual(matcher.GetSource(), 'foobar\nbaz\n')

    def testRaisesErrorWhenNoMatchInBasicList(self):
        matcher = source_match.DefaultSourceMatcher(
            self._two_statement_function(),
            [source_match.ListFieldPlaceholder('body')])
        with self.assertRaises(source_match.BadlySpecifiedTemplateError):
            matcher.Match('foobar\nba\n')

    def testBasicListMatchWhenChangedFieldValue(self):
        func_node = self._two_statement_function()
        matcher = source_match.DefaultSourceMatcher(
            func_node,
            [source_match.ListFieldPlaceholder('body')])
        matcher.Match('foobar\nbaz\n')
        func_node.body[0].value.id = 'hello'
        self.assertEqual(matcher.GetSource(), 'hello\nbaz\n')

    def testAdvancedMatch(self):
        func_node = self._two_statement_function()
        matcher = source_match.DefaultSourceMatcher(
            func_node,
            [source_match.TextPlaceholder('def ', 'def '),
             source_match.FieldPlaceholder('name'),
             source_match.TextPlaceholder(r'\(\)', r'()'),
             source_match.ListFieldPlaceholder('body')])
        matcher.Match('def function_name()foobar\nbaz\n')
        func_node.body[0].value.id = 'hello'
        self.assertEqual(matcher.GetSource(), 'def function_name()hello\nbaz\n')

    # TODO: Re-enable this after adding indent information to matchers
    @unittest.expectedFailure
    def testGetSourceWithoutMatchUsesDefaults(self):
        func_node = self._two_statement_function()
        # Module() may attach parent links to func_node; kept for parity.
        module_node = create_node.Module(func_node)
        matcher = source_match.DefaultSourceMatcher(
            func_node,
            [source_match.TextPlaceholder('def ', 'default '),
             source_match.FieldPlaceholder('name'),
             source_match.TextPlaceholder(r'\(\)', r'()'),
             source_match.SeparatedListFieldPlaceholder(
                 'body', source_match.TextPlaceholder('\n', ', '))])
        func_node.body[0].value.id = 'hello'
        self.assertEqual(matcher.GetSource(),
                         'default function_name() hello\n, baz\n')
class TestGetMatcher(unittest.TestCase):
    """Tests for the source_match.GetMatcher factory."""

    def testDefaultMatcher(self):
        ref_node = create_node.VarReference('foo', 'bar')
        matcher = source_match.GetMatcher(ref_node)
        matcher.Match('foo.bar')
        self.assertEqual(matcher.GetSource(), 'foo.bar')

    def testDefaultMatcherWithModification(self):
        ref_node = create_node.VarReference('foo', 'bar')
        matcher = source_match.GetMatcher(ref_node)
        matcher.Match('foo.bar')
        # A mutated attribute must show up in the regenerated source.
        ref_node.attr = 'hello'
        self.assertEqual(matcher.GetSource(), 'foo.hello')
class ParenWrappedTest(unittest.TestCase):
    """Round-trip tests for parenthesis-wrapped expressions."""

    def _verify_round_trip(self, node, text):
        """Match `text` against `node` and check the regenerated source."""
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testBasicMatch(self):
        self._verify_round_trip(create_node.Name('a'), '(a)')

    def testNewLineMatch(self):
        self._verify_round_trip(create_node.Name('a'), '(\na\n)')

    def testWithComplexLine(self):
        self._verify_round_trip(
            create_node.Compare('a', '<', 'c'), '(a < \n c\n)')

    def testWithTuple(self):
        call_node = create_node.Call(
            'c', args=[create_node.Name('d'), create_node.Tuple('a', 'b')])
        self._verify_round_trip(call_node, 'c(d, (a, b))')
class ArgumentsMatcherTest(unittest.TestCase):
    """Round-trip tests for argument-list nodes."""

    def _verify_round_trip(self, node, text):
        """Match `text` against `node` and check the regenerated source."""
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testEmpty(self):
        self._verify_round_trip(create_node.arguments(), '')

    def testSingleArg(self):
        self._verify_round_trip(create_node.arguments(args=('a')), 'a')

    def testMultipleArgs(self):
        self._verify_round_trip(create_node.arguments(args=('a', 'b')), 'a, b')

    def testDefault(self):
        self._verify_round_trip(
            create_node.arguments(keys=('a'), values=('b')), 'a=b')

    def testDefaults(self):
        self._verify_round_trip(
            create_node.arguments(keys=('a', 'c'), values=('b', 'd')),
            'a=b, c=d')

    def testArgsAndDefaults(self):
        self._verify_round_trip(
            create_node.arguments(
                args=('e', 'f'), keys=('a', 'c'), values=('b', 'd')),
            'e, f, a=b, c=d')

    def testArgsDefaultsVarargs(self):
        self._verify_round_trip(
            create_node.arguments(
                args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
                vararg_name='args'),
            'e, f, a=b, c=d, *args')

    def testArgsDefaultsVarargsKwargs(self):
        self._verify_round_trip(
            create_node.arguments(
                args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
                vararg_name='args', kwarg_name='kwargs'),
            'e, f, a=b, c=d, *args, **kwargs')
class AssertMatcherTest(unittest.TestCase):
    """Round-trip tests for assert statements."""

    def _verify_round_trip(self, node, text):
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testBasicMatch(self):
        self._verify_round_trip(
            create_node.Assert(create_node.Name('a')), 'assert a\n')

    def testMatchWithMessage(self):
        assert_node = create_node.Assert(create_node.Name('a'),
                                         create_node.Str('message'))
        self._verify_round_trip(assert_node, 'assert a, "message"\n')
class AttributeMatcherTest(unittest.TestCase):
    """Round-trip tests for attribute-access chains."""

    def _verify_round_trip(self, node, text):
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testBasicMatch(self):
        self._verify_round_trip(create_node.VarReference('a', 'b'), 'a.b')

    def testTripleReferenceMatch(self):
        self._verify_round_trip(
            create_node.VarReference('a', 'b', 'c'), 'a.b.c')
class AugAssignMatcherTest(unittest.TestCase):
    """Round-trip test for augmented assignment."""

    def testBasicMatch(self):
        aug_node = create_node.AugAssign('a', create_node.Add(),
                                         create_node.Num(1))
        matcher = source_match.GetMatcher(aug_node)
        matcher.Match('a += 1\n')
        self.assertEqual('a += 1\n', matcher.GetSource())
class BinOpMatcherTest(unittest.TestCase):
    """Round-trip tests covering every binary operator node."""

    def _verify_binop(self, op_node, text):
        """Build `a <op> b`, match `text` and check the regenerated source."""
        binop = create_node.BinOp(
            create_node.Name('a'), op_node, create_node.Name('b'))
        matcher = source_match.GetMatcher(binop)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testAddBinOp(self):
        self._verify_binop(create_node.Add(), 'a + b')

    def testSubBinOp(self):
        self._verify_binop(create_node.Sub(), 'a - b')

    def testMultBinOp(self):
        self._verify_binop(create_node.Mult(), 'a * b')

    def testDivBinOp(self):
        self._verify_binop(create_node.Div(), 'a / b')

    def testFloorDivBinOp(self):
        self._verify_binop(create_node.FloorDiv(), 'a // b')

    def testModBinOp(self):
        self._verify_binop(create_node.Mod(), 'a % b')

    def testPowBinOp(self):
        self._verify_binop(create_node.Pow(), 'a ** b')

    def testLShiftBinOp(self):
        self._verify_binop(create_node.LShift(), 'a << b')

    def testRShiftBinOp(self):
        self._verify_binop(create_node.RShift(), 'a >> b')

    def testBitOrBinOp(self):
        self._verify_binop(create_node.BitOr(), 'a | b')

    def testBitXorBinOp(self):
        self._verify_binop(create_node.BitXor(), 'a ^ b')

    def testBitAndBinOp(self):
        self._verify_binop(create_node.BitAnd(), 'a & b')
class BoolOpMatcherTest(unittest.TestCase):
    """Round-trip tests for boolean operator combinations."""

    def _verify_boolop(self, text, *boolop_args):
        """Build BoolOp(*boolop_args), match `text`, check regeneration."""
        node = create_node.BoolOp(*boolop_args)
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testAndBoolOp(self):
        self._verify_boolop('a and b',
                            create_node.Name('a'),
                            create_node.And(),
                            create_node.Name('b'))

    def testOrBoolOp(self):
        self._verify_boolop('a or b',
                            create_node.Name('a'),
                            create_node.Or(),
                            create_node.Name('b'))

    def testAndOrBoolOp(self):
        self._verify_boolop('a and b or c',
                            create_node.Name('a'),
                            create_node.And(),
                            create_node.Name('b'),
                            create_node.Or(),
                            create_node.Name('c'))

    def testOrAndBoolOp(self):
        self._verify_boolop('a or b and c',
                            create_node.Name('a'),
                            create_node.Or(),
                            create_node.Name('b'),
                            create_node.And(),
                            create_node.Name('c'))
class CallMatcherTest(unittest.TestCase):
    """Round-trip tests for call expressions."""

    def _verify_round_trip(self, node, text):
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testBasicMatch(self):
        self._verify_round_trip(create_node.Call('a'), 'a()')

    def testMatchStarargs(self):
        self._verify_round_trip(
            create_node.Call('a', starargs='args'), 'a(*args)')

    def testMatchWithStarargsBeforeKeyword(self):
        call_node = create_node.Call(
            'a', keys=('b',), values=('c',), starargs='args')
        self._verify_round_trip(call_node, 'a(*args, b=c)')
class ClassDefMatcherTest(unittest.TestCase):
    """Round-trip tests for class definitions."""

    def _verify_round_trip(self, node, text):
        matcher = source_match.GetMatcher(node)
        matcher.Match(text)
        self.assertEqual(text, matcher.GetSource())

    def testBasicMatch(self):
        self._verify_round_trip(
            create_node.ClassDef('TestClass'),
            'class TestClass():\n pass\n')

    def testMatchBases(self):
        self._verify_round_trip(
            create_node.ClassDef('TestClass', bases=('Base1', 'Base2')),
            'class TestClass(Base1, Base2):\n pass\n')

    def testMatchBody(self):
        self._verify_round_trip(
            create_node.ClassDef(
                'TestClass', body=[create_node.Expr(create_node.Name('a'))]),
            'class TestClass():\n a\n')

    def testMatchDecoratorList(self):
        self._verify_round_trip(
            create_node.ClassDef(
                'TestClass',
                decorator_list=[create_node.Name('dec'),
                                create_node.Call('dec2')]),
            '@dec\n@dec2()\nclass TestClass():\n pass\n')

    def testComplete(self):
        self._verify_round_trip(
            create_node.ClassDef(
                'TestClass',
                bases=('Base1', 'Base2'),
                body=[create_node.Expr(create_node.Name('a'))],
                decorator_list=[create_node.Name('dec'),
                                create_node.Call('dec2')]),
            '@dec\n@dec2()\nclass TestClass(Base1, Base2):\n a\n')

    def testCanChangeValues(self):
        class_node = create_node.ClassDef(
            'TestClass',
            bases=('Base1', 'Base2'),
            body=[create_node.Expr(create_node.Name('a'))],
            decorator_list=[create_node.Name('dec'),
                            create_node.Call('dec2')])
        matcher = source_match.GetMatcher(class_node)
        matcher.Match('@dec\n@dec2()\nclass TestClass(Base1, Base2):\n a\n')
        # Mutate every field and confirm the regenerated source follows.
        class_node.bases = [create_node.Name('Base3')]
        class_node.decorator_list = [create_node.Name('dec3')]
        class_node.body[0].value.id = 'x'
        class_node.name = 'TestClass2'
        self.assertEqual('@dec3\nclass TestClass2(Base3):\n x\n',
                         matcher.GetSource())
class CompareMatcherTest(unittest.TestCase):
  """Tests matching ast.Compare nodes for every comparison operator."""
  def testBasicMatch(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Lt(),
        create_node.Name('b'))
    string = 'a < b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMultiMatch(self):
    # Chained comparison: a < b < c is a single Compare node.
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Lt(),
        create_node.Name('b'),
        create_node.Lt(),
        create_node.Name('c'))
    string = 'a < b < c'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testEq(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Eq(),
        create_node.Name('b'))
    string = 'a == b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testNotEq(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.NotEq(),
        create_node.Name('b'))
    string = 'a != b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testLt(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Lt(),
        create_node.Name('b'))
    string = 'a < b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testLtE(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.LtE(),
        create_node.Name('b'))
    string = 'a <= b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testGt(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Gt(),
        create_node.Name('b'))
    string = 'a > b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testGtE(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.GtE(),
        create_node.Name('b'))
    string = 'a >= b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIs(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.Is(),
        create_node.Name('b'))
    string = 'a is b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIsNot(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.IsNot(),
        create_node.Name('b'))
    string = 'a is not b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIn(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.In(),
        create_node.Name('b'))
    string = 'a in b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testNotIn(self):
    node = create_node.Compare(
        create_node.Name('a'),
        create_node.NotIn(),
        create_node.Name('b'))
    string = 'a not in b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class ComprehensionMatcherTest(unittest.TestCase):
  """Tests matching ast.comprehension clauses ('for x in y [if ...]')."""
  def testBasicMatch(self):
    node = create_node.comprehension('a', 'b')
    string = 'for a in b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithIf(self):
    node = create_node.comprehension(
        'a', 'b',
        create_node.Compare('c', '<', 'd'))
    string = 'for a in b if c < d'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class DictMatcherTest(unittest.TestCase):
  """Tests matching ast.Dict literals, including post-match key/value edits."""
  def testBasicMatch(self):
    node = create_node.Dict([create_node.Name('a')],
                            [create_node.Name('b')])
    string = '{a: b}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testEmptyMatch(self):
    node = create_node.Dict()
    string = '{}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testTwoItemMatch(self):
    node = create_node.Dict(
        [create_node.Name('a'), create_node.Str('c')],
        [create_node.Name('b'), create_node.Str('d')])
    string = '{a: b, "c": "d"}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testChangeKey(self):
    # Mutating a key node after matching must update GetSource().
    first_key = create_node.Name('a')
    node = create_node.Dict(
        [first_key, create_node.Str('c')],
        [create_node.Name('b'), create_node.Str('d')])
    string = '{a: b, "c": "d"}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    first_key.id = 'k'
    self.assertEqual('{k: b, "c": "d"}', matcher.GetSource())
  def testChangeVal(self):
    # Mutating a value node after matching must update GetSource().
    first_val = create_node.Name('b')
    node = create_node.Dict(
        [create_node.Name('a'), create_node.Str('c')],
        [first_val, create_node.Str('d')])
    string = '{a: b, "c": "d"}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    first_val.id = 'k'
    self.assertEqual('{a: k, "c": "d"}', matcher.GetSource())
class DictComprehensionMatcherTest(unittest.TestCase):
  """Tests matching ast.DictComp nodes ('{k: v for x in y [if ...]}')."""
  def testBasicMatch(self):
    node = create_node.DictComp('e', 'f', 'a', 'b')
    string = '{e: f for a in b}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithIf(self):
    node = create_node.DictComp(
        'e', 'f', 'a', 'b',
        create_node.Compare('c', '<', 'd'))
    string = '{e: f for a in b if c < d}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class ExceptHandlerMatcherTest(unittest.TestCase):
  """Tests matching ast.ExceptHandler clauses (bare, typed, named, with body)."""
  def testBasicMatch(self):
    node = create_node.ExceptHandler()
    string = 'except:\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithType(self):
    node = create_node.ExceptHandler('TestException')
    string = 'except TestException:\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithName(self):
    node = create_node.ExceptHandler('TestException', name='as_part')
    string = 'except TestException as as_part:\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithBody(self):
    node = create_node.ExceptHandler(
        body=[create_node.Expr(create_node.Name('a'))])
    string = 'except:\n  a\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class FunctionDefMatcherTest(unittest.TestCase):
  """Tests matching ast.FunctionDef nodes (args, defaults, *args/**kwargs,
  decorators, body)."""
  def testEmpty(self):
    node = create_node.FunctionDef('test_fun')
    string = 'def test_fun():\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testSingleArg(self):
    node = create_node.FunctionDef('test_fun', args=('a'))
    string = 'def test_fun(a):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMultipleArgs(self):
    node = create_node.FunctionDef('test_fun', args=('a', 'b'))
    string = 'def test_fun(a, b):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testDefault(self):
    node = create_node.FunctionDef('test_fun', keys=('a'), values=('b'))
    string = 'def test_fun(a=b):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testDefaults(self):
    node = create_node.FunctionDef(
        'test_fun', keys=('a', 'c'), values=('b', 'd'))
    string = 'def test_fun(a=b, c=d):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testArgsAndDefaults(self):
    node = create_node.FunctionDef(
        'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'))
    string = 'def test_fun(e, f, a=b, c=d):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testArgsDefaultsVarargs(self):
    node = create_node.FunctionDef(
        'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
        vararg_name='args')
    string = 'def test_fun(e, f, a=b, c=d, *args):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testArgsDefaultsVarargsKwargs(self):
    node = create_node.FunctionDef(
        'test_fun', args=('e', 'f'), keys=('a', 'c'), values=('b', 'd'),
        vararg_name='args', kwarg_name='kwargs')
    string = 'def test_fun(e, f, a=b, c=d, *args, **kwargs):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testDecoratorList(self):
    node = create_node.FunctionDef(
        'test_fun',
        decorator_list=[create_node.Name('dec'),
                        create_node.Call('call_dec')])
    string = '@dec\n@call_dec()\ndef test_fun():\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testCommentInDecoratorList(self):
    # A comment line between decorators must survive a round trip.
    node = create_node.FunctionDef(
        'test_fun',
        decorator_list=[create_node.Name('dec'),
                        create_node.Call('call_dec')])
    string = '@dec\n#hello world\n@call_dec()\ndef test_fun():\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBody(self):
    node = create_node.FunctionDef(
        'test_fun',
        body=(create_node.Expr(create_node.Name('a')),))
    string = 'def test_fun():\n  a\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class IfMatcherTest(unittest.TestCase):
  """Tests matching ast.If nodes, including else/elif and nested orelse."""
  def testBasicIf(self):
    node = create_node.If(
        create_node.Name('True'))
    string = """if True:\n  pass\n"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicIfElse(self):
    node = create_node.If(
        create_node.Name('True'), orelse=[create_node.Pass()])
    string = """if True:\n  pass\nelse:\n  pass\n"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicIfElif(self):
    # An If node in orelse renders as 'elif'.
    node = create_node.If(
        create_node.Name('True'),
        orelse=[create_node.If(create_node.Name('False'))])
    string = """if True:
  pass
elif False:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIfElifWithSpace(self):
    node = create_node.If(
        create_node.Name('True'),
        orelse=[create_node.If(create_node.Name('False'))])
    string = """if True:
  pass
elif False:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIfInElse(self):
    # The same AST can also match an explicit 'else: if ...' spelling.
    node = create_node.If(
        create_node.Name('True'),
        orelse=[create_node.If(create_node.Name('False'))])
    string = """if True:
  pass
else:
  if False:
    pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testIfAndOthersInElse(self):
    node = create_node.If(
        create_node.Name('True'),
        orelse=[create_node.If(create_node.Name('False')),
                create_node.Expr(create_node.Name('True'))])
    string = """if True:
  pass
else:
  if False:
    pass
  True
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class IfExpMatcherTest(unittest.TestCase):
  """Tests matching ast.IfExp ternary expressions ('a if test else b')."""
  def testBasicMatch(self):
    node = create_node.IfExp(
        create_node.Name('True'), create_node.Name('a'), create_node.Name('b'))
    string = 'a if True else b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testChangeParts(self):
    # All three children can be swapped after matching.
    node = create_node.IfExp(
        create_node.Name('True'), create_node.Name('a'), create_node.Name('b'))
    string = 'a if True else b'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.test = create_node.Name('False')
    node.body = create_node.Name('c')
    node.orelse = create_node.Name('d')
    self.assertEqual('c if False else d', matcher.GetSource())
class LambdaMatcherTest(unittest.TestCase):
  """Tests matching ast.Lambda nodes, with and without arguments."""
  def testBasicMatch(self):
    node = create_node.Lambda(create_node.Name('a'))
    string = 'lambda: a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithArgs(self):
    node = create_node.Lambda(
        create_node.Name('a'),
        args=create_node.arguments(args=('b')))
    string = 'lambda b: a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithArgsOnNewLine(self):
    # Parenthesized lambda split across lines must still match.
    node = create_node.Lambda(
        create_node.Name('a'),
        args=create_node.arguments(args=('b')))
    string = '(lambda\nb: a)'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class ListComprehensionMatcherTest(unittest.TestCase):
  """Tests matching ast.ListComp nodes ('[expr for x in y [if ...]]')."""
  def testBasicMatch(self):
    node = create_node.ListComp('c', 'a', 'b')
    string = '[c for a in b]'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithIf(self):
    node = create_node.ListComp(
        'c', 'a', 'b',
        create_node.Compare('c', '<', 'd'))
    string = '[c for a in b if c < d]'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class ModuleMatcherTest(unittest.TestCase):
  """Tests matching ast.Module nodes, including blank and comment lines."""
  def testBasicMatch(self):
    node = create_node.Module(create_node.Expr(create_node.Name('a')))
    string = 'a\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithEmptyLines(self):
    node = create_node.Module(
        create_node.Expr(create_node.Name('a')),
        create_node.Expr(create_node.Name('b')))
    string = 'a\n\nb\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithCommentLines(self):
    node = create_node.Module(
        create_node.Expr(create_node.Name('a')),
        create_node.Expr(create_node.Name('b')))
    string = 'a\n#blah\nb\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class NameMatcherTest(unittest.TestCase):
  """Tests matching ast.Name nodes and editing their id after a match."""
  def testBasicMatch(self):
    node = create_node.Name('foobar')
    string = 'foobar'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('foobar', matcher.GetSource())
  def testIdChange(self):
    node = create_node.Name('foobar')
    string = 'foobar'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.id = 'hello'
    self.assertEqual('hello', matcher.GetSource())
class NumMatcherTest(unittest.TestCase):
  """Tests matching ast.Num nodes, including the Python 2 'L' long suffix."""
  def testBasicMatch(self):
    node = create_node.Num('1')
    string = '1'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('1', matcher.GetSource())
  def testBasicMatchWithSuffix(self):
    # '1L' is Python 2 long-literal syntax; the suffix must be preserved.
    node = create_node.Num('1')
    string = '1L'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('1L', matcher.GetSource())
class SetMatcherTest(unittest.TestCase):
  """Tests matching ast.Set literals."""
  def testBasicMatch(self):
    node = create_node.Set('c', 'a', 'b')
    string = '{c, a, b}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class SetComprehensionMatcherTest(unittest.TestCase):
  """Tests matching ast.SetComp nodes ('{expr for x in y [if ...]}')."""
  def testBasicMatch(self):
    node = create_node.SetComp('c', 'a', 'b')
    string = '{c for a in b}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithIf(self):
    node = create_node.SetComp(
        'c', 'a', 'b',
        create_node.Compare('c', '<', 'd'))
    string = '{c for a in b if c < d}'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class StrMatcherTest(unittest.TestCase):
  """Tests matching ast.Str nodes: prefixes, continuations, triple quotes,
  and post-match edits to value and quote type."""
  def testBasicMatch(self):
    node = create_node.Str('foobar')
    string = '"foobar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('"foobar"', matcher.GetSource())
  def testPrefixMatch(self):
    node = create_node.Str('foobar')
    string = 'r"foobar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('r"foobar"', matcher.GetSource())
  def testQuoteWrapped(self):
    node = create_node.Str('foobar')
    string = '("foobar")'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('("foobar")', matcher.GetSource())
  def testContinuationMatch(self):
    # Implicit string concatenation across lines is one Str node.
    node = create_node.Str('foobar')
    string = '"foo"\n"bar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('"foo"\n"bar"', matcher.GetSource())
  def testContinuationMatchWithPrefix(self):
    node = create_node.Str('foobar')
    string = '"foo"\nr"bar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('"foo"\nr"bar"', matcher.GetSource())
  def testBasicTripleQuoteMatch(self):
    node = create_node.Str('foobar')
    string = '"""foobar"""'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('"""foobar"""', matcher.GetSource())
  def testMultilineTripleQuoteMatch(self):
    node = create_node.Str('foobar\n\nbaz')
    string = '"""foobar\n\nbaz"""'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('"""foobar\n\nbaz"""', matcher.GetSource())
  def testQuoteTypeMismatch(self):
    # Mismatched opening/closing quote characters must be rejected.
    node = create_node.Str('foobar')
    string = '"foobar\''
    matcher = source_match.GetMatcher(node)
    with self.assertRaises(ValueError):
      matcher.Match(string)
  def testSChange(self):
    node = create_node.Str('foobar')
    string = '"foobar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.s = 'hello'
    self.assertEqual('"hello"', matcher.GetSource())
  def testSChangeInContinuation(self):
    # Editing the value collapses a continuation into a single literal.
    node = create_node.Str('foobar')
    string = '"foo"\n"bar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.s = 'foobaz'
    self.assertEqual('"foobaz"', matcher.GetSource())
  def testQuoteTypeChange(self):
    node = create_node.Str('foobar')
    string = '"foobar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    matcher.quote_type = "'"
    self.assertEqual("'foobar'", matcher.GetSource())
  def testQuoteTypeChangeToTripleQuote(self):
    node = create_node.Str('foobar')
    string = '"foobar"'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    matcher.quote_type = "'''"
    self.assertEqual("'''foobar'''", matcher.GetSource())
class SubscriptMatcherTest(unittest.TestCase):
  """Tests for the Subscript matcher (slices and whitespace handling)."""
  # NOTE: docstring previously said "SyntaxFreeLine matcher" — copy-paste error.
  def testBasicMatch(self):
    node = create_node.Subscript('a', 1)
    string = 'a[1]'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('a[1]', matcher.GetSource())
  def testAllPartsMatch(self):
    node = create_node.Subscript('a', 1, 2, 3)
    string = 'a[1:2:3]'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('a[1:2:3]', matcher.GetSource())
  def testSeparatedWithStrings(self):
    # Arbitrary whitespace around the brackets and colons must round-trip.
    node = create_node.Subscript('a', 1, 2, 3)
    string = 'a [ 1 : 2 : 3 ]'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('a [ 1 : 2 : 3 ]', matcher.GetSource())
class SyntaxFreeLineMatcherTest(unittest.TestCase):
  """Tests for the SyntaxFreeLine matcher."""
  def testBasicMatch(self):
    node = create_node.SyntaxFreeLine()
    string = '\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('\n', matcher.GetSource())
  def testVeryShortMatch(self):
    node = create_node.SyntaxFreeLine(
        comment='', col_offset=0, comment_indent=0)
    string = '#\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('#\n', matcher.GetSource())
  def testCommentMatch(self):
    node = create_node.SyntaxFreeLine(
        comment='comment', col_offset=0, comment_indent=0)
    string = '#comment\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('#comment\n', matcher.GetSource())
  def testIndentedCommentMatch(self):
    # comment_indent is the gap between '#' and the comment text.
    node = create_node.SyntaxFreeLine(
        comment='comment', col_offset=0, comment_indent=2)
    string = '# comment\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual('# comment\n', matcher.GetSource())
  def testOffsetCommentMatch(self):
    # col_offset is the indentation before the '#'.
    node = create_node.SyntaxFreeLine(
        comment='comment', col_offset=2, comment_indent=0)
    string = ' #comment\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(' #comment\n', matcher.GetSource())
  def testChangeComment(self):
    node = create_node.SyntaxFreeLine(
        comment='comment', col_offset=1, comment_indent=0)
    string = ' #comment\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.col_offset = 0
    node.comment_indent = 1
    node.comment = 'hello'
    self.assertEqual('# hello\n', matcher.GetSource())
  def testNotCommentFails(self):
    # A line with real syntax is not a "syntax-free" line.
    node = create_node.SyntaxFreeLine(
        comment='comment', col_offset=1, comment_indent=0)
    string = ' comment\n'
    matcher = source_match.GetMatcher(node)
    with self.assertRaises(source_match.BadlySpecifiedTemplateError):
      matcher.Match(string)
class TryExceptMatcherTest(unittest.TestCase):
  """Tests matching try/except statements (handlers, orelse, blank lines)."""
  def testBasicMatch(self):
    node = create_node.TryExcept(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.ExceptHandler()])
    string = """try:
  a
except:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchMultipleExceptHandlers(self):
    node = create_node.TryExcept(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.ExceptHandler('TestA'),
         create_node.ExceptHandler('TestB')])
    string = """try:
  a
except TestA:
  pass
except TestB:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchExceptAndOrElse(self):
    node = create_node.TryExcept(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.ExceptHandler()],
        orelse=[create_node.Pass()])
    string = """try:
  a
except:
  pass
else:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testMatchWithEmptyLine(self):
    node = create_node.TryExcept(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.ExceptHandler()])
    string = """try:
  a
except:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class TryFinallyMatcherTest(unittest.TestCase):
  """Tests matching try/finally statements, with and without except blocks."""
  def testBasicMatch(self):
    node = create_node.TryFinally(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.Expr(create_node.Name('c'))])
    string = """try:
  a
finally:
  c
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithExcept(self):
    # try/except/finally is a TryExcept nested inside a TryFinally.
    node = create_node.TryFinally(
        [create_node.TryExcept(
            [create_node.Expr(create_node.Name('a'))],
            [create_node.ExceptHandler()])],
        [create_node.Expr(create_node.Name('c'))])
    string = """try:
  a
except:
  pass
finally:
  c
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicMatchWithBlankLines(self):
    node = create_node.TryFinally(
        [create_node.Expr(create_node.Name('a'))],
        [create_node.Expr(create_node.Name('c'))])
    string = """try:
  a
finally:
  c
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class UnaryOpMatcherTest(unittest.TestCase):
  """Tests matching ast.UnaryOp for all four unary operators."""
  def testUAddUnaryOp(self):
    node = create_node.UnaryOp(
        create_node.UAdd(),
        create_node.Name('a'))
    string = '+a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testUSubUnaryOp(self):
    node = create_node.UnaryOp(
        create_node.USub(),
        create_node.Name('a'))
    string = '-a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testNotUnaryOp(self):
    node = create_node.UnaryOp(
        create_node.Not(),
        create_node.Name('a'))
    string = 'not a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testInvertUnaryOp(self):
    node = create_node.UnaryOp(
        create_node.Invert(),
        create_node.Name('a'))
    string = '~a'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
class WithMatcherTest(unittest.TestCase):
  """Tests matching ast.With nodes: as-targets, tuples, and compound withs."""
  def testBasicWith(self):
    node = create_node.With(
        create_node.Name('a'))
    string = 'with a:\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testBasicWithAs(self):
    node = create_node.With(
        create_node.Name('a'), as_part=create_node.Name('b'))
    string = 'with a as b:\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testWithAsTuple(self):
    node = create_node.With(
        create_node.Name('a'),
        as_part=create_node.Tuple(create_node.Name('b'),
                                  create_node.Name('c')))
    string = 'with a as (b, c):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  def testChangeWithAsTuple(self):
    node = create_node.With(
        create_node.Name('a'),
        as_part=create_node.Tuple(create_node.Name('b'),
                                  create_node.Name('c')))
    string = 'with a as (b, c):\n  pass\n'
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    node.context_expr = create_node.Name('d')
    node.optional_vars.elts[0] = create_node.Name('e')
    node.optional_vars.elts[1] = create_node.Name('f')
    self.assertEqual('with d as (e, f):\n  pass\n', matcher.GetSource())
  def testCompoundWith(self):
    # 'with a as c, b as d' parses as nested With nodes.
    node = create_node.With(
        create_node.Name('a'),
        as_part=create_node.Name('c'),
        body=[
            create_node.With(
                create_node.Name('b'),
                as_part=create_node.Name('d')
            )]
    )
    string = """with a as c, b as d:
  pass
"""
    matcher = source_match.GetMatcher(node)
    matcher.Match(string)
    self.assertEqual(string, matcher.GetSource())
  # TODO: Renabled this after adding indent information to matchers
  @unittest.expectedFailure
  def testCompoundWithReplacements(self):
    node = create_node.With(
        create_node.Name('a'),
        as_part=create_node.Name('c'),
        body=[
            create_node.With(
                create_node.Name('b'),
                as_part=create_node.Name('d')
            )]
    )
    module_node = create_node.Module(node)
    string = 'with a as c, b as d:\n  pass\n'
    node.matcher = source_match.GetMatcher(node)
    node.matcher.Match(string)
    node.body[0] = create_node.With(
        create_node.Name('e'),
        as_part=create_node.Name('f')
    )
    self.assertEqual('with a as c, e as f:\n  pass\n',
                     node.matcher.GetSource())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import ChatUser
# Expose ChatUser in the Django admin interface (default ModelAdmin).
admin.site.register(ChatUser)
|
nilq/baby-python
|
python
|
from config import *
from dbMgr import *
@app.before_request
def clear_trailing():
    """Redirect any URL with a trailing slash to its slash-free form."""
    rp = request.path
    if rp != '/' and rp.endswith('/'):
        return redirect(rp[:-1])
@app.route('/test')
def default():
    """Render the plain login page (test endpoint)."""
    return render_template('login.html')
@app.before_request
def before():
    """Log the client IP address before every request is handled."""
    # Lazy %-style args avoid formatting the message when INFO is disabled;
    # the emitted text is identical to the previous .format() version.
    logging.info("IP address: %s", request.remote_addr)
@app.route('/validate', methods=['GET', 'POST'])
def authorizeFacebookUser():
    """Gate the Facebook login page behind a shared curation passcode.

    POST with the correct 'pw' renders login_fb.html with the passcode
    prompt hidden; a wrong passcode re-renders it with an error message.
    GET (or any non-POST) redirects to the index page.
    """
    if request.method == 'POST':
        # Get command pass from login page and verify
        #print(request.form)
        logging.info('Input received: {}'.format(request.form))
        # NOTE(review): 'curationpass' comes from the config wildcard import;
        # confirm it is always defined before this route is hit.
        if request.form['pw'] == curationpass:
            return render_template('login_fb.html', getcommanpass=False)
        else:
            return render_template('login_fb.html', getcommanpass=True, rsp="Invalid passcode! Please try again.")
    return redirect(url_for('index'))
@app.route('/curation')
def show_curation():
    """Render the curation page for authenticated users; otherwise go to index."""
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    return render_template('curation.html')
@app.route('/datatable')
def datatable():
    """Render the curation-results data table for authenticated users."""
    if not current_user.is_authenticated:
        return redirect(url_for('index'))
    return render_template('datatable.html', server=server[:-1], keys=sorted(museums.keys()), data=returnCurationResults())
@app.route('/spec')
def show_specs():
    """Render the API spec page."""
    # NOTE(review): server[7:-1] presumably strips a 'http://' prefix and a
    # trailing slash from the configured server URL — confirm against config.
    return render_template('spec.html', server=server[7:-1])
@app.route('/profile')
def show_user_profile():
    """Render the logged-in curator's profile with per-museum answer stats."""
    if current_user.is_authenticated:
        # Get Keys
        keys = [t for t in sorted(museums.keys()) if t != "ulan" ]
        # Get User stats
        # getStats about all the questions answered by this user
        # NOTE(review): 'u' is fetched but never used below — confirm whether
        # it is dead code or intended for the template.
        u = dbC[dname]["curator"].find_one({'uid':current_user.email}, projection={'_id':False})
        answers = dbC[dname]["answer"].find({'author':current_user.email})
        # Initialize per museum stats
        stats = {}
        for tag in list(museums.keys()):
            stats[tag] = {"matched":0,"unmatched":0,"no-conclusion":0}
        for a in answers:
            # find question and check its current status
            q = dbC[dname]["question"].find_one({'_id':ObjectId(a['qid'])})
            for tag in q['tags']:
                # The loop variable is rebound from tag id to tag name here.
                tag = dbC[dname]["tag"].find_one({'_id':ObjectId(tag)})['tagname']
                if q['status'] == statuscodes["Agreement"]:
                    stats[tag]["matched"] += 1
                elif q['status'] == statuscodes["Disagreement"]:
                    stats[tag]["unmatched"] += 1
                elif q['status'] == statuscodes["Non-conclusive"]:
                    stats[tag]["no-conclusion"] += 1
                elif q['status'] == statuscodes["InProgress"]:
                    # In-progress questions count as no-conclusion only when
                    # this answer's value is 3 (the "cannot decide" option).
                    if a["value"] == 3:
                        stats[tag]["no-conclusion"] += 1
        return render_template('profile.html', keys=keys, museums=museums, userStats=stats, server=server[:-1])
    return redirect('/login')
@app.route('/results')
def show_results_page():
    """Render the results page for authenticated users; otherwise go to login."""
    if not current_user.is_authenticated:
        return redirect('/login')
    tags = sorted(museums.keys())
    return render_template('results.html', keys=tags, server=server[:-1])
@app.route('/stats', methods=['GET'])
def get_museum_stats():
    """Return the stats blob for one museum tag as JSON (auth required)."""
    # NOTE(review): 'tag' is read before the auth check, so a missing 'tag'
    # param raises a 400 even for anonymous users — confirm this is intended.
    tag = request.args['tag'].lower()
    #print("Received stats request for tag : "+tag)
    logging.info("Received stats request for tag : {}".format(tag))
    if current_user.is_authenticated:
        return jsonify(museums[tag])
    return redirect('/login')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with a 404 status code."""
    return render_template('404.html'), 404
@app.route('/done')
def done():
    """Render the completion page for authenticated users; otherwise go to login."""
    if not current_user.is_authenticated:
        return redirect('/login')
    return render_template('done.html')
@app.route('/about')
def about():
    """Render the public about page (no authentication required)."""
    return render_template('about.html')
|
nilq/baby-python
|
python
|
import doctest
import pytest
if __name__ == "__main__":
    # Run this module's doctests first, then hand off to pytest's collector
    # for the current directory.
    doctest.testmod()
    pytest.main()
|
nilq/baby-python
|
python
|
"Iterative Solvers for Sparse Linear Systems"
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
from .tfqmr import tfqmr
# Public API of the iterative-solvers subpackage; names come from the
# star/explicit imports above.
__all__ = [
    'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
    'lgmres', 'lsmr', 'lsqr',
    'minres', 'qmr', 'tfqmr'
]
# Attach the standard scipy per-subpackage test runner, then drop the
# helper class so it does not leak into the module namespace.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the integration test for the gym skill."""
import os
import pytest
import shutil
import signal
import subprocess
import sys
import tempfile
import time
from pathlib import Path
import yaml
from aea.configurations.base import SkillConfig
from ...common.click_testing import CliRunner
from aea.cli import cli
from tests.conftest import CLI_LOG_OPTION
class TestGymSkill:
    """End-to-end test: build an AEA agent with the gym skill and run it.

    Drives the ``aea`` CLI inside a temporary directory: creates an agent,
    adds the gym skill/connection, copies the example gym environments,
    launches the agent as a subprocess, and checks it shuts down cleanly on
    SIGINT.
    """

    @classmethod
    def setup_class(cls):
        """Set up the test class."""
        cls.runner = CliRunner()
        cls.agent_name = "my_gym_agent"
        cls.cwd = os.getcwd()
        # Work inside a throwaway directory so the created agent and copied
        # packages do not pollute the repository checkout.
        cls.t = tempfile.mkdtemp()
        os.chdir(cls.t)

    def test_gym(self, pytestconfig):
        """Run the gym skill sequence."""
        if pytestconfig.getoption("ci"):
            pytest.skip("Skipping the test since it doesn't work in CI.")
        # add packages folder
        packages_src = os.path.join(self.cwd, 'packages')
        packages_dst = os.path.join(os.getcwd(), 'packages')
        shutil.copytree(packages_src, packages_dst)
        # create agent
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "create", self.agent_name], standalone_mode=False)
        assert result.exit_code == 0
        agent_dir_path = os.path.join(self.t, self.agent_name)
        os.chdir(agent_dir_path)
        # add packages and install dependencies
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "skill", "gym"], standalone_mode=False)
        assert result.exit_code == 0
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "connection", "gym"], standalone_mode=False)
        assert result.exit_code == 0
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False)
        assert result.exit_code == 0
        # add gyms folder from examples
        gyms_src = os.path.join(self.cwd, 'examples', 'gym_ex', 'gyms')
        gyms_dst = os.path.join(self.t, self.agent_name, 'gyms')
        shutil.copytree(gyms_src, gyms_dst)
        # change config file of gym connection
        file_src = os.path.join(self.cwd, 'tests', 'test_packages', 'test_skills', 'data', 'connection.yaml')
        file_dst = os.path.join(self.t, self.agent_name, 'connections', 'gym', 'connection.yaml')
        shutil.copyfile(file_src, file_dst)
        # change number of training steps
        skill_config_path = Path(self.t, self.agent_name, "skills", "gym", "skill.yaml")
        skill_config = SkillConfig.from_json(yaml.safe_load(open(skill_config_path)))
        skill_config.tasks.read("GymTask").args["nb_steps"] = 100
        yaml.safe_dump(skill_config.json, open(skill_config_path, "w"))
        # Launch the agent in a child process so it can be interrupted below.
        process = subprocess.Popen([
            sys.executable,
            '-m',
            'aea.cli',
            "run",
            "--connections",
            "gym"
        ],
            stdout=subprocess.PIPE,
            env=os.environ.copy())
        # check the gym run ends
        # NOTE(review): SIGINT after a fixed 10 s sleep assumes training has
        # started but not finished/crashed, and returncode 0 assumes the CLI
        # handles SIGINT gracefully -- confirm this holds on all platforms.
        time.sleep(10.0)
        process.send_signal(signal.SIGINT)
        process.wait(timeout=5)
        assert process.returncode == 0
        # Defensive cleanup: wait() above should already have reaped the
        # child, so poll() is normally not None at this point.
        poll = process.poll()
        if poll is None:
            process.terminate()
            process.wait(2)
        os.chdir(self.t)
        self.result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "delete", self.agent_name], standalone_mode=False)

    @classmethod
    def teardown_class(cls):
        """Teardown: restore the working directory and remove the temp dir."""
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
|
nilq/baby-python
|
python
|
"""Tests for reloading generated pyi."""
from pytype import utils
from pytype.pytd import pytd
from pytype.tests import test_inference
class ReingestTest(test_inference.InferenceTest):
    """Tests for reloading the pyi we generate."""

    def testContainer(self):
        # Infer types for a small class hierarchy, print the result as a pyi
        # stub, and verify that code importing the stub still type-checks.
        ty = self.Infer("""
            class Container:
                def Add(self):
                    pass
            class A(Container):
                pass
        """)
        with utils.Tempdir() as d:
            d.create_file("foo.pyi", pytd.Print(ty))
            self.assertNoErrors("""
                # u.py
                from foo import A
                A().Add()
            """, pythonpath=[d.path])

    def testUnion(self):
        # A user-defined class named "Union" must round-trip through the
        # generated pyi without colliding with typing.Union.
        ty = self.Infer("""
            class Union(object):
                pass
            x = {"Union": Union}
        """)
        with utils.Tempdir() as d:
            d.create_file("foo.pyi", pytd.Print(ty))
            self.assertNoErrors("""
                from foo import Union
            """, pythonpath=[d.path])
if __name__ == "__main__":
    # Delegate to the shared test entry point.
    test_inference.main()
|
nilq/baby-python
|
python
|
import time
import numpy as np
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
# import dataprep for tomography module
# import tomography module
# using the data prep module of analysis V2
# from pycqed.analysis_v2 import tomography_dataprep as dataprep
from pycqed.analysis import measurement_analysis as ma
# qutip is optional: it is only needed by code paths that build quantum
# objects; the numpy-based analyses below still work without it.
try:
    import qutip as qt
except ImportError as e:
    pass
    # logging.warning('Could not import qutip, tomo code will not work')
def reshape_block(shots_data, segments_per_block=16, block_size=4092, mode='truncate'):
    """Arrange a flat 1D shot record into 2D blocks of ``block_size`` samples.

    The record of length N is reshaped into N // block_size rows.  Because
    ``block_size`` is generally not a multiple of ``segments_per_block``,
    each row ends with a partial segment which is either dropped
    (``mode='truncate'``) or padded out with NaN (``mode='padd'``).

    Raises ValueError for any other mode; asserts that N divides evenly
    into blocks.
    """
    n_samples = len(shots_data)
    # The record must consist of whole blocks.
    assert n_samples % block_size == 0
    n_blocks = n_samples // block_size
    whole_segments = block_size // segments_per_block
    blocked = shots_data.reshape((n_blocks, block_size))
    normalized_mode = mode.lower()
    if normalized_mode == 'truncate':
        # Drop the trailing partial segment from every block.
        return blocked[:, :whole_segments * segments_per_block]
    if normalized_mode == 'padd':
        # Extend every block with NaN up to the next whole segment.
        padded_width = (whole_segments + 1) * segments_per_block
        padded = np.nan * np.ones((n_blocks, padded_width))
        padded[:, :block_size] = blocked
        return padded
    raise ValueError('Mode not understood. Needs to be truncate or padd')
def all_repetitions(shots_data, segments_per_block=16):
    """Flatten a 2D shot array into rows of ``segments_per_block`` samples.

    Every repetition of the segment sequence becomes one row, regardless of
    which block it originally came from.  Asserts that the total number of
    samples divides evenly into segments.
    """
    rows, cols = shots_data.shape
    total_samples = rows * cols
    # Total sample count must split evenly into whole segment groups.
    assert total_samples % segments_per_block == 0
    return shots_data.reshape((total_samples // segments_per_block,
                               segments_per_block))
def get_segments_average(shots_data, segments_per_block=16, block_size=4092, mode='truncate', average=True):
    """Group a flat shot record into segment rows and optionally average them.

    Combines reshape_block (block layout, truncate/padd handling) with
    all_repetitions (one row per repetition of the segment sequence).
    Returns the per-segment mean over repetitions when ``average`` is True,
    otherwise the full (repetitions x segments) array.
    """
    blocked = reshape_block(shots_data=shots_data,
                            segments_per_block=segments_per_block,
                            block_size=block_size,
                            mode=mode)
    per_repetition = all_repetitions(shots_data=blocked,
                                     segments_per_block=segments_per_block)
    if not average:
        return per_repetition
    return np.mean(per_repetition, axis=0)
class ExpectationValueCalculation:
    """Compute two-qubit Pauli expectation values from a 16-segment tomography run.

    Loads three averaged channels (q0, q1 and their correlator) from a
    MeasurementAnalysis object.  Segments 0-7 are tomography rotations;
    segments 8-15 are calibration points (two per computational state
    00/01/10/11) used to fit the measurement-operator betas.
    """

    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        avg_h1 = self.ma_obj.measured_values[0]
        avg_h2 = self.ma_obj.measured_values[1]
        avg_h12 = self.ma_obj.measured_values[2]
        # Binning all the points required for the tomo
        # Calibration points: two consecutive segments per computational state.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Tomography data: 8 rotation segments from each of the 3 channels.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])
        # print(len(self.measurements_cal))
        # print(self.measurements_cal)

    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # print(self.measurements_cal[0:4])
        # One beta quadruple per channel: q0 (unprimed), q1 (primed),
        # correlator (double-primed).
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[0:4])
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        # print(self.measurements_cal[0:4])
        # print(betas[0:4])
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[4:8])
        # print(betas[4:8])
        betas[8:] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[8:12])
        # print(betas[8:])
        return betas

    def expectation_value_calculation_IdenZ(self):
        """Solve for <II>, <IZ>, <ZI>, <ZZ> from the Z-basis segments of all
        three channels and return their element-wise mean."""
        betas = self._calibrate_betas()
        # inverting the unprimed beta matrix
        # up is unprimed
        self.betas = betas
        # print(self.betas[0:4], self.betas[4:8], self.betas[8:])
        beta_0_up = self.betas[0]
        beta_1_up = self.betas[1]
        beta_2_up = self.betas[2]
        beta_3_up = self.betas[3]
        beta_matrix_up = np.array([[beta_0_up, beta_1_up, beta_2_up, beta_3_up],
                                   [beta_0_up, -1*beta_1_up, beta_2_up, -1*beta_3_up],
                                   [beta_0_up, beta_1_up, -1*beta_2_up, -1*beta_3_up],
                                   [beta_0_up, -1*beta_1_up, -1*beta_2_up, beta_3_up]])
        # assuming 0:4 are
        # expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[1:4])
        expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[0:4])
        # inverting the primed beta matrix
        # p is primed
        beta_0_p = self.betas[4]
        beta_1_p = self.betas[5]
        beta_2_p = self.betas[6]
        beta_3_p = self.betas[7]
        beta_matrix_p = np.array([[beta_0_p, beta_1_p, beta_2_p, beta_3_p],
                                  [beta_0_p, -1*beta_1_p, beta_2_p, -1*beta_3_p],
                                  [beta_0_p, beta_1_p, -1*beta_2_p, -1*beta_3_p],
                                  [beta_0_p, -1*beta_1_p, -1*beta_2_p, beta_3_p]])
        # beta_matrix_p = np.array([[-1*beta_1_p,beta_2_p,-1*beta_3_p],
        #                           [beta_1_p,-1*beta_2_p,-1*beta_3_p],
        #                           [-1*beta_1_p,-1*beta_2_p,beta_3_p]])
        # assuming 0:4 are
        expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[8:12])
        # expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
        # inverting the unprimed beta matrix
        # up is unprimed
        beta_0_pp = self.betas[8]
        beta_1_pp = self.betas[9]
        beta_2_pp = self.betas[10]
        beta_3_pp = self.betas[11]
        beta_matrix_pp = np.array([[beta_0_pp, beta_1_pp, beta_2_pp, beta_3_pp],
                                   [beta_0_pp, -1*beta_1_pp, beta_2_pp, -1*beta_3_pp],
                                   [beta_0_pp, beta_1_pp, -1*beta_2_pp, -1*beta_3_pp],
                                   [beta_0_pp, -1*beta_1_pp, -1*beta_2_pp, beta_3_pp]])
        # beta_matrix_pp = np.array([[-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
        #                            [beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
        #                            [-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
        # assuming 0:4 are
        expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_pp), self.measurements_tomo[16:20])
        # expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
        # take the mean of calculated expectation values of II, IZ, ZI, ZZ
        # for three different beta vectors
        expect_value_IdenZ = np.mean(np.array([expect_value_IdenZ_up,
                                               expect_value_IdenZ_p,
                                               expect_value_IdenZ_pp]),
                                     axis=0)
        print(expect_value_IdenZ_up)
        print(expect_value_IdenZ_p)
        print(expect_value_IdenZ_pp)
        return expect_value_IdenZ

    def expectation_value_calculation_XX(self):
        """Average the <XX> estimates from the three channels."""
        # NOTE(review): '/2*self.betas[3]' parses as (.../2)*beta3, i.e. it
        # multiplies by beta3 after halving; if the derivation requires
        # division by (2*beta3) the parentheses are missing -- confirm.
        expect_value_XX_up = ((self.measurements_tomo[4] + self.measurements_tomo[5]) - 2*self.betas[0])/2*self.betas[3]
        expect_value_XX_p = ((self.measurements_tomo[12] + self.measurements_tomo[13]) - 2*self.betas[4])/2*self.betas[7]
        expect_value_XX_pp = ((self.measurements_tomo[20] + self.measurements_tomo[21]) - 2*self.betas[8])/2*self.betas[11]
        expectation_value_XX = (expect_value_XX_up + expect_value_XX_p + expect_value_XX_pp)/3
        # print(expect_value_XX_up, expect_value_XX_p, expect_value_XX_pp)
        return expectation_value_XX

    def expectation_value_calculation_YY(self):
        """Average the <YY> estimates from the three channels."""
        # NOTE(review): same '/2*beta' precedence concern as in the XX method.
        expect_value_YY_up = ((self.measurements_tomo[6] + self.measurements_tomo[7]) - 2*self.betas[0])/2*self.betas[3]
        expect_value_YY_p = ((self.measurements_tomo[14] + self.measurements_tomo[15]) - 2*self.betas[4])/2*self.betas[7]
        expect_value_YY_pp = ((self.measurements_tomo[22] + self.measurements_tomo[23]) - 2*self.betas[8])/2*self.betas[11]
        # print(expect_value_YY_up, expect_value_YY_p, expect_value_YY_pp)
        expectation_value_YY = (expect_value_YY_up + expect_value_YY_p + expect_value_YY_pp)/3
        return expectation_value_YY

    def execute_expectation_value_calculation(self):
        """Return ([<II>, <IZ>, <ZI>, <ZZ>, <XX>, <YY>], betas).

        Note: unlike the other classes in this module, this returns a
        (values, betas) tuple rather than the values alone.
        """
        expect_values = np.zeros(6)
        expect_values[0:4] = self.expectation_value_calculation_IdenZ()
        # print(self.expectation_value_calculation_IdenZ())
        expect_values[4] = self.expectation_value_calculation_XX()
        # print(self.expectation_value_calculation_XX())
        expect_values[5] = self.expectation_value_calculation_YY()
        # print(self.expectation_value_calculation_YY())
        return expect_values, self.betas
class ExpectationValueCalculation2:
    """Least-squares variant of ExpectationValueCalculation.

    Instead of inverting per-channel 4x4 beta matrices, it assembles a
    24x10 measurement matrix M mapping the ten two-qubit operator terms
    (II, IZ, ZI, ZZ, IX, XI, XX, IY, YI, YY) to the 24 tomography readouts,
    then solves via the Moore-Penrose pseudo-inverse.
    """

    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        avg_h1 = self.ma_obj.measured_values[0]
        avg_h2 = self.ma_obj.measured_values[1]
        avg_h12 = self.ma_obj.measured_values[2]
        # Calibration points: two consecutive segments per computational state.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Tomography data: 8 rotation segments from each of the 3 channels.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])

    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # print(self.measurements_cal[0:4])
        # One beta quadruple per channel; also cached on self for
        # assemble_M_matrix.
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas

    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 block mapping operator terms to one channel's
        eight tomography segments.  Columns: II IZ ZI ZZ IX XI XX IY YI YY."""
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],
                                                0, 0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block

    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 M matrix."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix

    def invert_M_matrix(self):
        """Cache and return the Moore-Penrose pseudo-inverse of M."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix

    def execute_error_signalling(self, ev):
        """Renormalize expectation values by the error-signalling factor 1/(1 - <ZZ>)."""
        # NOTE(review): here ZI duplicates IZ and YY duplicates XX, whereas
        # ExpectationValueCalculation3_shots uses (ev[2]-ev[1]) for ZI --
        # confirm which formula is intended.
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[1] - ev[2])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[4] + ev[5])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling

    def execute_expectation_value_calculation(self):
        """Solve the full 10-term system and return [II, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        return expect_values_VQE

    def execute_expectation_value_calculation_traceone(self):
        """Solve with <II> fixed to 1 (drop the II column of M); return
        [1, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        return expect_values_VQE

    def execute_expectation_value_calculation_T1signaling(self):
        """Trace-one solve followed by error-signalling renormalization."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        return expect_values_VQE
class ExpectationValueCalculation3_shots:
    """Shot-based variant of ExpectationValueCalculation2 with rescaling.

    Builds the three channels from single-shot records (q0, q1 and their
    normalized product as the correlator), then centers and scales each
    channel using its calibration points before the least-squares solve.
    """

    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # NOTE(review): block_size=4094 here differs from reshape_block's
        # default of 4092 -- confirm this matches the acquisition settings.
        shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        # Correlator channel: per-shot product of the range-normalized signals.
        shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
        avg_h1 = np.mean(shots_I_q0, axis=0)
        avg_h2 = np.mean(shots_I_q1, axis=0)
        avg_h12 = np.mean(shots_I_q0q1, axis=0)
        # Calibration points: two consecutive segments per computational state.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        mean_h1 = (h1_00 + h1_10 + h1_01 + h1_11)/4
        mean_h2 = (h2_00 + h2_01 + h2_10 + h2_11)/4
        mean_h12 = (h12_00 + h12_11 + h12_01 + h12_10)/4
        # subtract beta 0 from all measurements
        # rescale them
        avg_h1 -= mean_h1
        avg_h2 -= mean_h2
        avg_h12 -= mean_h12
        # Per-channel scale factors derived from the calibration contrast.
        scale_h1 = (h1_00 + h1_10 - h1_01 - h1_11)/4
        scale_h2 = (h2_00 + h2_01 - h2_10 - h2_11)/4
        scale_h12 = (h12_00 + h12_11 - h12_01 - h12_10)/4
        avg_h1 = (avg_h1)/scale_h1
        avg_h2 = (avg_h2)/scale_h2
        avg_h12 = (avg_h12)/scale_h12
        # The averages have been redefined so redefine the cal terms
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Tomography data: 8 rotation segments from each of the 3 channels.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])

    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas

    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 block mapping operator terms to one channel's
        eight tomography segments."""
        # II IZ ZI ZZ IX XI XX IY YI YY
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],  # 36
                                                0, 0, 0, -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,  # 29
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block

    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 M matrix."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix

    def invert_M_matrix(self):
        """Cache and return the Moore-Penrose pseudo-inverse of M."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix

    def execute_error_signalling(self, ev):
        """Renormalize expectation values by the error-signalling factor 1/(1 - <ZZ>)."""
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[2] - ev[1])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[5] + ev[4])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling

    def execute_expectation_value_calculation(self):
        """Solve the full 10-term system; return [1, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        print(self.expect_values)
        expect_values_VQE = np.array([1,
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        self.expect_values = expect_values_VQE
        return expect_values_VQE

    def execute_expectation_value_calculation_traceone(self):
        """Solve with <II> fixed to 1 after subtracting each channel's beta_0
        offset; return [1, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        beta_0_vec = np.repeat([self.betas_up[0],
                                self.betas_p[0],
                                self.betas_pp[0]], 8)
        rescaled_measurements_tomo = self.measurements_tomo - beta_0_vec
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        self.expect_values = expect_values_VQE
        print(self.expect_values)
        return expect_values_VQE

    def execute_expectation_value_calculation_T1signaling(self):
        """Full 10-term solve followed by error-signalling renormalization."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        self.expect_values = expect_values_VQE
        return expect_values_VQE
class ExpectationValueCalculation2_shots:
    """Shot-based variant of ExpectationValueCalculation2 (no rescaling).

    Builds the three channels from single-shot records (q0, q1 and their
    range-normalized product as correlator) and solves the 24x10
    least-squares system, without the centering/scaling step that the
    3_shots class applies.
    """

    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        # NOTE(review): block_size=4094 here differs from reshape_block's
        # default of 4092 -- confirm this matches the acquisition settings.
        shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        # Correlator channel: per-shot product of the range-normalized signals.
        shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
        avg_h1 = np.mean(shots_I_q0, axis=0)
        avg_h2 = np.mean(shots_I_q1, axis=0)
        avg_h12 = np.mean(shots_I_q0q1, axis=0)
        # Calibration points: two consecutive segments per computational state.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Tomography data: 8 rotation segments from each of the 3 channels.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])

    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # print(self.measurements_cal[0:4])
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas

    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 block mapping operator terms to one channel's
        eight tomography segments.  Columns: II IZ ZI ZZ IX XI XX IY YI YY."""
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],
                                                0, 0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block

    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 M matrix."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix

    def invert_M_matrix(self):
        """Cache and return the Moore-Penrose pseudo-inverse of M."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix

    def execute_error_signalling(self, ev):
        """Renormalize expectation values by the error-signalling factor 1/(1 - <ZZ>)."""
        # NOTE(review): ZI duplicates IZ and YY duplicates XX here, unlike the
        # 3_shots variant which uses (ev[2]-ev[1]) for ZI -- confirm intent.
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[1] - ev[2])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[4] + ev[5])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling

    def execute_expectation_value_calculation(self):
        """Solve the full 10-term system and return [II, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        return expect_values_VQE

    def execute_expectation_value_calculation_traceone(self):
        """Solve with <II> fixed to 1 (drop the II column of M); return
        [1, IZ, ZI, ZZ, XX, YY]."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        return expect_values_VQE

    def execute_expectation_value_calculation_T1signaling(self):
        """Trace-one solve followed by error-signalling renormalization."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        return expect_values_VQE
|
nilq/baby-python
|
python
|
from sys import modules
from unittest.mock import MagicMock
# Pretend we are not running on a Raspberry Pi and have no attached display,
# so the pitop SDK can be imported on a plain development machine.
mock_sys_info = modules["pitop.common.sys_info"] = MagicMock()
mock_sys_info.is_pi = MagicMock(return_value=False)
mock_curr_session_info = modules["pitop.common.current_session_info"] = MagicMock()
mock_curr_session_info.get_first_display = MagicMock(return_value=None)
# Heavyweight / hardware-only dependencies stubbed out for import time.
modules_to_patch = [
    "PIL",
    "pyinotify",
    "pitop.camera",
    "numpy",
    "pitop.common",
]
for module in modules_to_patch:
    modules[module] = MagicMock()
from os import environ, path
from unittest import TestCase, skip
# NOTE(review): PIL is still patched at this point, so Image is bound to a
# MagicMock attribute here, not the real PIL.Image — confirm this is intended.
from PIL import Image
# Avoid getting the mocked modules in other tests
for patched_module in modules_to_patch:
    del modules[patched_module]
# Repository root; used to locate the reference images/bitmaps under assets/.
root = path.dirname(path.dirname(path.abspath(__file__)))
@skip("Hardware-dependent: requires the pitop miniscreen SDK and assets.")
class OLEDTestCase(TestCase):
    """Pixel-level regression tests for the miniscreen drawing primitives.

    Each test renders one primitive onto the miniscreen canvas and compares
    the resulting pixel array with a pre-rendered reference bitmap.

    Note: the original bare ``@skip`` (no reason) rebinds a *class* to
    unittest's inner decorator function; ``skip`` must be called with a
    reason string when applied to a class.
    """

    @classmethod
    def setUpClass(cls):
        # SDL's dummy video driver lets pygame-based code run headless.
        environ["SDL_VIDEODRIVER"] = "dummy"

    @classmethod
    def tearDownClass(cls):
        del environ["SDL_VIDEODRIVER"]

    def setUp(self):
        # Imported lazily so module import does not require the hardware SDK.
        from pitop.miniscreen import Miniscreen

        self.miniscreen = Miniscreen()

    def tearDown(self):
        pass

    def get_bitmap_pix(self, file_path):
        """Load a reference bitmap and convert it to a binary pixel array."""
        bmp = Image.open(file_path).convert("1")
        bmp = bmp.point(lambda x: 0 if x == 0 else 1, "1")
        return self.miniscreen.core.canvas._pil_image_to_pix_arr(bmp)

    def compare_arrays(self, func_name, canvas_pix, bmp_pix):
        """Assert that the rendered canvas matches the reference bitmap.

        ``func_name`` names the primitive under test in failure messages.
        """
        print("CANVAS:")
        print(canvas_pix)
        print("BITMAP:")
        print(bmp_pix)
        # Bug fix: the previous assertEqual(canvas_pix.all(), bmp_pix.all())
        # compared two scalar booleans and passed for almost any pair of
        # arrays. Compare shapes, then every element.
        self.assertEqual(
            canvas_pix.shape, bmp_pix.shape,
            "{}: canvas/bitmap shapes differ".format(func_name))
        self.assertTrue(
            (canvas_pix == bmp_pix).all(),
            "{}: render does not match reference bitmap".format(func_name))

    def test_image(self):
        logo_path = root + "/assets/images/pi-top.png"
        img = Image.open(logo_path)
        canvas_pix = self.miniscreen.core.canvas.image(
            self.miniscreen.core.canvas.top_left(), img
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/pi-top.bmp")
        self.compare_arrays("image", canvas_pix, bmp_pix)

    def test_rectangle(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.rectangle(self.miniscreen.bounding_box)
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/rectangle.bmp")
        self.compare_arrays("rectangle", canvas_pix, bmp_pix)

    def test_arc(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.arc(
            self.miniscreen.bounding_box, 0, 180
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/arc.bmp")
        self.compare_arrays("arc", canvas_pix, bmp_pix)

    def test_chord(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.chord(
            self.miniscreen.bounding_box, 0, 180
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/chord.bmp")
        self.compare_arrays("chord", canvas_pix, bmp_pix)

    def test_ellipse(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.ellipse(self.miniscreen.bounding_box)
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/ellipse.bmp")
        self.compare_arrays("ellipse", canvas_pix, bmp_pix)

    def test_line(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.line(self.miniscreen.bounding_box)
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/line.bmp")
        self.compare_arrays("line", canvas_pix, bmp_pix)

    def test_pieslice(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.pieslice(
            self.miniscreen.bounding_box, 0, 180
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/pieslice.bmp")
        self.compare_arrays("pieslice", canvas_pix, bmp_pix)

    def test_point(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.point(self.miniscreen.bounding_box)
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/point.bmp")
        self.compare_arrays("point", canvas_pix, bmp_pix)

    def test_polygon(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.polygon(self.miniscreen.bounding_box)
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/polygon.bmp")
        self.compare_arrays("polygon", canvas_pix, bmp_pix)

    def test_text(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.text(
            self.miniscreen.core.canvas.top_left(), "test"
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/text.bmp")
        self.compare_arrays("text", canvas_pix, bmp_pix)

    def test_multiline_text(self):
        self.miniscreen.reset()
        canvas_pix = self.miniscreen.core.canvas.multiline_text(
            self.miniscreen.core.canvas.top_left(), "Hello World!"
        )
        bmp_pix = self.get_bitmap_pix(root + "/assets/bitmaps/multiline_text.bmp")
        self.compare_arrays("multiline_text", canvas_pix, bmp_pix)

    def test_max_fps(self):
        max_fps = 50
        self.miniscreen.reset()
        self.miniscreen.fps_regulator.set_max_fps(max_fps)
        max_sleep_time = self.miniscreen.fps_regulator.max_sleep_time
        self.assertEqual(max_sleep_time, 1 / max_fps)
|
nilq/baby-python
|
python
|
import os
import boto3
# Custom S3 endpoint (e.g. localstack in tests); None falls back to the real
# AWS endpoint.
AWS_ENDPOINT_URL = os.getenv("AWS_ENDPOINT_URL", None)
def handler(event, context):
    """AWS Lambda entry point: create two S3 buckets and list all bucket names.

    Returns:
        str: the string form of the list of every bucket name visible to the
        client (at least ``foo`` and ``bar``).
    """
    client = boto3.client("s3", endpoint_url=AWS_ENDPOINT_URL)
    client.create_bucket(Bucket="foo")
    client.create_bucket(Bucket="bar")
    buckets = client.list_buckets()["Buckets"]
    # Comprehension instead of an append loop; also drops the ambiguous
    # single-letter name `l`.
    bucket_names = [bucket["Name"] for bucket in buckets]
    return str(bucket_names)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-17 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (1.10.7):
    # - rewrites the Neighborhood.label help text, and
    # - enforces uniqueness of (name, state_abbrev, organization) so the same
    #   neighborhood name may exist under different states/organizations.
    dependencies = [
        ('users', '0004_auto_20170509_1559'),
        ('pfb_analysis', '0025_auto_20170511_1244'),
    ]
    operations = [
        migrations.AlterField(
            model_name='neighborhood',
            name='label',
            field=models.CharField(help_text='Human-readable label for neighborhood, should not include State', max_length=256),
        ),
        migrations.AlterUniqueTogether(
            name='neighborhood',
            unique_together=set([('name', 'state_abbrev', 'organization')]),
        ),
    ]
|
nilq/baby-python
|
python
|
from rest_framework.exceptions import APIException
from rest_framework import status
class InvalidParameterException(APIException):
    """Exception for invalid request parameters."""
    # Standard DRF 400 response with a fixed detail/code payload.
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Request contained an invalid parameter'
    default_code = 'invalid_request'
class UnprocessableEntityException(APIException):
    """Well-formed request that cannot be processed due to semantic constraints.

    See https://tools.ietf.org/html/rfc4918 (422 Unprocessable Entity).
    """
    # Use the named DRF constant, consistent with the other exceptions in this
    # module, instead of the bare integer 422.
    status_code = status.HTTP_422_UNPROCESSABLE_ENTITY
    default_detail = 'Request parameter is valid but unable to process due to constraints'
    default_code = 'invalid_request'
class ElasticsearchConnectionException(APIException):
    """Raised when the Elasticsearch cluster cannot be reached."""
    # Fixes the copy-pasted docstring ("invalid request parameters") and uses
    # the named DRF constant instead of the bare integer 500.
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = 'Unable to reach the Elasticsearch Cluster'
    default_code = 'service_unavailable'
|
nilq/baby-python
|
python
|
import arff
import argparse
import json
import logging
import openmlcontrib
import openmldefaults
import os
import sklearnbot
def parse_args():
    """Build and parse the command-line options for the surrogate generator."""
    default_metadata = '/home/janvanrijn/experiments/sklearn-bot/results/results__500__svc__predictive_accuracy.arff'
    default_output = os.path.expanduser('~') + '/experiments/openml-defaults/generated_data/'
    parser = argparse.ArgumentParser(description='Creates an ARFF file')
    parser.add_argument('--output_directory', type=str, help='directory to store output',
                        default=default_output)
    parser.add_argument('--study_id', type=str, default='OpenML100', help='the tag to obtain the tasks from')
    parser.add_argument('--metadata_file', type=str, default=default_metadata)
    parser.add_argument('--classifier_name', type=str, default='svc', help='scikit-learn flow name')
    parser.add_argument('--scoring', type=str, default='predictive_accuracy')
    parser.add_argument('--resized_grid_size', type=int, default=8)
    parser.add_argument('--random_seed', type=int, default=42)
    return parser.parse_args()
def run(args):
    """Generate and save a surrogate-model ARFF dataset for one classifier/measure pair."""
    logging.getLogger().setLevel(logging.INFO)
    # Hyperparameter space and experiment metadata for the chosen classifier.
    config_space = sklearnbot.config_spaces.get_config_space(args.classifier_name, args.random_seed)
    meta_data = openmldefaults.utils.get_dataset_metadata(args.metadata_file)
    if args.scoring not in meta_data['measure']:
        raise ValueError('Could not find measure: %s' % args.scoring)
    metadata_frame = openmldefaults.utils.metadata_file_to_frame(
        args.metadata_file, config_space, args.scoring)
    # Evaluate the metadata on a resized grid over the configuration space.
    df_surrogate = openmldefaults.utils.generate_grid_dataset(
        metadata_frame, config_space, args.resized_grid_size, args.scoring, args.random_seed)
    os.makedirs(args.output_directory, exist_ok=True)
    df_surrogate.reset_index(inplace=True)
    arff_object = openmlcontrib.meta.dataframe_to_arff(
        df_surrogate, 'surrogate_%s' % args.classifier_name, json.dumps(meta_data))
    filename = os.path.join(args.output_directory, 'surrogate__%s__%s__c%d.arff' % (args.classifier_name,
                                                                                    args.scoring,
                                                                                    args.resized_grid_size))
    with open(filename, 'w') as fp:
        arff.dump(arff_object, fp)
    logging.info('Saved to: %s' % filename)
# Script entry point.
if __name__ == '__main__':
    run(parse_args())
|
nilq/baby-python
|
python
|
"""
This module executes the string matching between a input sequence T and an
pattern P using a Finite State Machine.
The complexity for building the transition function is O(m^3 x |A|) where A is the
alphabet. Since the string matching function scan the input sequence only once,
the total complexity is O(n + m^3 x |A|)
@author Filippo Squillace
@version 1.0.0
@date 07/06/2012
"""
def string_matching_FSM(T, trans, m):
    """Scan T with the automaton and return the start index of the first match.

    T: the input sequence;
    trans: transition table (state -> character -> next state) encoding P;
    m: length of the pattern (state m is the accepting state).
    Returns -1 when the pattern never occurs.
    """
    state = 0
    for pos, ch in enumerate(T):
        state = trans[state][ch]
        if state == m:
            # The match ends at `pos`, so it started m-1 positions earlier.
            return pos - m + 1
    return -1
import string as st
def transition_function(P):
    """Build the transition table for pattern P over a printable-ASCII alphabet.

    For state s (number of pattern characters matched so far) and character c,
    the next state is the length of the longest prefix of P that is also a
    suffix of P[:s] + c, so every scanned suffix keeps matching a prefix of P.
    """
    alphabet = st.ascii_letters+st.punctuation+st.digits+st.whitespace
    m = len(P)
    trans = [{c: 0 for c in alphabet} for _ in range(m)]
    for s in range(m):
        for c in alphabet:
            k = min(m, s + 1)
            # Bug fix: the original loop had no lower bound, so k went
            # negative ((P[:s]+c)[-0:] is the whole string, never equal to
            # P[:0] == ""), and negative states were stored in the table.
            # They only worked by accident through Python's negative list
            # indexing (trans[-m] is trans[0]). Stop at k == 0 instead.
            while k > 0 and (P[:s] + c)[-k:] != P[:k]:
                k -= 1
            trans[s][c] = k
    return trans
# Self-test: run a small unittest suite when the module is executed directly.
if __name__=='__main__':
    import unittest
    class StringMatchTestCase(unittest.TestCase):
        def setUp(self):
            # Table of (sequence,pattern,expected_result)
            self.pos_cases = [\
                ('abcbbaanmdiababcdrttf','ababcd',11),
                ('abcbbaanmdiabafweefabab','abab',19),
                ('abcbbaanmdiasfo pfj=pewpfiojafaXre8abbafw_ eefabab','aXre8ab',30)
            ]
            # Sequences that must NOT contain the pattern (expected -1).
            self.neg_cases = [\
                ('abcbbaanmdiabcdrttf','ababcd',-1),
                ('abcbbaanmdiabafweefaba','abab',-1),
                ('abcbb_?aaFSRnmfew345sdhfhhuw.fad iabafweefaba','abab',-1)
            ]
        def test_positive(self):
            for (T,P,er) in self.pos_cases:
                trans = transition_function(P)
                res = string_matching_FSM(T, trans, len(P))
                self.assertEqual(res, er)
        def test_negative(self):
            for (T,P,er) in self.neg_cases:
                trans = transition_function(P)
                res = string_matching_FSM(T, trans, len(P))
                self.assertEqual(res, er)
    unittest.main()
|
nilq/baby-python
|
python
|
import tkinter

# Root window for the demo.
window = tkinter.Tk()
window.title("Test")
# Bug fix: pack() returns None, so the original `Frame(window).pack()` left
# top_frame/bottom_frame bound to None and every button was actually parented
# to the root window. Create the widget first, then pack it; the same applies
# to the buttons themselves.
top_frame = tkinter.Frame(window)
top_frame.pack()
bottom_frame = tkinter.Frame(window)
bottom_frame.pack(side="bottom")
btn1 = tkinter.Button(top_frame, text="B1", fg="red")
btn1.pack()
btn2 = tkinter.Button(top_frame, text="B2", fg="green")
btn2.pack()
btn3 = tkinter.Button(bottom_frame, text="B3", fg="purple")
btn3.pack(side="left")
btn4 = tkinter.Button(bottom_frame, text="B4", fg="orange")
btn4.pack(side="left")
window.mainloop()
|
nilq/baby-python
|
python
|
from setuptools import setup, Extension
# Long description for PyPI comes straight from the README.
with open('README.md', 'r') as f:
    long_description = f.read()
# CPython extension wrapping the Meow hash; note the AES-NI and AVX-512
# compile flags below, which the C sources require.
meow_ext = Extension(
    'meowhash.cpython',
    # define_macros=[('MEOW_HASH_256', '0'), ('MEOW_HASH_512', '0')],
    sources=['meowhash/cpython.c'],
    extra_compile_args=['-mavx512f', '-mavx512vl', '-maes',
                        '-mavx512f', '-mavx512pf', '-mavx512er', '-mavx512cd',
                        '-mavx512vl', '-mavx512bw', '-mavx512dq', '-mavx512ifma',
                        '-mavx512vbmi'
                        ],
    include_dirs=['lib'])
setup(
    name='meowhash',
    version='0.1',
    description='This is a demo package',
    author='James Liu',
    author_email='contact@jamessliu.com',
    license='MIT',
    url='https://github.com/james7132/py-meowhash',
    long_description=long_description,
    packages=['meowhash'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.0",
        "Programming Language :: Python :: 3.1",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    ext_modules=[meow_ext])
nilq/baby-python
|
python
|
# Copyright The IETF Trust 2007-2019, All Rights Reserved
#
from django.contrib.sitemaps import GenericSitemap
from ietf.ipr.models import IprDisclosureBase
# changefreq is "never except when it gets updated or withdrawn"
# so skip giving one
# Sitemap covering all posted or removed IPR disclosures, dated by `time`.
queryset = IprDisclosureBase.objects.filter(state__in=('posted','removed'))
archive = {'queryset':queryset, 'date_field': 'time', 'allow_empty':True }
IPRMap = GenericSitemap(archive) # type: ignore
|
nilq/baby-python
|
python
|
from grpclib.exceptions import GRPCError
from insanic.exceptions import APIException
from interstellar.exceptions import InvalidArgumentError
from grpc_test_monkey_v1.monkey_grpc import ApeServiceBase, MonkeyServiceBase
from grpc_test_monkey_v1.monkey_pb2 import ApeResponse, MonkeyResponse
class PlanetOfTheApes(ApeServiceBase):
    """Ape service: answers with a species' sound or a fallback string.

    Both RPCs shared an identical request/response pattern differing only in
    the sound, so the duplicated bodies are factored into one private helper.
    """

    async def _respond_with_sound(self, stream, sound):
        # Shared handler: echo the request id and attach either the species'
        # sound (when `include == "sound"`) or the fallback answer.
        request = await stream.recv_message()
        if request.include == "sound":
            response = ApeResponse(id=int(request.id), extra=sound)
        else:
            response = ApeResponse(id=int(request.id), extra="i don't know")
        await stream.send_message(response)

    async def GetChimpanzee(self,
                            stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.ApeRequest, grpc_test_monkey.monkey_pb2.ApeResponse]'):
        await self._respond_with_sound(stream, "woo woo ahh ahh")

    async def GetGorilla(self,
                         stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.ApeRequest, grpc_test_monkey.monkey_pb2.ApeResponse]'):
        await self._respond_with_sound(stream, "raaahhh")
class PlanetOfTheMonkeys(MonkeyServiceBase):
    """Monkey service whose request ids trigger configurable failure modes."""

    async def GetMonkey(self,
                        stream: 'grpclib.server.Stream[grpc_test_monkey.monkey_pb2.MonkeyRequest, grpc_test_monkey.monkey_pb2.MonkeyResponse]'):
        request = await stream.recv_message()
        # Map each special request id to the exception it should raise; any
        # other id gets a normal (empty) response.
        failure_factories = {
            "uncaught_exception": lambda: Exception("Something Broke"),
            "api_exception": lambda: APIException("help"),
            "grpc_error": lambda: InvalidArgumentError(message="bad bad"),
        }
        make_error = failure_factories.get(request.id)
        if make_error is not None:
            raise make_error()
        await stream.send_message(MonkeyResponse())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2005-2011 Grameen Foundation USA
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# See also http://www.apache.org/licenses/LICENSE-2.0.html for an
# explanation of the license and how it is applied.
import sys, re
LICENSE_TEXT="""/*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
"""
class Relicense:
    """Changes the license text that appears at the start of Mifos java files. Will add a license to files that do not contain one.
    To relicense all java files in the source tree, do something like this:
    find . -not -ipath "*/target/*" -not -ipath "*.svn*" -iname "*.java"| xargs -ifoo ./resources/relicense-java-file.py foo
    """
    def __init__(self):
        pass
    def main(self):
        """CLI entry point: relicense the file named in argv[1] (no-op without args)."""
        if len(sys.argv) < 2:
            sys.exit(0)
        self.relicense(sys.argv[1])
    def relicense(self, filename):
        """Rewrite `filename` with the canonical license; only writes on change."""
        contents = self.readEntireFile(filename)
        newContents = self.replaceLicense(contents, LICENSE_TEXT)
        if contents != newContents:
            self.writeEntireFile(filename, newContents)
            # Parenthesized print works under both Python 2 and Python 3
            # (the original bare print statement was Python 2 only).
            print("Relicensed file: %s" % filename)
    def replaceLicense(self, contents, license):
        """Return `contents` with its leading block comment replaced by `license`,
        or with `license` prepended when no leading comment exists."""
        # A file that starts with a word character has no license header yet.
        # (Raw strings avoid invalid-escape warnings for \w, \* and \s.)
        noLicenseRe = re.match(r"^\w", contents, re.MULTILINE | re.DOTALL)
        if noLicenseRe:
            return license + contents
        # Replace only the first /* ... */ block plus trailing whitespace.
        licenseRe = re.compile(r"^(/\*.*?\*/\s*)", re.MULTILINE | re.DOTALL)
        return licenseRe.sub(license, contents, 1)
    def readEntireFile(self, filename):
        """Return the full text of `filename`."""
        # Bug fix: the original called `file.close` without parentheses, which
        # never closed the handle. A context manager closes it reliably.
        with open(filename, "r") as f:
            return f.read()
    def writeEntireFile(self, filename, contents):
        """Overwrite `filename` with `contents`."""
        with open(filename, "w") as f:
            f.write(contents)
if __name__ == "__main__":
Relicense().main()
|
nilq/baby-python
|
python
|
def vatCal(totalPrice, vatRate=7):
    """Return `totalPrice` plus VAT.

    vatRate is the VAT percentage; it defaults to 7 to preserve the original
    hard-coded behaviour, but can now be overridden for other rates.
    """
    return totalPrice + (totalPrice * vatRate / 100)
# Simple CLI driver: read a whole-number price and report it with VAT added.
TotalPrice = int(input("Put your price : "))
print("Your total price is",vatCal(TotalPrice))
|
nilq/baby-python
|
python
|
import angr
from angr.sim_type import SimTypeInt
######################################
# getchar
######################################
class getchar(angr.SimProcedure):
    # SimProcedure modelling C's getchar(): delegates to glibc's _IO_getc on
    # file descriptor 0 (stdin) and returns its result expression.
    def run(self):
        # getchar() returns int, not char (to leave room for EOF), hence a
        # signed 32-bit return type.
        self.return_type = SimTypeInt(32, True)
        data = self.inline_call(
            # TODO: use a less private getc
            angr.SIM_PROCEDURES['glibc']['_IO_getc'], 0).ret_expr # stdin
        return data
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
sys.path.insert(0, '..')
import glob
import numpy as np
from dotmap import DotMap
from simpleplotlib import plot
from parse_logs import parse_hdfs_logs, parse_hdfs_throughput
# Conversion factor from bytes to gibibytes.
bytes_units = 2.0**-30
# Experiment variants, in the order they are plotted.
types = ['HDFS+static', 'HDFS+resize', 'HDFS+reTCP', 'reHDFS+static',
         'reHDFS+resize', 'reHDFS+reTCP']
# Maps a filename substring to a short configuration label.
fn_keys = {
    'normal-16-QUEUE-False': 'static',
    'normal-16-QUEUE-True-20000-reno': 'resize',
    'normal-16-QUEUE-True-20000-retcp': 'reTCP',
    'normal-16-ADU-False': 'adu',
    'normal-16-ADU-True-20000-reno': 'adu+resize',
    'normal-16-ADU-True-20000-retcp': 'adu+reTCP',
}
# Glob patterns (appended to the results dir in argv[1]) for each variant.
files = [
    '/tmp/*QUEUE-False*-HDFS-dfsioe',
    '/tmp/*QUEUE-True-20000-reno*-HDFS-dfsioe',
    '/tmp/*QUEUE-True-20000-retcp*-HDFS-dfsioe',
    '/tmp/*QUEUE-False*-reHDFS-dfsioe',
    '/tmp/*QUEUE-True-20000-reno*-reHDFS-dfsioe',
    '/tmp/*QUEUE-True-20000-retcp*-reHDFS-dfsioe',
]
# Only the two static variants (HDFS, reHDFS), used for the CDF plot.
files_short = [files[0], files[3]]
def get_default_plot_options(x, y):
    # Build the shared DotMap plot configuration: a grouped bar chart for the
    # six HDFS/reHDFS variants, color-grouped by HDFS (0) vs reHDFS (1).
    # Individual graph functions override or delete fields afterwards.
    options = DotMap()
    options.plot_type = 'BAR'
    options.legend.options.labels = ['HDFS', 'HDFS + Resize',
                                     'HDFS + reTCP', 'reHDFS',
                                     'reHDFS + Resize',
                                     'reHDFS + reTCP']
    options.series.color_groups = [0, 0, 0, 1, 1, 1]
    options.legend.order = [0, 2, 4, 1, 3, 5]
    options.legend.options.fontsize = 19
    options.legend.options.ncol = 3
    options.x.ticks.major.show = False
    return options
def graph_wct(data):
    # CDF of HDFS write completion times; `data` holds one sorted sample list
    # per variant (HDFS and reHDFS). Note: Python 2 code (xrange).
    x = data
    # y[i][j] is the cumulative percentage for the j-th sample of variant i.
    y = [[float(j) / (len(x[i])-1) * 100 for j in xrange(len(x[i]))]
         for i in xrange(len(x))]
    options = get_default_plot_options(x, y)
    options.plot_type = 'LINE'
    options.legend.options.labels = ['HDFS', 'reHDFS']
    options.series_options = [DotMap(linewidth=5) for i in range(len(x))]
    options.output_fn = 'graphs/hdfs_writes_cdf.pdf'
    options.x.label.xlabel = 'HDFS write completion time (ms)'
    options.y.label.ylabel = 'CDF (%)'
    # Bar-chart defaults that do not apply to a line plot.
    del options.series.color_groups
    del options.legend.options.ncol
    del options.x.ticks.major.show
    plot(x, y, options)
def graph_tail(data):
    # Bar chart of the 99th-percentile write completion time per variant.
    x = np.array([[0] for i in xrange(len(data))])
    y = [np.percentile(d, 99) for d in data]
    options = get_default_plot_options(x, y)
    options.y.limits = [0, 1500]
    options.output_fn = 'graphs/hdfs_99th.pdf'
    options.y.label.ylabel = '99th percent. writes (ms)'
    options.y.ticks.major.show = False
    del options.legend.options.ncol
    del options.legend.order
    plot(x, y, options)
def graph_throughput(data):
    # Bar chart of aggregate throughput per variant, with a reference line at
    # the nominal capacity (80 Gb circuit + 10 Gb packet, in Gbps).
    x = np.array([[0] for i in xrange(len(data))])
    y = data
    options = get_default_plot_options(x, y)
    options.horizontal_lines.lines = [80*8 + 10*8]
    options.legend.options.fontsize = 18
    options.y.label_offset = [-0.01, -.13]
    options.y.limits = [0, 1100]
    options.output_fn = 'graphs/hdfs_throughput.pdf'
    options.y.label.ylabel = 'Agg. tput. (Gbps)'
    options.y.ticks.major.show = False
    plot(x, y, options)
def bytes_graph():
    # Bar chart of bytes sent over the packet vs circuit switch per variant,
    # read from the "*.counters.txt" files in the results dir (argv[1]).
    data = {}
    for fn in glob.glob(sys.argv[1] + '/*.counters.txt'):
        key = 'reHDFS+' if 'reHDFS' in fn else 'HDFS+'
        key += [k for n, k in fn_keys.items() if n in fn][0]
        # NOTE: eval() on locally generated counter files; these are trusted
        # experiment outputs, not external input.
        c, p, _ = eval(open(fn).read())
        # Sum the final counter of each port, converted to GB.
        c = sum([int(b.split('\n')[-1]) * bytes_units for b in c])
        p = sum([int(b.split('\n')[-1]) * bytes_units for b in p])
        data[key] = p, c
    y = [data[t] for t in types]
    x = np.array([[0, 1] for i in xrange(len(y))])
    options = get_default_plot_options(x, y)
    options.bar_labels.show = False
    options.legend.options.fontsize = 18
    options.y.label_offset = [-.07, -.18]
    options.y.limits = [0, 40]
    options.x.ticks.major.labels = DotMap(
        text=['Packet', 'Circuit'])
    options.y.ticks.major.labels = DotMap(
        locations=[0, 5, 10, 15, 20, 25])
    options.output_fn = 'graphs/hdfs_utilization.pdf'
    options.x.label.xlabel = 'Which switch'
    options.y.label.ylabel = 'Bytes sent (GB)'
    plot(x, y, options)
# Entry point: argv[1] is the results directory; render all four graphs.
if __name__ == '__main__':
    graph_wct([parse_hdfs_logs(sys.argv[1] + n) for n in files_short])
    graph_tail([parse_hdfs_logs(sys.argv[1] + n) for n in files])
    graph_throughput([parse_hdfs_throughput(sys.argv[1] + n) for n in files])
    bytes_graph()
|
nilq/baby-python
|
python
|
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
import argparse
import json
import numpy as np
from isaac import Application, Cask, Codelet, Composite
import apps.samples.pick_and_place.pick_and_place as pick_and_place
from apps.samples.pick_and_place.task_planner import *
def create_composite_atlas_ur10(cask_root, joints):
    '''Creates composite atlas cask with waypoints for ur10. Tested with ovkit sim.'''
    if len(joints) != 6:
        raise ValueError("UR10 should have 6 joints, got {}".format(len(joints)))
    cask = Cask(cask_root, writable=True)
    # Arm waypoints: one "position" quantity per joint, written in the same
    # order as before (home, view, approach).
    joint_quantities = [[joint, "position", 1] for joint in joints]
    joint_waypoints = [
        ("home_pose", [1.3504, -1.4784, 1.6887, -1.7811, -1.5708, 1.3488]),
        ("view_pose", [2.1358, -1.4784, 1.6887, -1.7811, -1.5708, 0.5635]),
        ("approach_pose", [-0.2966, -1.062, 1.251, -1.38, -1.716, 0.217]),
    ]
    for name, values in joint_waypoints:
        waypoint = np.array(values, dtype=np.dtype("float64"))
        cask.write_message(
            pick_and_place.create_composite_waypoint(name, joint_quantities, waypoint))
    # Gripper waypoints: pump / valve / gripper control values.
    gripper_quantities = [[channel, "none", 1] for channel in ["pump", "valve", "gripper"]]
    gripper_waypoints = [
        ("suction_on", [1.0, 0.0, 1.0]),
        ("suction_off", [0.0, 1.0, 0.0]),
        ("valve_off", [0.0, 0.0, 0.0]),
    ]
    for name, values in gripper_waypoints:
        waypoint = np.array(values, dtype=np.dtype("float64"))
        cask.write_message(
            pick_and_place.create_composite_waypoint(name, gripper_quantities, waypoint))
class MissionFeeder(Codelet):
    '''Reads a list of tasks from config and adds it to task_planner.'''

    def start(self):
        """Validate the configured task list and feed it to the task planner."""
        tasks = self.config.tasks
        if tasks is None:
            self.report_failure("No valid mission")
            return
        if not hasattr(self, 'task_planner'):
            raise AttributeError("task_planner not set before codelet start.")
        if not isinstance(self.task_planner, TaskPlannerInterface):
            raise TypeError("task_planner is not of type TaskPlannerInterface")
        self.task_planner.clear_all_tasks()
        for m in tasks:
            # Bug fix: use the validated self.task_planner instead of the
            # module-level `task_planner`, which only exists when this file is
            # run as a script.
            self.task_planner.pick_and_place_object(m['pick'], m['place'])
        self.log_info("Received {0} tasks".format(len(tasks)))
        self.report_success()
class TasksRemainingChecker(Codelet):
    '''Reports success if task_manager has remaining tasks on start, otherwise false.'''

    def start(self):
        """Succeed when tasks remain, fail when all tasks are done."""
        if not hasattr(self, 'task_planner'):
            raise AttributeError("task_planner not set before codelet start.")
        if not isinstance(self.task_planner, TaskPlannerInterface):
            raise TypeError("task_planner is not of type TaskPlannerInterface")
        # Bug fix: query the validated self.task_planner, not the module-level
        # global that only exists when run as a script.
        if self.task_planner.all_tasks_done():
            self.report_failure("All tasks are done.")
        else:
            self.report_success("Tasks remain.")
class TaskRemover(Codelet):
    '''Marks the current task in the task planner as done and reports success on start.'''

    def start(self):
        """Mark the planner's current task as done."""
        if not hasattr(self, 'task_planner'):
            raise AttributeError("task_planner not set before codelet start.")
        if not isinstance(self.task_planner, TaskPlannerInterface):
            raise TypeError("task_planner is not of type TaskPlannerInterface")
        # Bug fix: act on the validated self.task_planner, not the module-level
        # global that only exists when run as a script.
        self.task_planner.mark_current_task_as_done()
        self.report_success("Current task is done.")
class AllTasksDoneChecker(Codelet):
    '''Reports success if task_planner has no more tasks on start, otherwise reports failure.'''

    def start(self):
        """Succeed when all tasks are done, fail when tasks remain."""
        if not hasattr(self, 'task_planner'):
            raise AttributeError("task_planner not set before codelet start.")
        if not isinstance(self.task_planner, TaskPlannerInterface):
            raise TypeError("task_planner is not of type TaskPlannerInterface")
        # Bug fix: query the validated self.task_planner, not the module-level
        # global that only exists when run as a script.
        if self.task_planner.all_tasks_done():
            self.report_success("All tasks are done.")
        else:
            self.report_failure("Tasks remain.")
# Main part that sets up the app's logic and starts it afterwards.
if __name__ == '__main__':
    # Parse command line arguments: output cask path, kinematics, joint speed
    # and acceleration limits, simulator/mission-server connection settings,
    # and the index/name identifying this robot station.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cask", help="Path to output atlas", default="/tmp/pick_and_place_waypoints")
    parser.add_argument(
        "--kinematic_file",
        help="Path to kinematic json file",
        default="apps/assets/kinematic_trees/ur10.kinematic.json")
    parser.add_argument("--speed", help="Maximum joint speed", type=float, default=1.0)
    parser.add_argument(
        "--acceleration", help="Maximum joint acceleration", type=float, default=1.0)
    parser.add_argument(
        "--sim_host", type=str, help="Host ip for simulator (TcpSubscriber)", default="localhost")
    parser.add_argument(
        "--sim_output_port",
        type=int,
        help="Port to receive message from simulator (TcpSubscriber)",
        default=46000)
    parser.add_argument(
        "--sim_input_port",
        type=int,
        help="Port to publish message to simulator (TcpPublisher). Default to output_port+1")
    parser.add_argument(
        "--robot_index", type=int, help="Channel suffix for goal for the current robot.", default=0)
    parser.add_argument("--sight_port", type=int, help="Port for websight", default=3000)
    parser.add_argument(
        "--robot_name",
        type=str,
        help="Accept missions from the remote mission server for the robot with the given name",
        default="station")
    parser.add_argument(
        "--mission_host",
        type=str,
        help="The ip address or hostname of the host to connect to and receive missions from",
        default="localhost")
    parser.add_argument(
        "--mission_port",
        type=int,
        help="Port to receive goal from task manager (TcpSubscriber).",
        default=9998)
    args = parser.parse_args()
    # Read the arm joints from file: keep only actuated (non-constant) links.
    arm_joint_names = []
    with open(args.kinematic_file) as kinematic_file_handle:
        file_contents = json.load(kinematic_file_handle)
        if file_contents is None:
            raise ValueError("Unable to load kinematic json file {0}".format(args.kinematic_file))
    for link in file_contents['links']:
        if 'motor' in link and link['motor']['type'] != 'constant':
            arm_joint_names.append(link['name'])
    # create composite atlas
    create_composite_atlas_ur10(args.cask, arm_joint_names)
    app = Application(app_filename='packages/multi_robot_fof/station.app.json')
    app.load_module("sight")
    app.nodes["atlas"]["CompositeAtlas"].config.cask = args.cask
    app.load('packages/multi_robot_fof/ur10.config.json')
    # Configure the kinematic tree for the controller and for inverse kinematics.
    kinematic_tree = app.nodes['controller.kinematic_tree']['KinematicTree']
    kinematic_tree.config.kinematic_file = args.kinematic_file
    root_frame = '/environments/stations/station_{0}/assembly_robot/ur10'.format(args.robot_index)
    for node in ['pick_task.cartesian_planner', 'place_task.cartesian_planner']:
        inverse_kinematics_planner = app.nodes[node]['EndEffectorGlobalPlanner']
        inverse_kinematics_planner.config.kinematic_tree = 'controller.kinematic_tree'
        inverse_kinematics_planner.config.root_frame = root_frame
    app.nodes['controller.kinematic_tree']['KinematicTreeToPoseTree'].config.root_frame = root_frame
    app.nodes['pick_task.detections_to_pose_tree'][
        'DetectionsToPoseTree'].config.detection_frame = 'world'
    # Configure velocity and acceleration limits for the planner.
    planner = app.nodes['controller.local_plan']['MultiJointLqrPlanner']
    planner.config.speed_min = [-args.speed] * len(arm_joint_names)
    planner.config.speed_max = [args.speed] * len(arm_joint_names)
    planner.config.acceleration_min = [-args.acceleration] * len(arm_joint_names)
    planner.config.acceleration_max = [args.acceleration] * len(arm_joint_names)
    # Shared task planner instance handed to every PyCodelet below.
    task_planner = SimpleTaskPlanner()
    # Prepare relinking the target poses
    app.nodes['pick_task.relink_target_pose'].add(pick_and_place.RelinkTargetPoseCodelet)
    destination = app.nodes['place_task.relink_destination_pose'].add(
        pick_and_place.RelinkDestinationPoseCodelet)
    destination.config.root_frame = root_frame
    # Task flow control
    app.nodes['mission_feeder'].add(MissionFeeder)
    app.nodes['mission_done_checker'].add(AllTasksDoneChecker)
    app.nodes['task_remain_checker'].add(TasksRemainingChecker)
    app.nodes['task_remover'].add(TaskRemover)
    # Set task manager for all PyCodelets
    for _, frontend in app._pycodelet_frontends.items():
        frontend.task_planner = task_planner
    # Load the mission subgraph and set the config based on the input parameters
    app.load("packages/behavior_tree/apps/missions.graph.json", "mission")
    mission_client = app.nodes["mission.tcp_client"]["JsonTcpClient"]
    mission_client.config["host"] = args.mission_host
    mission_client.config["port"] = args.mission_port
    app.nodes["mission.mission_control"]["NodeGroup"].config["node_names"] = ["main_sequence"]
    mission_robot_name = "{0}_{1}".format(args.robot_name, args.robot_index)
    app.nodes["mission.robot_name"]["JsonMockup"].config.json_mock = {"text": mission_robot_name}
    # Wire the simulator TCP endpoints; input port defaults to output port + 1.
    sim_output = app.nodes['simulation.interface']['output']
    sim_output.config.host = args.sim_host
    sim_output.config.port = args.sim_output_port
    sim_input = app.nodes['simulation.interface']['input']
    if args.sim_input_port is not None:
        sim_input.config.port = args.sim_input_port
    else:
        sim_input.config.port = args.sim_output_port + 1
    app.nodes["websight"]["WebsightServer"].config.port = args.sight_port
    # Start the application.
    app.run()
|
nilq/baby-python
|
python
|
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import datetime
from keras import models
from keras.layers import Dense
if __name__ == "__main__":
    # Train an MLP on 500-d Word2Vec features and report accuracy, confusion
    # matrix and a precision-recall curve per learning rate.
    startTime = datetime.datetime.now()
    # x/y: training features and labels; z/t: test features and labels.
    x = np.load('data/train_w2v_data_array_500d.npy')
    y = np.load('data/train_w2v_target_array_500d.npy')
    y = y.astype('int')
    y = y.flatten()
    z = np.load('data/test_w2v_data_array_500d.npy')
    t = np.load('data/test_w2v_target_array_500d.npy')
    t = t.astype('int')
    t = t.flatten()
    learningRate = [0.1]
    for lr in learningRate:
        clf = MLPClassifier(solver='sgd', hidden_layer_sizes=(30,20), batch_size='auto',
                            learning_rate='adaptive', learning_rate_init=lr, early_stopping=True)
        clf.fit(x, y)
        p = clf.predict(z)
        y_scores = clf.predict_proba(z)
        # predicted = predict_nn(x, y, z, clf)
        print("For learning rate: ", lr)
        print("Word2Vec Neural Network with 500 features")
        # Compute accuracy
        accuracy = accuracy_score(t, p, normalize=False)
        print("Accuracy: ", (accuracy / len(t)) * 100)
        # Confusion matrix
        # NOTE(review): this rebinds the imported sklearn function name; a
        # second loop iteration would fail because the name now holds an array.
        confusion_matrix = confusion_matrix(t, p)
        print("Confusion Matrix:\n", confusion_matrix)
        # Replace 4s with 1s
        t[np.where(t == 4)] = 1
        p[np.where(p == 4)] = 1
        # Plot the Precision-Recall curve
        precision, recall, _ = precision_recall_curve(t, y_scores[:, 1])
        plt.figure()
        plt.step(recall, precision, color='b', alpha=0.2, where='post')
        plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.ylim([0.0, 1.05])
        plt.xlim([0.0, 1.0])
        average_precision = average_precision_score(t, p)
        plt.title('Neural Network Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
        filename = "data/w2v_NN_500d_" + str(lr) + "_precisionRecall.png"
        plt.savefig(filename)
|
nilq/baby-python
|
python
|
from setuptools import setup
version = '1.0.2'
# Packaging metadata for django-mobi2 (middleware + view decorator that
# detects phones and small-screen devices from the User-Agent header).
setup(
    name='django-mobi2',
    version=version,
    keywords='Django UserAgent',
    description='Django middleware and view decorator to detect phones and small-screen devices',
    long_description=open('README').read(),
    url='https://github.com/django-xxx/django-mobi2.git',
    author='Hackathon',
    author_email='kimi.huang@brightcells.com',
    packages=['mobi2'],
    py_modules=[],
    package_data={
        'mobi2': ['*.txt']
    },
    install_requires=['django-six'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Office/Business :: Financial :: Spreadsheet',
    ],
)
|
nilq/baby-python
|
python
|
import pickle
from tqdm import tqdm
import numpy as np
def save_stereotypes(animate_file, text_file, out_file):
    """
    Save list of words that are stereotyped towards men or women.

    A pair is counted as feminine-stereotyped when the masculine form occurs
    at most a quarter as often as the feminine form in the text (and vice
    versa). Both pickled lists contain (index, feminine, masculine) tuples.
    :param animate_file: list of noun pairs (tab-separated, columns 1 and 2)
    :param text_file: file to test word counts on
    :param out_file: output file (two pickle records: feminine, masculine)
    """
    with open(animate_file, "r") as handle:
        rows = [entry.strip().split("\t") for entry in handle.readlines()]
    words = list(zip([row[1] for row in rows], [row[2] for row in rows]))
    with open(text_file) as handle:
        tokens = handle.read().split()
    fem_main = []
    masc_main = []
    for i in tqdm(range(len(words)), total=len(words)):
        fem, masc = words[i]
        fem_count = tokens.count(fem) + tokens.count(fem.capitalize())
        masc_count = tokens.count(masc) + tokens.count(masc.capitalize())
        if fem_count != 0 and masc_count <= .25 * fem_count:
            fem_main.append((i, fem, masc))
        elif masc_count != 0 and fem_count <= .25 * masc_count:
            masc_main.append((i, fem, masc))
    print(len(fem_main), len(masc_main))
    with open(out_file, "wb") as handle:
        pickle.dump(fem_main, handle, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(masc_main, handle, protocol=pickle.HIGHEST_PROTOCOL)
def get_probs(prob_file):
    """
    Read one float per line from *prob_file*.
    :param prob_file: File containing query probabilities
    :return: list of negative log likelihoods
    """
    with open(prob_file, "r") as handle:
        return [float(entry.strip()) for entry in handle]
def calc_romance_bias(probs):
    """
    Return the gender bias of a romance-language corpus.
    :param probs: list of negative log likelihoods, grouped 32 per query set
    """
    total = 0
    for base in range(0, len(probs), 32):
        total += sum(probs[base + off] for off in (18, 22, 26, 30))
        total -= sum(probs[base + off] for off in (1, 5, 9, 13))
    return total / 8
def calc_romance_grammar(probs):
    """
    Return the grammaticality of a romance-language corpus.
    :param probs: list of negative log likelihoods, grouped 32 per query set
    """
    # Grammatical variants add to the score, ungrammatical ones subtract.
    right = (0, 4, 8, 12, 19, 23, 27, 31)
    wrong = (1, 5, 9, 13, 18, 22, 26, 30)
    total = 0
    for base in range(0, len(probs), 32):
        total += sum(probs[base + off] for off in right)
        total -= sum(probs[base + off] for off in wrong)
    return total / 4
def calc_hebrew_bias(probs):
    """
    Return the gender bias of a Hebrew corpus.
    :param probs: list of negative log likelihoods, grouped 16 per query set
    """
    total = 0
    for base in range(0, len(probs), 16):
        total += sum(probs[base + off] for off in (2, 6, 10, 14))
        total -= sum(probs[base + off] for off in (1, 5, 9, 13))
    return total / 4
def calc_hebrew_grammar(probs):
    """
    :param probs: list of negative log likelihoods for a Hebrew corpus
    :return: grammaticality of corpus
    """
    # NOTE(review): the final `+=` line reads offsets idx + 19/23/27/31, which
    # reach past the 16-wide stride of this loop; the last iteration therefore
    # indexes beyond len(probs) and raises IndexError whenever len(probs) is a
    # multiple of 16. These offsets appear to be copy-pasted from
    # calc_romance_grammar (32-wide groups) — confirm the intended Hebrew
    # offsets before relying on this function.
    grammar = 0
    for idx in range(0, len(probs), 16):
        grammar -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
        grammar -= probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
        grammar += probs[idx] + probs[idx + 4] + probs[idx + 8] + probs[idx + 12]
        grammar += probs[idx + 19] + probs[idx + 23] + probs[idx + 27] + probs[idx + 31]
    return grammar / 2
def calc_russian_bias(probs):
    """
    Return the gender bias of a Russian corpus.
    :param probs: list of negative log likelihoods, grouped 24 per query set
    """
    total = 0
    for base in range(0, len(probs), 24):
        total -= sum(probs[base + off] for off in (1, 3, 5, 7))
        total += sum(probs[base + off] for off in (8, 10, 12, 14))
        total -= sum(probs[base + off] for off in (17, 19, 21, 23))
        total += sum(probs[base + off] for off in (16, 18, 20, 22))
    return total / 4
def calc_russian_grammar(probs):
    """
    :param probs: list of negative log likelihoods for a Russian corpus
    :return: grammaticality of corpus
    """
    # NOTE(review): this body is an exact copy of calc_hebrew_grammar and
    # inherits its bug — offsets idx + 19/23/27/31 exceed the 16-wide stride,
    # so the last iteration raises IndexError for any input whose length is a
    # multiple of 16. calc_russian_bias above also uses 24-wide groups, so the
    # 16 stride here looks wrong as well; verify against the query layout.
    grammar = 0
    for idx in range(0, len(probs), 16):
        grammar -= probs[idx + 1] + probs[idx + 5] + probs[idx + 9] + probs[idx + 13]
        grammar -= probs[idx + 2] + probs[idx + 6] + probs[idx + 10] + probs[idx + 14]
        grammar += probs[idx] + probs[idx + 4] + probs[idx + 8] + probs[idx + 12]
        grammar += probs[idx + 19] + probs[idx + 23] + probs[idx + 27] + probs[idx + 31]
    return grammar / 2
def calc_other_bias(probs):
    """
    Return the gender bias of a corpus (generic scheme).
    :param probs: list of negative log likelihoods, grouped 16 per query set
    """
    total = 0
    for base in range(0, len(probs), 16):
        total += sum(probs[base + off] for off in (8, 10, 12, 14))
        total -= sum(probs[base + off] for off in (1, 3, 5, 7))
    return total / 4
def calc_other_grammar(probs):
    """
    Return the grammaticality of a corpus (generic scheme).
    :param probs: list of negative log likelihoods, grouped 24 per query set
    """
    # Grammatical variants add to the score, ungrammatical ones subtract.
    right = (0, 2, 4, 6, 9, 11, 13, 15)
    wrong = (1, 3, 5, 7, 8, 10, 12, 14)
    total = 0
    for base in range(0, len(probs), 24):
        total += sum(probs[base + off] for off in right)
        total -= sum(probs[base + off] for off in wrong)
    return total / 2
def get_bias_and_grammar():
    """
    Print bias and grammaticality for spanish, french, hebrew, and italian corpora.

    For each language, reads negative log likelihoods for the original,
    gender-swapped and debiased corpora, scores them with the scheme selected
    by lang_type (0 = Hebrew, 1 = Romance, 2 = Russian, otherwise generic),
    and prints pgfplots coordinate lines plus per-language ratios.
    """
    bias = []
    grammar = []
    for lang, lang_type in [("spanish", 1), ("new_queries_old_model_french", 1),
                            ("new_queries_old_model_hebrew", 0), ("new_queries_old_model_italian", 1)]:
        prob_file_o = "../results/" + lang + "_original-initial.outlogliks"
        prob_file_s = "../results/" + lang + "_swap-initial.outlogliks"
        prob_file_d = "../results/" + lang + "_debias-initial.outlogliks"
        probs_o = get_probs(prob_file_o)
        probs_s = get_probs(prob_file_s)
        probs_d = get_probs(prob_file_d)
        # NOTE(review): in every branch *_d is computed from probs_s (swap) and
        # *_s from probs_d (debias). That matches the original code, so the
        # ordering is preserved here — confirm the intended labelling upstream.
        if lang_type == 0:
            bias_o = calc_hebrew_bias(probs_o)
            bias_d = calc_hebrew_bias(probs_s)
            bias_s = calc_hebrew_bias(probs_d)
            grammar_o = calc_hebrew_grammar(probs_o)
            grammar_d = calc_hebrew_grammar(probs_s)
            grammar_s = calc_hebrew_grammar(probs_d)
        elif lang_type == 1:
            bias_o = calc_romance_bias(probs_o)
            bias_d = calc_romance_bias(probs_s)
            bias_s = calc_romance_bias(probs_d)
            grammar_o = calc_romance_grammar(probs_o)
            grammar_d = calc_romance_grammar(probs_s)
            grammar_s = calc_romance_grammar(probs_d)
        elif lang_type == 2:
            bias_o = calc_russian_bias(probs_o)
            bias_d = calc_russian_bias(probs_s)
            bias_s = calc_russian_bias(probs_d)
            grammar_o = calc_russian_grammar(probs_o)
            grammar_d = calc_russian_grammar(probs_s)
            grammar_s = calc_russian_grammar(probs_d)
        else:
            bias_o = calc_other_bias(probs_o)
            bias_d = calc_other_bias(probs_s)
            bias_s = calc_other_bias(probs_d)
            grammar_o = calc_other_grammar(probs_o)
            # Bug fix: previously called calc_other_bias here, so this branch
            # mixed a bias score into the grammar results.
            grammar_d = calc_other_grammar(probs_s)
            grammar_s = calc_other_grammar(probs_d)
        bias.append([bias_o, bias_s, bias_d])
        grammar.append([grammar_o, grammar_s, grammar_d])
    print("Bias")
    for i in range(3):
        print("\\addplot coordinates {(Esp,", bias[0][i],
              ") (Fra,", bias[1][i], ") (Heb,", bias[2][i], ") (Ita,", bias[3][i], ")};")
    x = 0
    for i in range(4):
        x += bias[i][0] / bias[i][2]
        print(bias[i][0] / bias[i][2])
    print(x/4)
    print("Grammar")
    for i in range(3):
        print("\\addplot coordinates {(Esp,", grammar[0][i],
              ") (Fra,", grammar[1][i], ") (Heb,", grammar[2][i], ") (Ita,", grammar[3][i], ")};")
    x = 0
    for i in range(4):
        x += grammar[i][1] / grammar[i][2]
        print(grammar[i][1] / grammar[i][2])
    print(x/4)
|
nilq/baby-python
|
python
|
# Read a name and report whether it contains "SILVA" (case-insensitive).
nome = str(input('Digite o nome: ')).strip()
caps = nome.upper()
truefalse = 'SILVA' in caps
print('Há SILVA no nome?\n', truefalse)
|
nilq/baby-python
|
python
|
# %%
import numpy as np
from scipy import spatial

# 4x4 integer grid -> 16 points; build a KD-tree over them.
x, y = np.mgrid[0:4, 0:4]
points = np.c_[x.ravel(), y.ravel()]
tree = spatial.cKDTree(points)
# Indices of grid points within radius 1 of a single point / of every point.
tree.query_ball_point([2, 0], 1)
tree.query_ball_point(points, 1)
# %%
# Bug fix: query_ball_tree expects another KD-tree, not a raw coordinate
# array — passing `points` directly raises a TypeError.
other_tree = spatial.cKDTree(points)
tree.query_ball_tree(other_tree, 1)
|
nilq/baby-python
|
python
|
# This is a log file. It is saved as .py so that the following notebooks can easily import it and use its information.
# started at: 2022.03.03-15:28:15
|
nilq/baby-python
|
python
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import pyauto_functional # Must come before pyauto (and thus, policy_base).
import policy_base
sys.path.append('/usr/local') # Required to import autotest libs.
from autotest.cros import constants
from autotest.cros import cryptohome
class ChromeosEphemeral(policy_base.PolicyTestBase):
  """Tests a policy that makes users ephemeral.
  When this policy is enabled, no persistent information in the form of
  cryptohome shadow directories or local state prefs should be created for
  users. Additionally, any persistent information previously accumulated should
  be cleared when a user first logs in after enabling the policy."""
  # Two fixed test accounts; user_index 0/1 selects between them.
  _usernames = ('alice@example.com', 'bob@example.com')
  def _SetEphemeralUsersEnabled(self, enabled):
    """Sets the ephemeral users device policy.
    The show_user_names policy is set to False to ensure that even if the local
    state is not being automatically cleared, the login screen never shows user
    pods. This is required by the Login browser automation call.
    """
    self.SetDevicePolicy({'ephemeral_users_enabled': enabled,
                          'show_user_names': False})
  def _DoesVaultDirectoryExist(self, user_index):
    """Returns True if the user's cryptohome shadow directory exists."""
    user_hash = cryptohome.get_user_hash(self._usernames[user_index])
    return os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash))
  def _AssertLocalStatePrefsSet(self, user_indexes):
    """Asserts that local state prefs list exactly the given users."""
    expected = sorted([self._usernames[index] for index in user_indexes])
    # The OAuthTokenStatus pref is populated asynchronously. Checking whether it
    # is set would lead to an ugly race.
    for pref in ['LoggedInUsers', 'UserImages', 'UserDisplayEmail', ]:
      actual = sorted(self.GetLocalStatePrefsInfo().Prefs(pref))
      self.assertEqual(actual, expected,
                       msg='Expected to find prefs in local state for users.')
  def _AssertLocalStatePrefsEmpty(self):
    """Asserts that no per-user prefs remain in local state."""
    for pref in ['LoggedInUsers',
                 'UserImages',
                 'UserDisplayEmail',
                 'OAuthTokenStatus']:
      self.assertFalse(self.GetLocalStatePrefsInfo().Prefs(pref),
                       msg='Expected to not find prefs in local state for any user.')
  def _AssertVaultDirectoryExists(self, user_index):
    self.assertTrue(self._DoesVaultDirectoryExist(user_index=user_index),
                    msg='Expected vault shadow directory to exist.')
  def _AssertVaultDirectoryDoesNotExist(self, user_index):
    self.assertFalse(self._DoesVaultDirectoryExist(user_index=user_index),
                     msg='Expected vault shadow directory to not exist.')
  def _AssertVaultMounted(self, user_index, ephemeral):
    """Asserts the user's vault is mounted with the expected backing store."""
    if ephemeral:
      device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_EPHEMERAL
      fs_regex = constants.CRYPTOHOME_FS_REGEX_TMPFS
    else:
      device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW
      fs_regex = constants.CRYPTOHOME_FS_REGEX_ANY
    # NOTE(review): operator precedence makes the msg expression parse as
    # ("..." % 'tmpfs') if ephemeral else 'shadow directory', so the
    # non-ephemeral failure message is just 'shadow directory'.
    self.assertTrue(
        cryptohome.is_vault_mounted(device_regex=device_regex,
                                    fs_regex=fs_regex,
                                    user=self._usernames[user_index],
                                    allow_fail=True),
        msg='Expected vault backed by %s to be mounted.' %
            'tmpfs' if ephemeral else 'shadow directory')
  def _AssertNoVaultMounted(self):
    self.assertFalse(cryptohome.is_vault_mounted(allow_fail=True),
                     msg='Did not expect any vault to be mounted.')
  def Login(self, user_index):
    """Convenience method to login to the user at the given index."""
    self.assertFalse(self.GetLoginInfo()['is_logged_in'],
                     msg='Expected to be logged out.')
    policy_base.PolicyTestBase.Login(self,
                                     self._usernames[user_index],
                                     'dummy_password')
    self.assertTrue(self.GetLoginInfo()['is_logged_in'],
                    msg='Expected to be logged in.')
  def testEnablingBeforeSession(self):
    """Checks that a new session can be made ephemeral."""
    self.PrepareToWaitForLoginFormReload()
    self._SetEphemeralUsersEnabled(True)
    self.WaitForLoginFormReload()
    self.Login(user_index=0)
    self._AssertLocalStatePrefsEmpty()
    self._AssertVaultMounted(user_index=0, ephemeral=True)
    self.Logout()
    self._AssertLocalStatePrefsEmpty()
    self._AssertNoVaultMounted()
    self._AssertVaultDirectoryDoesNotExist(user_index=0)
  def testEnablingDuringSession(self):
    """Checks that an existing non-ephemeral session is not made ephemeral."""
    self.PrepareToWaitForLoginFormReload()
    self._SetEphemeralUsersEnabled(False)
    self.WaitForLoginFormReload()
    self.Login(user_index=0)
    self._AssertLocalStatePrefsSet(user_indexes=[0])
    self._AssertVaultMounted(user_index=0, ephemeral=False)
    self._SetEphemeralUsersEnabled(True)
    self._AssertLocalStatePrefsSet(user_indexes=[0])
    self._AssertVaultMounted(user_index=0, ephemeral=False)
    self.Logout()
    self._AssertLocalStatePrefsEmpty()
    self._AssertNoVaultMounted()
    self._AssertVaultDirectoryDoesNotExist(user_index=0)
  def testDisablingDuringSession(self):
    """Checks that an existing ephemeral session is not made non-ephemeral."""
    self.PrepareToWaitForLoginFormReload()
    self._SetEphemeralUsersEnabled(True)
    self.WaitForLoginFormReload()
    self.Login(user_index=0)
    self._AssertVaultMounted(user_index=0, ephemeral=True)
    self._SetEphemeralUsersEnabled(False)
    self._AssertVaultMounted(user_index=0, ephemeral=True)
    self.Logout()
    self._AssertLocalStatePrefsEmpty()
    self._AssertNoVaultMounted()
    self._AssertVaultDirectoryDoesNotExist(user_index=0)
  def testEnablingEphemeralUsersCleansUp(self):
    """Checks that persistent information is cleared."""
    self.PrepareToWaitForLoginFormReload()
    self._SetEphemeralUsersEnabled(False)
    self.WaitForLoginFormReload()
    self.Login(user_index=0)
    self.Logout()
    self._AssertLocalStatePrefsSet(user_indexes=[0])
    self.Login(user_index=1)
    self.Logout()
    self._AssertLocalStatePrefsSet(user_indexes=[0, 1])
    self._AssertVaultDirectoryExists(user_index=0)
    self._AssertVaultDirectoryExists(user_index=1)
    self._SetEphemeralUsersEnabled(True)
    self.Login(user_index=0)
    self._AssertVaultMounted(user_index=0, ephemeral=True)
    self.Logout()
    self._AssertVaultDirectoryDoesNotExist(user_index=0)
    self._AssertVaultDirectoryDoesNotExist(user_index=1)
|
nilq/baby-python
|
python
|
"""Unit test for the data_tuils module."""
import pytest
import candle
# Placeholder tests for candle.data_utils helpers that are currently unused
# or only exercised indirectly; each skip reason records why.
@pytest.mark.skip(reason="used by load_Xy_data_noheader")
def test_to_categorical():
    pass
@pytest.mark.skip(reason="used by load_Xy_data2")
def test_convert_to_class():
    pass
@pytest.mark.skip(reason="used by impute_and_scale_array")
def test_scale_array():
    pass
# should we keep this?
@pytest.mark.skip(reason="impute_and_scale_array is not used")
def test_impute_and_scale_array():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_drop_impute_and_scale_dataframe():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_discretize_dataframe():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_discretize_array():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_lookup():
    pass
# should we keep this?
@pytest.mark.skip(
    reason="referenced in p1b1 but succeeded by load_csv_data. no longer used"
)
def test_load_X_data():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_load_X_data2():
    pass
# should we keep this?
@pytest.mark.skip(reason="this function is not used")
def test_load_Xy_one_hot_data():
    pass
# used by p1b2
def test_load_Xy_one_hot_data2():
    """Exercise candle.load_Xy_one_hot_data2 on the dummy P1B2 CSVs.

    Fetches the fixtures over the network on first run, then checks the
    sizes of the resulting train/val/test splits.
    """
    import numpy as np
    DEFAULT_DATATYPE = (
        np.float32
    )  # will be replaced by default_utils.DEFAULT_DATATYPE once available
    params = {
        "data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B2/",
        "train_data": "P1B2.dummy.train.csv",
        "test_data": "P1B2.dummy.test.csv",
        "feature_subsample": 0,
        "shuffle": True,
        "scaling": "minmax",
        "val_split": 0.1,
        "data_type": DEFAULT_DATATYPE,
    }
    file_train = candle.fetch_file(
        params["data_url"] + params["train_data"], subdir="Pilot1"
    )
    file_test = candle.fetch_file(
        params["data_url"] + params["test_data"], subdir="Pilot1"
    )
    # Fixed seed keeps the shuffled split deterministic.
    seed = 2017
    (x_train, y_train), (x_val, y_val), (x_test, y_test) = candle.load_Xy_one_hot_data2(
        file_train,
        file_test,
        class_col=["cancer_type"],
        drop_cols=["case_id", "cancer_type"],
        n_cols=params["feature_subsample"],
        shuffle=params["shuffle"],
        scaling=params["scaling"],
        validation_split=params["val_split"],
        dtype=params["data_type"],
        seed=seed,
    )
    assert x_train.shape == (9, 28204)
    assert len(y_train) == 9
    assert len(x_val) == 0
    assert len(y_val) == 0
    assert len(x_test) == 1
    assert len(y_test) == 1
# should we keep this?
@pytest.mark.skip(reason="referenced in p1b2 but not used")
def test_load_Xy_data2():
    pass
# used by tc1
def test_load_Xy_data_noheader():
    """Exercise candle.load_Xy_data_noheader on dummy type-class CSVs.

    Fetches the fixtures over the network on first run, then checks the
    shapes of the train and test arrays.
    """
    import numpy as np
    DEFAULT_DATATYPE = (
        np.float32
    )  # will be replaced by default_utils.DEFAULT_DATATYPE once available
    params = {
        "data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/type-class/",
        "train_data": "type_18_300_train.dummy.csv",
        "test_data": "type_18_300_test.dummy.csv",
        "data_type": DEFAULT_DATATYPE,
        "classes": 36,
    }
    train_path = candle.fetch_file(params["data_url"] + params["train_data"], "Pilot1")
    test_path = candle.fetch_file(params["data_url"] + params["test_data"], "Pilot1")
    usecols = None
    x_train, y_train, x_test, y_test = candle.load_Xy_data_noheader(
        train_path,
        test_path,
        params["classes"],
        usecols,
        scaling="maxabs",
        dtype=params["data_type"],
    )
    assert x_train.shape == (10, 60483)
    assert len(y_train) == 10
    assert x_test.shape == (2, 60483)
    assert len(y_test) == 2
# used by p1b1
def test_load_csv_data():
    """Exercise candle.load_csv_data on the dummy P1B1 CSVs.

    Fetches the fixtures over the network on first run, then checks split
    sizes and the returned feature/label headers.
    """
    import numpy as np
    DEFAULT_DATATYPE = (
        np.float32
    )  # will be replaced by default_utils.DEFAULT_DATATYPE once available
    params = {
        "data_url": "http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B1/",
        "train_data": "P1B1.dummy.train.csv",
        "test_data": "P1B1.dummy.test.csv",
        "feature_subsample": 0,
        "shuffle": False,
        "scaling": "minmax",
        "data_type": DEFAULT_DATATYPE,
        "val_split": 0.1,
    }
    train_path = candle.fetch_file(params["data_url"] + params["train_data"], "Pilot1")
    test_path = candle.fetch_file(params["data_url"] + params["test_data"], "Pilot1")
    x_cols = None
    drop_cols = ["case_id"]
    onehot_cols = ["cancer_type"]
    y_cols = ["cancer_type"]
    seed = 2017
    (
        x_train,
        y_train,
        x_val,
        y_val,
        x_test,
        y_test,
        x_labels,
        y_labels,
    ) = candle.load_csv_data(
        train_path,
        test_path,
        x_cols=x_cols,
        y_cols=y_cols,
        drop_cols=drop_cols,
        onehot_cols=onehot_cols,
        n_cols=params["feature_subsample"],
        shuffle=params["shuffle"],
        scaling=params["scaling"],
        dtype=params["data_type"],
        validation_split=params["val_split"],
        return_dataframe=False,
        return_header=True,
        nrows=params["train_samples"]
        if "train_samples" in params and params["train_samples"] > 0
        else None,
        seed=seed,
    )
    assert len(x_train) == 9
    assert len(x_train[0]) == 60483
    assert len(y_train) == 9
    assert len(x_val) == 1
    assert len(y_val) == 1
    assert len(x_test) == 1
    assert len(y_test) == 1
    assert len(x_labels) == 60483
    assert len(y_labels) == 1
|
nilq/baby-python
|
python
|
import numpy as np
import scipy.optimize as so
import cv2
from . import cfilter, cresampler, clz4, report
from .struct import *
_LZ4_COMPRESSION_LEVEL = 9
def applyBestIntraCompression(img, dropThreshold, minRetSize, fastDecodeMode = 2):
    """Try several spatial prediction filters on `img`, LZ4-HC compress each
    residual, and return the smallest candidate.

    :param img: HxWxC image array
    :param dropThreshold: lossy threshold forwarded to the filter functions
    :param minRetSize: return None unless the best compressed size beats this
                       (-1 disables the check)
    :param fastDecodeMode: higher values exclude filters that are slower to
                           decode (SUBAVG and its EX variants)
    :return: dict with the filtered image, its round-tripped decompression,
             the chosen intra method, a hint string and the compressed size;
             or None when minRetSize is not beaten.
    """
    h, w, nChannel = img.shape
    def _addEx(filterModeList, baseMethod, baseFilter, baseDefilter, mode):
        # Register "EX" variants (interleaved by 2/4/6/8 columns) of a base
        # filter where the image width allows it; the variant id is packed
        # into the high nibble of the method byte.
        assert not baseMethod & 0xf0
        EX2, EX4, EX6, EX8 = 0x10, 0x20, 0x30, 0x40
        if(nChannel == 1):
            if(mode < 2):
                if(w % 4 == 0):
                    filterModeList.append((baseMethod | EX4, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 4, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 4)))
                if(w % 6 == 0):
                    filterModeList.append((baseMethod | EX6, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 6, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 6)))
                if(w % 8 == 0):
                    filterModeList.append((baseMethod | EX8, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 8, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 8)))
        elif(nChannel == 2):
            if(mode < 2):
                if(w % 2 == 0):
                    filterModeList.append((baseMethod | EX2, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 2, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 2)))
                if(w % 4 == 0):
                    filterModeList.append((baseMethod | EX4, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 4, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 4)))
        elif(nChannel == 4 or nChannel == 3):
            if(w % 2 == 0):
                filterModeList.append((baseMethod | EX2, "filtered_ex", lambda x, d:cfilter.filterEx(baseFilter, x, 2, dropThreshold), lambda x:cfilter.defilterEx(baseDefilter, x, 2)))
    filterModeList = [
        # intraMethod, hint, filterFunc, defilterFunc
        (FILTER_NONE, "lossless", lambda x, d:x.copy(), lambda x:x.copy()),
        (FILTER_SUBTOP, "filtered", cfilter.filterSubTop, cfilter.defilterSubTop),
        (FILTER_SUBLEFT, "filtered", cfilter.filterSubLeft, cfilter.defilterSubLeft),
    ]
    if(fastDecodeMode < 1):
        filterModeList.append((FILTER_SUBAVG, "filtered", cfilter.filterSubAvg, cfilter.defilterSubAvg))
    _addEx(filterModeList, FILTER_SUBLEFT, cfilter.filterSubLeft, cfilter.defilterSubLeft, 0)
    _addEx(filterModeList, FILTER_SUBAVG, cfilter.filterSubAvg, cfilter.defilterSubAvg, fastDecodeMode)
    # Kick off one LZ4-HC compression task per candidate filter.
    resultList = []
    for intraMethod, hint, filterFunc, defilterFunc in filterModeList:
        filtered = filterFunc(img, dropThreshold)
        data = filtered.tobytes()
        task = clz4.LZ4CompressionTask(data, clz4.COMPRESS_MODE_HC, _LZ4_COMPRESSION_LEVEL)
        resultList.append((filtered, data, task, intraMethod, hint, filterFunc, defilterFunc))
        del filtered, data, task
    # Pick the candidate with the smallest compressed payload.
    filtered, data, task, intraMethod, hint, filterFunc, defilterFunc = sorted(tuple(x for x in resultList), key = lambda x:len(x[2].get()))[0]
    bestSize = len(task.get())
    if(minRetSize == -1 or bestSize < minRetSize):
        return {
            "filtered": filtered,
            "decompressed": defilterFunc(filtered),
            "intraMethod": intraMethod,
            "hint": hint,
            "compressedSize": bestSize,
        }
    else:
        return None
def applyDeltaCompression(channel, refChannel, dropThreshold, minRetSize):
    """Compress `channel` as a residual against `refChannel`.

    When dropThreshold > 0, residual entries with magnitude <= dropThreshold
    are zeroed, except where the pixel crosses the threshold downwards
    (channel < dropThreshold while refChannel > dropThreshold). The residual
    is then intra-compressed; on success the result's "decompressed" image
    is rebased onto `refChannel` so it reconstructs the original channel.
    Returns None when no candidate beats minRetSize.
    """
    if dropThreshold > 0:
        residual = channel.astype(int) - refChannel.astype(int)
        small = np.abs(residual) <= dropThreshold
        crossing = np.logical_and(channel < dropThreshold, refChannel > dropThreshold)
        residual[np.logical_and(~crossing, small)] = 0
        residual = residual.astype(channel.dtype)
    else:
        residual = channel - refChannel
    result = applyBestIntraCompression(residual, 0, minRetSize)
    if result is None:
        return None
    result["decompressed"] += refChannel
    return result
def applyBestFilter(currImgList, prevFullImgList, prevImgList, dropThreshold):
    """Choose the cheapest reference mode for a pair of image planes.

    Compresses both planes three ways — standalone (full), delta against the
    previous full frame, and delta against the previous frame — and keeps
    whichever yields the smallest total compressed size.

    :param currImgList: the two planes of the current frame
    :param prevFullImgList: planes of the previous full frame, or None
    :param prevImgList: planes of the previous frame, or None
    :param dropThreshold: lossy threshold forwarded to the filters
    :return: dict with the per-plane results, total size and the chosen
             reference method constant
    """
    assert len(currImgList) == 2
    assert prevFullImgList is None or len(prevFullImgList) == 2
    assert prevImgList is None or len(prevImgList) == 2
    assert dropThreshold >= 0
    bestResult = []
    bestSize = -1
    bestMethod = REFERENCE_NONE
    # full: intra-only compression is the baseline (bestSize starts at -1, so
    # the accumulated total is one less than the true sum — kept as-is since
    # the same comparison basis is used for all candidates below).
    for img in currImgList:
        bestResult.append(applyBestIntraCompression(img, dropThreshold, -1))
        bestSize += bestResult[-1]["compressedSize"]
    report.do("Full: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in bestResult]), bestSize))
    # prevFull: delta against the last full frame
    if(prevFullImgList is not None):
        resultList = []
        size = 0
        for i, img in enumerate(currImgList):
            resultList.append(applyDeltaCompression(img, prevFullImgList[i], dropThreshold, -1))
            size += resultList[-1]["compressedSize"]
        if(size < bestSize):
            bestResult = resultList
            bestSize = size
            bestMethod = REFERENCE_PREVFULL
        report.do("PrevFull: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in resultList]), size))
        del resultList, size
    # prev: delta against the immediately previous frame (skip if identical
    # object to prevFullImgList — already evaluated above)
    if(prevImgList is not None and prevImgList is not prevFullImgList):
        resultList = []
        size = 0
        for i, img in enumerate(currImgList):
            resultList.append(applyDeltaCompression(img, prevImgList[i], dropThreshold, -1))
            size += resultList[-1]["compressedSize"]
        if(size < bestSize):
            bestResult = resultList
            bestSize = size
            bestMethod = REFERENCE_PREV
        report.do("Prev: intra %s, size %d" % (str([intraFilterMethodStr[x["intraMethod"]] for x in resultList]), size))
        del resultList, size
    report.do("Best delta method is %s" % (referenceMethodStr[bestMethod]))
    return {
        "bestResult": bestResult,
        "bestSize": bestSize,
        "deltaMethod": bestMethod,
    }
|
nilq/baby-python
|
python
|
import math
import numpy as np
from datetime import datetime
startTime = datetime.now()
natural = range(1, 500000)
# Get list of prime numbers
def prime_list(max_prime):
    """Return all primes in [2, max_prime) by striking out multiples.

    The value v is stored at index v - 2; struck-out entries become 0.
    Fixes for Python 3 (while keeping Python 2 behavior identical):
    range() is materialized to a list so it can be mutated in place, and
    `//` preserves the original integer division of the loop bound.
    """
    primes = list(range(2, max_prime))
    length = len(primes)
    for idx in range(length):
        p = primes[idx]
        if p == 0:
            continue  # already struck out as a multiple of a smaller prime
        # No multiples of any prime is a prime
        for i in range(2, (length + 1) // p + 1):
            primes[p * i - 2] = 0
    return [y for y in primes if y != 0]
# Construct list of triangles (= cummulative sum)
# NOTE(review): this script is Python 2 (print statements). np.int is also
# removed in NumPy >= 1.24 — replace with int or np.int64 when porting.
triangles = np.zeros(len(natural)).astype(np.int)
triangles[0] = 1
for i in range(1,len(natural)):
    triangles[i] = natural[i] + triangles[i - 1]
# Find list of prime numbers
primes = prime_list(int(np.sqrt(triangles[-1]))) # Only need this many primes
done = False
# Search each triangle number for one with at least 500 divisors
# (Project Euler problem 12).
for triangle_idx in range(len(triangles)):
    if done:
        break
    tri = float(triangles[triangle_idx])
    # Remove primes which does not constitute the considered number
    lego = [prime for prime in primes if tri % prime == 0]
    new_divisors = list(lego)
    stored = []
    new_found = True
    # Repeatedly multiply known divisors by the prime factors, keeping only
    # products that still divide tri, until no new divisors appear.
    while new_found:
        # Fill with all combinations of primes and their products
        tmp = np.zeros(len(lego)*len(new_divisors)).astype(np.int)
        for i in range(len(lego)):
            for j in range(len(new_divisors)):
                # Make all combinations
                tmp[i*len(new_divisors) + j] = lego[i]*new_divisors[j]
        tmp2 = [new for new in tmp if tri % new == 0]
        if set(new_divisors) == set(tmp2) or len(tmp2) == 0:
            new_found = False
        else:
            stored += new_divisors
            new_divisors = list(set(tmp2))
    ans = len(stored) + 1 # Itself
    if ans >= 500: # Don't try more triangle values
        done = True
        print 'triangle value', int(tri), 'with index', triangle_idx, 'gives', ans, 'possible divisors'
        print 'primes:', lego
        #print 'Possible divisors:', sorted(stored)
print datetime.now() - startTime
"""
prime_dict = {}
for p in range(len(lego)):
prime_dict[lego[p]] = 0
# Lego are the unique primes which builds the number.
# Find out how many primes the number is made from
nr_of_factors = 0
tmp_tri = tri
for i in range(len(lego)):
while tmp_tri % lego[i] == 0:
tmp_tri /= lego[i]
prime_dict[lego[i]] += 1
nr_of_factors += 1
print 'tri:', tri
print 'prime_dict', prime_dict
"""
"""
# When chosing 2 primes to make a factor for the number, and
# the number is made from, let's say 3 of the same prime, then
# limit those primes to 2 so the 'a chose b' doesn't produce
# identical copies. Chosing 2 out of [5, 5, 5] should only give
# [5, 5], i.e. there is only one way to do it.
chose_from = np.sum([min(prime_dict[lego[x]], i) for x in range(len(prime_dict))])
print 'chose', i, 'from', chose_from,':',math.factorial( chose_from ) / (math.factorial( chose_from - i ) * math.factorial( i ))
ans += math.factorial( chose_from ) / (math.factorial( chose_from - i ) * math.factorial( i ))
"""
"""
# With tri as 360, prime_dict is {2: 3, 3:2, 5:1}
# When grabbing 2 legos, we can take 0,1 or 2 of 2,
# 0,1 or 2 from 3, and 0 or 1 from 5.
# When grabbing 3 legos, we can take 0,1,2 or 3 of 2,
# 0,1 or 2 from 3, and 0 or 1 from 5.
# Search for these combinations where the sum of the
# number of lego pieces are 3.
# When grabbing 4 legos, we have the same options, but
# now we search for combinations where the sum is 4
# This generalizes to that we can take values from
# a range from 0 to min(#of pieces, #of legos in bin)
# in every bin.
# (Start searching from the bin with fewest legos to
# terminate search early.)
ans = 1 # Instead of reaching nr_of_factors which will give 1
for i in range(1, nr_of_factors): # Pick 1,2,3...
select = []
for piece_idx in range(len(lego)):
piece = lego[piece_idx]
# From 2*2*2, we can take 0,1,2 or 3 2's
select.append(range(prime_dict[piece] + 1) )
print select
print len(select)
print select[0][:]
for piece_idx in range(len(lego)):
hej = select[piece_idx][i] + select[piece_idx]
tjubadoo
"""
|
nilq/baby-python
|
python
|
# http://codeforces.com/contest/268/problem/C
# Outputs d + 1 lattice points (d - i, i) for i = 0..d, all lying on the
# line x + y = d where d = min(n, m).
n, m = map(int, input().split())
d = min(n, m)
print(d + 1)
for i in range(d + 1): print("{} {}".format(d-i, i))
|
nilq/baby-python
|
python
|
import numpy as np
from swarm import metrics
import pytest
# Example y with 11 points from -1.5 to 1.5.
y = np.array(
    [
        -0.997495,
        -0.9320391,
        -0.78332686,
        -0.5646425,
        -0.29552022,
        0.0,
        0.29552022,
        0.5646425,
        0.78332686,
        0.9320391,
        0.997495,
    ]
)
# losses[bee, epoch]: MSE of each bee's predictions per epoch (3 bees, 2 epochs).
losses = np.array([[0.82777214, 0.82301313], [0.35649812, 0.35499558], [0.82012618, 0.81833321]])
# Example predictions for first two epochs of a swarm of three bees.
# Shape (3 bees, 2 epochs, 11 points) — matches the assertions below.
ypreds = np.array(
    [
        [
            [
                -0.75819135,
                -0.6721624,
                -0.5914593,
                -0.5263963,
                -0.4742774,
                -0.42794737,
                -0.4386463,
                -0.45942548,
                -0.5183165,
                -0.6156955,
                -0.7488868,
            ],
            [
                -0.75616974,
                -0.6701199,
                -0.5893732,
                -0.5242175,
                -0.4719131,
                -0.42543185,
                -0.43560237,
                -0.45590907,
                -0.51438874,
                -0.61130494,
                -0.74402857,
            ],
        ],
        [
            [
                -0.18297303,
                -0.21213517,
                -0.18341143,
                -0.15066521,
                -0.11950047,
                -0.09036797,
                -0.0256229,
                0.0269562,
                0.06986493,
                0.1414077,
                0.19563401,
            ],
            [
                -0.18315202,
                -0.21226275,
                -0.18336335,
                -0.15038337,
                -0.11897573,
                -0.08946133,
                -0.0242492,
                0.02882081,
                0.07219976,
                0.14433557,
                0.19909364,
            ],
        ],
        [
            [
                0.36912787,
                0.34506714,
                0.32219756,
                0.3202601,
                0.30032292,
                0.259299,
                0.21430482,
                0.14271711,
                0.05134173,
                -0.063667,
                -0.17867568,
            ],
            [
                0.36715215,
                0.34335977,
                0.32078195,
                0.3192455,
                0.2996201,
                0.2587561,
                0.21395013,
                0.14270164,
                0.05165949,
                -0.06302758,
                -0.1777146,
            ],
        ],
    ]
)
# An example of scores obtained for a swarm that bounce around on the way down.
epoch_scores = [
    0.51727545,
    0.4584964,
    0.3589881,
    0.2524824,
    0.20734829,
    0.2482427,
    0.30246153,
    0.3388226,
    0.34041768,
    0.3064342,
    0.26800793,
    0.2686419,
    0.24010916,
    0.18522426,
    0.22644123,
    0.26727045,
    0.28942722,
    0.28332102,
    0.25410518,
    0.22259913,
    0.25512502,
    0.28029743,
    0.29604492,
    0.30136263,
    0.29408443,
    0.27543014,
    0.24885914,
    0.21919054,
    0.22593765,
    0.2305434,
    0.22474495,
    0.21082267,
    0.19170743,
    0.17090012,
    0.1521816,
    0.13839552,
    0.1299243,
    0.12569669,
    0.12456866,
    0.12922356,
    0.14023647,
    0.15060309,
    0.15662336,
    0.15730526,
    0.15512368,
    0.15510257,
    0.16903949,
    0.1815229,
    0.20310307,
    0.21428823,
    0.21110815,
    0.19391632,
    0.16897929,
    0.15510854,
    0.1513776,
    0.15778454,
    0.15062831,
    0.1423014,
    0.1533089,
    0.16309854,
]
def test_summarise_across_bees_ypreds():
    """Reduce a swarm's predictions to one feature per point x, e.g. the swarm mean."""
    reducers = (np.min, np.max, np.mean, np.median, np.std, np.ptp)
    for reduce_fn in reducers:
        summary = reduce_fn(ypreds, axis=0)
        assert type(summary) == np.ndarray
        assert summary.shape == (2, 11)
def test_summarise_across_bees_losses():
    """Reduce the per-bee losses to one summary value per epoch, e.g. the swarm average."""
    reducers = (np.min, np.max, np.mean, np.median, np.std, np.ptp)
    for reduce_fn in reducers:
        summary = reduce_fn(losses, axis=0)
        assert type(summary) == np.ndarray
        assert summary.shape == (2,)
def test_rmse_2d():
    """A single bee's 2-D prediction array yields per-epoch losses matching `losses`."""
    for bee_idx in (0, 2):
        preds = ypreds[bee_idx]
        out = metrics.mse_loss(preds, y)
        assert len(out.shape) == len(preds.shape) - 1
        # Not exactly 0 because of float round-off (pytest.approx did not help).
        assert np.max(np.abs(out - losses[bee_idx])) < 0.000001
def test_rmse_3d():
    """The full (bee, epoch, x) array reduces over the last axis to the loss table."""
    out = metrics.mse_loss(ypreds, y)
    expected_rank = len(ypreds.shape) - 1
    assert len(out.shape) == expected_rank
    # Not exactly zero because of float round-off.
    assert np.max(np.abs(out - losses)) < 0.000001
def test_loss_mean_point_pred():
    """
    Of interest: the per-point average prediction of many bees in a swarm may
    tend to beat any individual bee, so check its loss has one value per epoch.
    """
    swarm_mean = np.mean(ypreds, axis=0)
    loss_of_mean = metrics.mse_loss(swarm_mean, y)
    assert loss_of_mean.shape == (2,)
def test_if_nom_first_below():
    """First epoch at or below an absolute threshold; None when never reached."""
    threshold = 0.25
    idx = metrics.iteration_threshold(epoch_scores, threshold, "first", "below")
    assert epoch_scores[idx] <= threshold
    assert np.all(np.array(epoch_scores[:idx]) > threshold)
    assert metrics.iteration_threshold(epoch_scores, 0.001, "first", "below") is None
def test_if_nom_always_below():
    """First epoch from which the score stays at or below the threshold for good."""
    threshold = 0.25
    idx = metrics.iteration_threshold(epoch_scores, threshold, "always", "below")
    assert np.max(epoch_scores[idx:]) <= threshold
    assert epoch_scores[idx - 1] > threshold
    assert metrics.iteration_threshold(epoch_scores, 0.001, "always", "below") is None
def test_if_nom_first_above():
    """Mirror of first-below: flip the series and look for the first crossing upward."""
    flipped = 1 - np.array(epoch_scores)
    idx = metrics.iteration_threshold(flipped, 0.75, "first", "above")
    assert flipped[idx] >= 0.75
    assert np.all(flipped[:idx] < 0.75)
    assert metrics.iteration_threshold(flipped, 0.999, "first", "above") is None
def test_if_nom_always_above():
    """Mirror of always-below: flipped series must stay above the threshold from idx on."""
    flipped = 1 - np.array(epoch_scores)
    idx = metrics.iteration_threshold(flipped, 0.75, "always", "above")
    assert np.min(flipped[idx:]) >= 0.75
    assert flipped[idx - 1] < 0.75
    assert metrics.iteration_threshold(flipped, 0.999, "always", "above") is None
def test_if_ratio_first_below():
    """First epoch whose score-to-initial ratio drops to the threshold or lower."""
    idx = metrics.iteration_threshold_ratio(epoch_scores, 0.5, "first", "below")
    ratios = np.array(epoch_scores) / epoch_scores[0]
    assert ratios[idx] <= 0.5
    assert np.all(ratios[:idx] > 0.5)
    assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, "first", "below") is None
def test_if_ratio_always_below():
    """First epoch from which the score-to-initial ratio stays at or below 0.5."""
    idx = metrics.iteration_threshold_ratio(epoch_scores, 0.5, "always", "below")
    ratios = np.array(epoch_scores) / epoch_scores[0]
    assert np.max(ratios[idx:]) <= 0.5
    assert ratios[idx - 1] > 0.5
    assert metrics.iteration_threshold_ratio(epoch_scores, 0.001, "always", "below") is None
def test_if_ratio_first_above():
    """Ratio relative to a later baseline epoch (index 3), crossing upward."""
    inverted = 1 / np.array(epoch_scores)
    idx = metrics.iteration_threshold_ratio(inverted, 1.5, "first", "above", 3)
    ratios = inverted / inverted[3]
    assert ratios[idx] >= 1.5
    assert np.all(ratios[:idx] < 1.5)
    assert metrics.iteration_threshold_ratio(inverted, 200, "first", "above") is None
def test_if_ratio_always_above():
    """Ratio relative to baseline epoch 3 must stay at or above 1.1 from idx on."""
    inverted = 1 / np.array(epoch_scores)
    idx = metrics.iteration_threshold_ratio(inverted, 1.1, "always", "above", 3)
    ratios = inverted / inverted[3]
    assert np.min(ratios[idx:]) >= 1.1
    assert ratios[idx - 1] < 1.1
    assert metrics.iteration_threshold_ratio(inverted, 200, "always", "above") is None
def test_if_ratio_error():
    """Ratios are undefined when the score series crosses zero, so expect ValueError."""
    zero_crossing_scores = np.array([-0.1, 0, 0.1, 1])
    with pytest.raises(ValueError):
        metrics.iteration_threshold_ratio(zero_crossing_scores, 0.1)
|
nilq/baby-python
|
python
|
import numpy as np
from numpy.random import uniform
from veneer.pest_runtime import *
import pyapprox as pya
from scipy.stats import uniform
from functools import partial
from pyapprox.adaptive_sparse_grid import max_level_admissibility_function
from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator
from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth
from pyapprox.variable_transformations import AffineRandomVariableTransformation
from pyapprox.variables import IndependentMultivariateRandomVariable
# Problem dimension and adaptive-PCE stopping tolerance.
num_vars = 2
alph = 5    # NOTE(review): unused below — confirm whether still needed
bet = 5.    # NOTE(review): unused below
err_tol = 1e-7  # error tolerance used by the admissibility function
# Random frequencies for the cosine test function, one per variable.
a = np.random.uniform(0, 100, (num_vars, 1))
# `uniform` here is scipy.stats.uniform — the scipy import shadows the earlier
# `from numpy.random import uniform`.
variable = IndependentMultivariateRandomVariable(
    [uniform(0, 1)], [np.arange(num_vars)])
var_trans = AffineRandomVariableTransformation(
    IndependentMultivariateRandomVariable(
        [uniform(0, 1)], [np.arange(num_vars)]))
def function(x):
    """Test objective: sum over dimensions of cos(pi * a[i] * x[i, :]).

    Parameters
    ----------
    x : np.ndarray
        Sample matrix of shape (num_vars, num_samples).

    Returns
    -------
    np.ndarray
        Column vector of shape (num_samples, 1), one value per sample.
    """
    vals = [np.cos(np.pi * a[ii] * x[ii, :]) for ii in range(x.shape[0])]
    # Fixed: removed a leftover `breakpoint()` that halted every evaluation
    # (the adaptive PCE build calls this function repeatedly).
    return np.array(vals).sum(axis=0)[:, np.newaxis]
# def run_source(x):
#     """
#     A test function for adaptive PCE.
#     """
#     y = np.array(x[0:10].sum() + x[10]**2 + x[11] * 4 + 0.1)
#     # breakpoint()
#     print(y.shape)
#     return y.reshape(y.shape[0], 1)
# num_vars = variable.num_vars()
# Create PyApprox model
pce = pya.AdaptiveInducedPCE(num_vars, cond_tol=1e2)
# Define criteria
max_level = 4
# err_tol = 0.0
max_num_samples = 1000
max_level_1d = [max_level]*(pce.num_vars)
# Refinement stops when any limit is hit: per-dimension max level, total
# sample budget, or the error tolerance `err_tol`.
admissibility_function = partial(
    max_level_admissibility_function, max_level, max_level_1d,
    max_num_samples, err_tol)
refinement_indicator = variance_pce_refinement_indicator
pce.set_function(function, var_trans)
pce.set_refinement_functions(
    refinement_indicator,
    admissibility_function,
    clenshaw_curtis_rule_growth
)
# Generate emulator
pce.build()
# fit the PCE
# Validate the surrogate against 1000 fresh Monte Carlo samples.
validation_samples = pya.generate_independent_random_samples(variable, 1000)
validation_vals = function(validation_samples)
hat_vals = pce(validation_samples)
# NOTE(review): the result of np.std is discarded — assign or print it to
# actually report the validation error.
np.std(validation_vals - hat_vals)
|
nilq/baby-python
|
python
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This maintains access tokens for API calls."""
import os
from abc import ABC, abstractmethod
import google.auth.transport.requests
from google.oauth2 import service_account
from flask import current_app
class TokenService(ABC):  # pylint: disable=too-few-public-methods
    """Token Service abstract class with single get_token method."""

    # Abstract interface: concrete services (e.g. GoogleStorageTokenService
    # below) implement get_token to return a usable OAuth access token.
    @classmethod
    @abstractmethod
    def get_token(cls):
        """Generate an OAuth access token with storage access."""
class GoogleStorageTokenService(TokenService):  # pylint: disable=too-few-public-methods
    """Google Cloud Storage implementation.

    Maintain access token for Google Cloud Storage API calls.
    """

    # Google APIs and cloud storage
    GCP_PROJECT_ID = os.getenv('GCP_CS_PROJECT_ID')
    GCP_SA_CLIENT_EMAIL = os.getenv('GCP_CS_SA_CLIENT_EMAIL')
    GCP_SA_CLIENT_ID = os.getenv('GCP_CS_SA_CLIENT_ID')
    GCP_SA_PRIVATE_KEY = os.getenv('GCP_CS_SA_PRIVATE_KEY')
    GCP_SA_PRIVATE_KEY_ID = os.getenv('GCP_CS_SA_PRIVATE_KEY_ID')
    GCP_SA_CERT_URL = os.getenv('GCP_CS_SA_CERT_URL')
    # https://developers.google.com/identity/protocols/oauth2/scopes
    GCP_SA_SCOPES = [os.getenv('GCP_CS_SA_SCOPES', 'https://www.googleapis.com/auth/cloud-platform')]

    # Service-account key document in the shape expected by
    # service_account.Credentials.from_service_account_info.
    service_account_info = {
        'type': 'service_account',
        'project_id': GCP_PROJECT_ID,
        'private_key_id': GCP_SA_PRIVATE_KEY_ID,
        # Env vars carry literal "\n" sequences; restore real newlines for the PEM key.
        'private_key': str(GCP_SA_PRIVATE_KEY).replace('\\n', '\n'),
        'client_email': GCP_SA_CLIENT_EMAIL,
        'client_id': GCP_SA_CLIENT_ID,
        'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
        'token_uri': 'https://oauth2.googleapis.com/token',
        'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs',
        'client_x509_cert_url': GCP_SA_CERT_URL
    }

    # Class-level cache so credentials are built only once per process.
    credentials = None

    @classmethod
    def _ensure_credentials(cls):
        """Build and cache the service-account credentials on first use.

        Deduplicates the lazy-initialization logic that was previously
        copy-pasted into both get_token and get_credentials.
        """
        if cls.credentials is None:
            cls.credentials = service_account.Credentials.from_service_account_info(
                cls.service_account_info, scopes=cls.GCP_SA_SCOPES)
        return cls.credentials

    @classmethod
    def get_token(cls):
        """Generate an OAuth access token with cloud storage access."""
        credentials = cls._ensure_credentials()
        # Refresh on every call so the returned token is always valid.
        request = google.auth.transport.requests.Request()
        credentials.refresh(request)
        current_app.logger.info('Call successful: obtained token.')
        return credentials.token

    @classmethod
    def get_credentials(cls):
        """Generate GCP auth credentials to pass to a GCP client."""
        credentials = cls._ensure_credentials()
        current_app.logger.info('Call successful: obtained credentials.')
        return credentials
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#fileencoding: utf-8
#-----------------------------------------------#
# python standard library
#-----------------------------------------------#
import calendar
import csv
from enum import Enum
from datetime import datetime as dt
#-----------------------------------------------#
# pip
#-----------------------------------------------#
from oauth2client import tools
#-----------------------------------------------#
# my lib
#-----------------------------------------------#
import gspread
from zaimapi import ZaimAPI, ZaimLocalDB
class Payer(Enum):
    """Who paid for a ledger entry (derived from "_alpha"/"_beta" category suffixes)."""
    # UNKNOWN: the payer could not be determined from the category name;
    # such entries are split between the two payers.
    UNKNOWN = 0
    alpha = 1
    beta = 2
class PaymentFmt:
    """Column layout for the payment listing (see Payment.to_list for values)."""

    # Header row, in output column order. Built as a literal instead of the
    # previous 13 consecutive append() calls.
    Header = [
        "日付",
        "カテゴリ",
        "ジャンル",
        "商品名",
        "メモ",
        "場所",
        "支出額",
        "alpha支払額",
        "beta支払額",
        "alpha負担額",
        "beta負担額",
        "alpha個人用",
        "beta個人用",
    ]

    def __init__(self):
        pass
class Payment:
    """One Zaim ledger entry plus the derived paid/owed split between alpha and beta."""

    def __init__(self, date, category, genre, name, comment, place, price):
        self.date = date
        self.category = category
        self.genre = genre
        self.name = name
        self.comment = comment
        self.place = place
        self.price = price
        # Amounts each person paid out of the shared budget.
        self.alpha_paid = 0
        self.beta_paid = 0
        # Shares each person should bear for this entry.
        self.alpha_owe = 0
        self.beta_owe = 0
        # Personal (non-shared) spending per person.
        self.alpha_self_paid = 0
        self.beta_self_paid = 0
        # NOTE(review): never read inside this class — confirm external use.
        self.id_paid = 0
        self._set_paid()
        self._set_owe()

    def __repr__(self):
        return " ".join([str(i) for i in self.to_list()])

    def _pay_for_myself(self):
        # Categories containing "個人_" ("personal_") are personal spending.
        return "個人_" in self.category

    def is_for_oneself(self):
        """Public alias for the personal-spending check."""
        return self._pay_for_myself()

    def _who_paid(self):
        # The payer is encoded as a "_alpha"/"_beta" suffix on the category.
        if "_alpha" in self.category:
            return Payer.alpha
        elif "_beta" in self.category:
            return Payer.beta
        else:
            return Payer.UNKNOWN

    def _paid_by_id(self):
        # True when the first comment line is exactly "id".
        if "id" == self.comment.strip().split("\n")[0]:
            return True
        else:
            return False

    def get_normalized_category(self):
        """Category name with the payer/personal markers stripped."""
        return self.category.replace("_alpha", "").replace("_beta", "").replace("個人_", "")

    def _set_paid(self):
        """Attribute the price to the payer's paid or self-paid bucket."""
        if self._who_paid() == Payer.alpha:
            if self._pay_for_myself():
                self.alpha_self_paid += self.price
            else:
                self.alpha_paid += self.price
        elif self._who_paid() == Payer.beta:
            if self._pay_for_myself():
                self.beta_self_paid += self.price
            else:
                self.beta_paid += self.price
        else:
            # Unknown payer: split evenly, alpha taking the odd unit.
            self.beta_paid = self.price // 2
            self.alpha_paid = self.price - self.beta_paid

    def _set_owe(self):
        """Split the price evenly unless the entry is personal or marked "dp"."""
        if self._pay_for_myself():
            return
        # NOTE(review): a first comment line of "dp" skips the split —
        # presumably "don't pay/split"; confirm the intended meaning.
        if "dp" == self.comment.strip().split("\n")[0]:
            return
        # Removed two dead locals (category/genre) that were computed but unused.
        self.beta_owe = self.price // 2
        self.alpha_owe = self.price - self.beta_owe

    def get_date(self):
        return self.date

    def get_date_str(self):
        """Year-month string, e.g. "2020-03" (used as the spreadsheet sheet name)."""
        return "{}-{:02d}".format(self.date.year, self.date.month)

    def get_category(self):
        return self.category

    def get_genre(self):
        return self.genre

    def get_name(self):
        return self.name

    def get_place(self):
        return self.place

    def get_price(self):
        return self.price

    def get_alpha_paid(self):
        return self.alpha_paid

    def get_beta_paid(self):
        return self.beta_paid

    def get_alpha_owe(self):
        return self.alpha_owe

    def get_beta_owe(self):
        return self.beta_owe

    def get_alpha_self_paid(self):
        return self.alpha_self_paid

    def get_beta_self_paid(self):
        return self.beta_self_paid

    def to_list(self):
        """Row representation matching PaymentFmt.Header column order."""
        ret = []
        ret.append("{}-{}-{}".format(self.date.year, self.date.month, self.date.day))
        ret.append(self.category)
        ret.append(self.genre)
        ret.append(self.name)
        ret.append(self.comment)
        ret.append(self.place)
        ret.append(self.price)
        ret.append(self.alpha_paid)
        ret.append(self.beta_paid)
        ret.append(self.alpha_owe)
        ret.append(self.beta_owe)
        ret.append(self.alpha_self_paid)
        ret.append(self.beta_self_paid)
        return ret
class PaymentSummary:
    """Accumulates shared and personal totals over a sequence of Payment objects."""

    def __init__(self):
        self.payments = []
        self.category_total = {}
        self.alpha_category_total = {}
        self.beta_category_total = {}
        self.alpha_paid = 0
        self.beta_paid = 0
        self.alpha_owe = 0
        self.beta_owe = 0
        self.alpha_self_paid = 0
        self.beta_self_paid = 0

    def append(self, pay):
        """Fold one payment into the running totals."""
        self.payments.append(pay)
        category = pay.get_normalized_category()
        if pay.is_for_oneself():
            # Personal spending: track per-person category totals.
            a_self = pay.get_alpha_self_paid()
            b_self = pay.get_beta_self_paid()
            self.alpha_category_total[category] = (
                self.alpha_category_total.get(category, 0) + a_self)
            self.beta_category_total[category] = (
                self.beta_category_total.get(category, 0) + b_self)
            self.alpha_self_paid += a_self
            self.beta_self_paid += b_self
        else:
            # Shared spending: track the combined category total and who paid/owes.
            self.category_total[category] = (
                self.category_total.get(category, 0) + pay.get_price())
            self.alpha_paid += pay.get_alpha_paid()
            self.beta_paid += pay.get_beta_paid()
            self.alpha_owe += pay.get_alpha_owe()
            self.beta_owe += pay.get_beta_owe()

    def get_category_total(self):
        return self.category_total

    def get_alpha_category_total(self):
        return self.alpha_category_total

    def get_beta_category_total(self):
        return self.beta_category_total

    def get_alpha_paid_total(self):
        return self.alpha_paid

    def get_beta_paid_total(self):
        return self.beta_paid

    def get_alpha_owe_total(self):
        return self.alpha_owe

    def get_beta_owe_total(self):
        return self.beta_owe

    def get_alpha_self_paid_total(self):
        return self.alpha_self_paid

    def get_beta_self_paid_total(self):
        return self.beta_self_paid
def read_csv(filename):
    """Parse a Zaim CSV export into a list of Payment objects.

    The first row is a header and is skipped. Column indices follow the
    Zaim export layout: 0=date, 2=category, 3=genre, 6=name, 8=place,
    9=comment, 11=amount.
    """
    payments = []
    with open(filename, "r") as f:
        reader = csv.reader(f)
        # Skip the header through the csv reader so a header containing
        # quoted newlines is consumed correctly; the previous `next(f)`
        # consumed one raw line from the file instead.
        next(reader, None)
        for r in reader:
            date = dt.strptime(r[0], "%Y-%m-%d")
            category = r[2]
            genre = r[3]
            name = r[6]
            place = r[8]
            comment = r[9]
            price = int(r[11])
            payments.append(Payment(date, category, genre, name, comment, place, price))
    return payments
def get_data_by_api(apikey_filename, start_date, end_date):
    """Fetch Zaim ledger entries between the given dates via the REST API."""
    api = ZaimAPI(apikey_filename)
    print("(1/1) Get data by Zaim REST API")
    return api.get_entries(start_date, end_date)
def update_local_db(entries, this_month):
    """Replace the given month's rows in the local SQLite cache with `entries`."""
    local_db = ZaimLocalDB("./zaim.db")
    print("(1/2) delete entries in {}".format(this_month))
    local_db.delete_entries_by_date(this_month)
    print("(2/2) update entries in {}".format(this_month))
    local_db.update_entries(entries)
def gen_payments(entries):
    """Convert raw Zaim API entry dicts into Payment objects, iterating in reverse order."""
    payments = []
    for entry in entries[::-1]:
        payments.append(
            Payment(
                dt.strptime(entry["date"], "%Y-%m-%d"),
                entry["category"],
                entry["genre"],
                entry["name"],
                entry["comment"],
                entry["place"],
                int(entry["amount"]),
            )
        )
    return payments
def gen_reqvalues(pay_lists):
    """Build the spreadsheet row values (summary sections plus all entries).

    Returns a list of rows; cells starting with "=" are spreadsheet formulas
    that recompute the totals from the entry columns (h..k).
    """
    summary = PaymentSummary()
    for p in pay_lists:
        summary.append(p)
    alpha_paid = summary.get_alpha_paid_total()
    beta_paid = summary.get_beta_paid_total()
    alpha_owe = summary.get_alpha_owe_total()
    beta_owe = summary.get_beta_owe_total()
    alpha_self_paid = summary.get_alpha_self_paid_total()
    beta_self_paid = summary.get_beta_self_paid_total()
    values = []
    # Section: amounts actually paid (支払額).
    values.append(["■支払額"])
    values.append(["alpha支払い額", alpha_paid, "=sum(h:h)"])
    values.append(["beta支払い額", beta_paid, "=sum(i:i)"])
    values.append(["合計", alpha_paid + beta_paid, "=sum(c2:c3)"])
    values.append([""])
    # Section: amounts each should bear (負担額).
    values.append(["■負担額"])
    values.append(["alpha負担額", alpha_owe, "=sum(j:j)"])
    values.append(["beta負担額", beta_owe, "=sum(k:k)"])
    print("total_paid:", alpha_paid+beta_paid)
    print("alpha_paid:", alpha_paid)
    print("beta_paid:", beta_paid)
    print("alpha_owe:", alpha_owe)
    print("beta_owe:", beta_owe)
    # Settlement: positive diff means beta reimburses alpha.
    # NOTE(review): the negative branch prints/records the raw negative diff
    # and flips the formula (=c7-c2) — confirm this is the intended sign.
    diff = alpha_paid - alpha_owe
    if diff >= 0:
        print("beta -> alpha:", diff)
        values.append(["清算(betaからalpha)", diff, "=c2-c7"])
    else:
        print("alpha -> beta:", diff)
        values.append(["清算(alphaからbeta)", diff, "=c7-c2"])
    values.append([""])
    # Section: shared totals per category (カテゴリ別合計).
    values.append(["■カテゴリ別合計"])
    for k, v in summary.get_category_total().items():
        values.append([k, v])
    values.append([""])
    # Section: personal accounting per person (個人会計).
    values.append(["■ 個人会計"])
    values.append(["alpha個人合計", alpha_self_paid])
    for k, v in summary.get_alpha_category_total().items():
        values.append([k, v])
    values.append([""])
    values.append(["beta個人会計", beta_self_paid])
    for k, v in summary.get_beta_category_total().items():
        values.append([k, v])
    values.append([""])
    # Section: every entry as one row, preceded by the header (全エントリ).
    values.append(["■全エントリ"])
    values.append(PaymentFmt.Header)
    for p in pay_lists:
        values.append(p.to_list())
    return values
#-----------------------------------------------#
def main():
    """Fetch the month's Zaim entries, summarize them and optionally upload to a sheet."""
    n = dt.now()
    # Default span: first through last day of the current month.
    start_default = "{}-{:02d}-01".format(n.year, n.month)
    end_default = "{}-{:02d}-{:02d}".format(n.year, n.month, calendar.monthrange(n.year, n.month)[1])
    try:
        import argparse
        # Inherit oauth2client's standard flags (--auth_host_name etc.).
        parent_parser = argparse.ArgumentParser(parents=[tools.argparser])
        parent_parser.add_argument("--credential", type=str, default="sheets.googleapis.my-kakeibo.json")
        parent_parser.add_argument("--start", type=str, default=start_default)
        parent_parser.add_argument("--end", type=str, default=end_default)
        parent_parser.add_argument("--zaimapikey", type=str, default="zaim_secret.json")
        parent_parser.add_argument("--csv", type=str, default="")
        parent_parser.add_argument("--spreadsheet", action="store_true")
        flags = parent_parser.parse_args()
    except ImportError:
        # NOTE(review): with flags=None the very next line raises
        # AttributeError — confirm whether this fallback is ever exercised.
        flags = None
    print("span: ", flags.start, flags.end)
    if flags.spreadsheet == True:
        num_of_steps = 4
    else:
        num_of_steps = 3
    if flags.csv != "":
        # Offline mode: parse a previously exported CSV instead of the API.
        print("************* Start parsing CSV file *************")
        pay_lists = read_csv(flags.csv)
        print("************* End parsing CSV file *************")
    else:
        print("[1/{}] Get data from Zaim".format(num_of_steps))
        entries = get_data_by_api(flags.zaimapikey, flags.start, flags.end)
        print("[2/{}] Update local DB".format(num_of_steps))
        # "YYYY-MM" prefix of the start date selects the month to refresh.
        this_month = flags.start[:7]
        update_local_db(entries, this_month)
        print("[3/{}] Calc payments".format(num_of_steps))
        pay_lists = gen_payments(entries)
    values = gen_reqvalues(pay_lists)
    values.append([""])
    print("")
    if flags.spreadsheet:
        # The sheet is named after the first payment's "YYYY-MM".
        print("[4/{}] Send data to Google Spreadsheet".format(num_of_steps))
        print("sheet_name:", pay_lists[0].get_date_str())
        #print(values)
        g = gspread.Gspread(flags)
        print("(1/2) create a sheet whose name is {}".format(pay_lists[0].get_date_str()))
        result = g.create_new_sheet(pay_lists[0].get_date_str())
        print(result) # fixme: check result
        sheet_name = pay_lists[0].get_date_str()
        start_column = "A"
        end_column = chr(ord("A") + len(PaymentFmt.Header))
        range_name = "{}!{}:{}".format(sheet_name, start_column, end_column)
        print("range_name:", range_name)
        value_input_option = "USER_ENTERED"
        print("(2/2) append data to the sheet")
        result = g.append_data(range_name, value_input_option, values)
        print(result) # fixme: check result


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
from collections import namedtuple
class IDBase(str):
    """Immutable, fixed-length string ID split into typed, named segments.

    Subclasses configure:
      - `_attrs`: tuples of (name, start, end, type[, opt]) describing how the
        serialized string is sliced into attributes,
      - `_str_len`: the exact length of the serialized form,
      - `_tostr_fmt`: a format string rendering key attributes back to a string.
    """

    _attrs = (
        # ('server_id', 0, 12, ServerID),
        # ('_non_attr', 12, 13, validator),
        # ('mountpoint_index', 13, 16, MountPointIndex),
        # ('port', 13, 16, _port),
    )

    _str_len = 0

    _tostr_fmt = ''  # '{attr_1}-{attr_2:0>3}'

    def __new__(clz, *args, **kwargs):
        if len(args) + len(kwargs) == 1:
            # New from a single serialized string.
            # Fixed: dict.values() is a view on python3, so it must be wrapped
            # in list() before concatenating with list(args) — the old code
            # raised TypeError whenever the single argument was passed by keyword.
            s = (list(args) + list(kwargs.values()))[0]
            s = str(s)
            return clz._new_by_str(s)
        else:
            # multi args: new by making an instance
            return clz._new_by_attrs(*args, **kwargs)

    @classmethod
    def _new_by_attrs(clz, *args, **kwargs):
        """Build the serialized string from key attributes, then parse it."""
        # Create a namedtuple to simplify arguments receiving
        tuple_type = namedtuple('_' + clz.__name__,
                                ' '.join([x[0]
                                          for x in clz._attrs
                                          if clz._is_key_attr(x)
                                          ]))
        t = tuple_type(*args, **kwargs)

        # warn: if the value is float and _tostr_fmt is with float format,
        # raise ValueError. Not convert to string?
        s = clz._tostr_fmt.format(**{k: str(v)
                                     for k, v in t._asdict().items()})
        return clz._new_by_str(s)

    @classmethod
    def _new_by_str(clz, s):
        """Parse the serialized string `s` and attach the typed attributes.

        Raises ValueError when `s` does not have exactly `_str_len` characters.
        """
        if len(s) != clz._str_len:
            raise ValueError('Expected {clz} length'
                             ' to be {l} but {sl}: {s}'.format(
                                 clz=clz.__name__,
                                 l=clz._str_len,
                                 sl=len(s),
                                 s=s))

        x = super(IDBase, clz).__new__(clz, s)
        id_attrs = []

        for attr_definition in clz._attrs:
            k, start_idx, end_idx, attr_type, opt = clz._normalize(attr_definition)

            if opt['self']:
                # 'self' attrs refer to the whole ID rather than a slice of it.
                val = x
            else:
                val = attr_type(s[start_idx:end_idx])

            if opt['embed']:
                # Re-export the embedded ID's public attributes on this one.
                for a in val._id_base_attrs:
                    if not a.startswith('_'):
                        super(IDBase, x).__setattr__(a, getattr(val, a))
                        id_attrs.append(a)

            # Leading-underscore names are validators only, not attributes.
            if k.startswith('_'):
                continue

            # Bypass our own __setattr__, which forbids mutation.
            super(IDBase, x).__setattr__(k, val)
            id_attrs.append(k)

        super(IDBase, x).__setattr__('_id_base_attrs', tuple(id_attrs))

        return x

    @classmethod
    def _is_key_attr(clz, attr_definition):
        """Whether this attribute participates in _new_by_attrs arguments."""
        name, s, e, attr_type, opt = clz._normalize(attr_definition)
        if name.startswith('_'):
            return False

        return opt['key_attr']

    @classmethod
    def _normalize(clz, attr_definition):
        """Expand a 4- or 5-tuple attr definition into (name, s, e, type, opt dict)."""
        name, s, e, attr_type, opt = (attr_definition + (None,))[:5]

        if opt is None:
            opt = {}
        elif opt is False:
            opt = {'key_attr': False}
        elif opt == 'self':
            opt = {'key_attr': False, 'self': True}
        elif opt == 'embed':
            opt = {'embed': True}
        else:
            pass

        tmpl = {'key_attr': True,
                'self': False,
                'embed': False,
                }
        tmpl.update(opt)
        opt = tmpl

        # A 'self' attr is derived, never a constructor argument.
        if opt['self']:
            opt['key_attr'] = False

        return name, s, e, attr_type, opt

    def __setattr__(self, n, v):
        # IDs are value objects; reject any mutation after construction.
        raise TypeError('{clz} does not allow to change attribute'.format(
            clz=self.__class__.__name__))

    def as_tuple(self):
        """Return the key attributes, in declaration order, as a tuple."""
        lst = []
        for attr_definition in self._attrs:
            k = attr_definition[0]
            if IDBase._is_key_attr(attr_definition):
                lst.append(getattr(self, k))

        return tuple(lst)
|
nilq/baby-python
|
python
|
"""
Test execution of at and cron style scheduler policies when group has updates
"""
from test_repo.autoscale.fixtures import AutoscaleFixture
from time import sleep
class UpdateSchedulerScalingPolicy(AutoscaleFixture):
    """
    Verify update scheduler policy
    """

    @classmethod
    def setUpClass(cls):
        """
        Define updates to launch config
        """
        super(UpdateSchedulerScalingPolicy, cls).setUpClass()
        cls.upd_server_name = "upd_lc_config"
        cls.upd_image_ref = cls.lc_image_ref_alt
        cls.upd_flavor_ref = "3"

    def test_system_min_max_entities_at_style(self):
        """
        Create a scaling group with minentities between 0 and maxentities and
        maxentities=change, with 2 at style scheduler policies with change= +2 and -2,
        cooldown=0 and verify that the scale up scheduler policy scales upto the
        max entities specified on the group
        and scale down scheduler policy scales down upto the minentities.
        """
        minentities = 1
        maxentities = 2
        group = self._create_group(
            cooldown=0, minentities=minentities, maxentities=maxentities)
        # Request more servers than maxEntities allows; group must cap at max.
        self.create_default_at_style_policy_wait_for_execution(
            group_id=group.id, change=maxentities + 1)
        self.verify_group_state(group.id, group.groupConfiguration.maxEntities)
        # Scale down past minEntities; group must floor at min.
        self.create_default_at_style_policy_wait_for_execution(
            group_id=group.id, change=maxentities,
            scale_down=True)
        self.verify_group_state(group.id, group.groupConfiguration.minEntities)
        self.empty_scaling_group(group)

    def test_system_min_max_entities_cron_style(self):
        """
        Create a scaling group with minentities between 0 and maxentities and maxentities=change,
        with 2 cron style scheduler policies with change= +2 and -2, cooldown=0 and verify that
        the scale up scheduler policy scales upto the maxentities specified on the group
        and scale down scheduler policy scales down upto the minentities.
        Note: The group and policy cooldown are 0 and the scale up and scale down policies
        will keep trying to scale up beyond maxentities and scale down below minentities
        but will not be executed as min/maxenetities are met, until group is deleted.
        """
        minentities = 1
        maxentities = 2
        group = self._create_group(
            cooldown=0, minentities=minentities, maxentities=maxentities)
        self.autoscale_behaviors.create_schedule_policy_given(
            group_id=group.id,
            sp_cooldown=0,
            sp_change=maxentities + 1,
            schedule_cron='* * * * *')
        # A "* * * * *" cron fires once a minute; wait one cycle plus the
        # scheduler's polling interval before checking state.
        sleep(60 + self.scheduler_interval)
        self.verify_group_state(group.id, group.groupConfiguration.maxEntities)
        self.autoscale_behaviors.create_schedule_policy_given(
            group_id=group.id,
            sp_cooldown=0,
            sp_change=-maxentities,
            schedule_cron='* * * * *')
        sleep(60 + self.scheduler_interval)
        self.verify_group_state(group.id, group.groupConfiguration.minEntities)
        self.empty_scaling_group(group)

    def test_system_group_cooldown_atstyle(self):
        """
        Create a scaling group with cooldown>0, create a scheduler at style policy
        and wait for its execution, creating another at style policy scheduled
        to execute before the cooldown period expires does not trigger.
        Creating a 3rd at style policy after the cooldown, executes successfully.
        """
        group = self._create_group(cooldown=60)
        self.create_default_at_style_policy_wait_for_execution(group.id)
        self.verify_group_state(group.id, self.sp_change)
        # Second policy fires within the group cooldown, so state is unchanged.
        self.create_default_at_style_policy_wait_for_execution(group.id)
        self.verify_group_state(group.id, self.sp_change)
        sleep(60 - self.scheduler_interval)
        # After the cooldown elapses the third policy executes.
        self.create_default_at_style_policy_wait_for_execution(group.id)
        self.verify_group_state(group.id, self.sp_change * 2)
        self.empty_scaling_group(group)

    def test_system_upd_launch_config_at_style_scheduler(self):
        """
        Create a scaling group with minentities>0, update launch config, schedule at style
        policy to scale up and verify the new servers of the latest launch config,
        then schedule an at style policy to scale down and verify the servers remaining
        are of the latest launch config.
        """
        group = self._create_group(minentities=self.sp_change)
        active_list_b4_upd = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=group.groupConfiguration.minEntities)
        self._update_launch_config(group)
        self.create_default_at_style_policy_wait_for_execution(group.id)
        active_servers = self.sp_change + group.groupConfiguration.minEntities
        active_list_after_scale_up = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=active_servers)
        # Only the servers added after the update should carry the new config.
        upd_lc_server = set(
            active_list_after_scale_up) - set(active_list_b4_upd)
        self._verify_server_list_for_launch_config(upd_lc_server)
        self.create_default_at_style_policy_wait_for_execution(
            group.id, scale_down=True)
        # Scale-down removes the oldest servers, leaving only new-config ones.
        active_list_on_scale_down = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=group.groupConfiguration.minEntities)
        self._verify_server_list_for_launch_config(active_list_on_scale_down)
        self.empty_scaling_group(group)

    def test_system_upd_launch_config_cron_style_scheduler(self):
        """
        Create a scaling group with minentities>0, update launch config, schedule cron style
        policy to scale up and verify the new servers of the latest launch config,
        then schedule another cron style policy to scale down and verify the servers remaining
        are of the latest launch config.
        """
        group = self._create_group(minentities=self.sp_change)
        active_list_b4_upd = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=group.groupConfiguration.minEntities)
        self._update_launch_config(group)
        # Long policy cooldown (3600s) so each cron policy fires only once here.
        self.autoscale_behaviors.create_schedule_policy_given(
            group_id=group.id,
            sp_cooldown=3600,
            sp_change=self.sp_change,
            schedule_cron='* * * * *')
        sleep(60 + self.scheduler_interval)
        active_servers = self.sp_change + group.groupConfiguration.minEntities
        active_list_after_scale_up = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=active_servers)
        upd_lc_server = set(
            active_list_after_scale_up) - set(active_list_b4_upd)
        self._verify_server_list_for_launch_config(upd_lc_server)
        self.autoscale_behaviors.create_schedule_policy_given(
            group_id=group.id,
            sp_cooldown=3600,
            sp_change=-self.sp_change,
            schedule_cron='* * * * *')
        sleep(60 + self.scheduler_interval)
        active_list_on_scale_down = self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
            group_id=group.id,
            expected_servers=group.groupConfiguration.minEntities)
        self._verify_server_list_for_launch_config(active_list_on_scale_down)
        self.empty_scaling_group(group)

    def _create_group(self, cooldown=None, minentities=None, maxentities=None):
        """Create a scaling group with the given limits and register it for cleanup."""
        create_group_response = self.autoscale_behaviors.create_scaling_group_given(
            gc_cooldown=cooldown,
            gc_min_entities=minentities,
            gc_max_entities=maxentities,
            lc_name='upd_grp_scheduled')
        group = create_group_response.entity
        self.resources.add(group.id,
                           self.autoscale_client.delete_scaling_group)
        return group

    def _update_launch_config(self, group):
        """
        Update the scaling group's launch configuration and
        assert the update was successful.
        """
        update_launch_config_response = self.autoscale_client.update_launch_config(
            group_id=group.id,
            name=self.upd_server_name,
            image_ref=self.upd_image_ref,
            flavor_ref=self.upd_flavor_ref)
        # Fixed: assertEquals is a deprecated unittest alias (removed in 3.12).
        self.assertEqual(update_launch_config_response.status_code, 204,
                         msg='Updating launch config failed with {0} for group {1}'
                         .format(update_launch_config_response, group.id))

    def _verify_server_list_for_launch_config(self, server_list):
        """Assert every server in `server_list` reflects the updated launch config."""
        for each in list(server_list):
            get_server_resp = self.server_client.get_server(each)
            server = get_server_resp.entity
            self.assertTrue(self.upd_server_name in server.name)
            # upd_image_ref is set to lc_image_ref_alt in setUpClass, so these match.
            self.assertEqual(server.image.id, self.lc_image_ref_alt)
            self.assertEqual(server.flavor.id, self.upd_flavor_ref)
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
CardPay REST API
Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see changelog section. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cardpay.api_client import ApiClient
class PayoutsApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_payout(self, payout_request, **kwargs):  # noqa: E501
        """Create payout  # noqa: E501

        Synchronous wrapper around create_payout_with_http_info that
        returns only the deserialized body.

        :param PayoutRequest payout_request: payoutRequest (required)
        :return: PayoutResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs["_return_http_data_only"] = True
        (data) = self.create_payout_with_http_info(
            payout_request, **kwargs
        )  # noqa: E501
        return data

    def create_payout_with_http_info(self, payout_request, **kwargs):  # noqa: E501
        """Create payout  # noqa: E501

        :param PayoutRequest payout_request: payoutRequest (required)
        :return: PayoutResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ["payout_request"]  # noqa: E501
        all_params.append("_return_http_data_only")
        all_params.append("_preload_content")
        all_params.append("_request_timeout")

        params = locals()
        # Reject keyword arguments that are not declared for this endpoint.
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_payout" % key
                )
            params[key] = val
        del params["kwargs"]
        # verify the required parameter 'payout_request' is set
        if "payout_request" not in params or params["payout_request"] is None:
            raise ValueError(
                "Missing the required parameter `payout_request` when calling `create_payout`"
            )  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request object itself is sent as the JSON body.
        body_params = None
        if "payout_request" in params:
            body_params = params["payout_request"]
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            "Content-Type"
        ] = self.api_client.select_header_content_type(  # noqa: E501
            ["application/json"]
        )  # noqa: E501

        return self.api_client.call_api(
            "/api/payouts",
            "POST",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="PayoutResponse",  # noqa: E501
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )

    def get_payout(self, payout_id, **kwargs):  # noqa: E501
        """Read payout information  # noqa: E501

        Synchronous wrapper around get_payout_with_http_info.

        :param str payout_id: Payout ID (required)
        :return: PayoutResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs["_return_http_data_only"] = True
        (data) = self.get_payout_with_http_info(payout_id, **kwargs)  # noqa: E501
        return data

    def get_payout_with_http_info(self, payout_id, **kwargs):  # noqa: E501
        """Read payout information  # noqa: E501

        :param str payout_id: Payout ID (required)
        :return: PayoutResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ["payout_id"]  # noqa: E501
        all_params.append("_return_http_data_only")
        all_params.append("_preload_content")
        all_params.append("_request_timeout")

        params = locals()
        # Reject keyword arguments that are not declared for this endpoint.
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_payout" % key
                )
            params[key] = val
        del params["kwargs"]
        # verify the required parameter 'payout_id' is set
        if "payout_id" not in params or params["payout_id"] is None:
            raise ValueError(
                "Missing the required parameter `payout_id` when calling `get_payout`"
            )  # noqa: E501

        collection_formats = {}

        # payout_id is interpolated into the URL path, not the query string.
        path_params = {}
        if "payout_id" in params:
            path_params["payoutId"] = params["payout_id"]  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )  # noqa: E501

        return self.api_client.call_api(
            "/api/payouts/{payoutId}",
            "GET",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="PayoutResponse",  # noqa: E501
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )

    def get_payouts(self, request_id, **kwargs):  # noqa: E501
        """Get payouts information  # noqa: E501

        Synchronous wrapper around get_payouts_with_http_info.

        :param str request_id: Request ID (required)
        :param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
        :param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
        :param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000, minimal value is 1)
        :param str merchant_order_id: Merchant order number from the merchant system
        :param str payment_method: Used payment method type name from payment methods list
        :param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
        :param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
        :return: PayoutsList
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs["_return_http_data_only"] = True
        (data) = self.get_payouts_with_http_info(request_id, **kwargs)  # noqa: E501
        return data

    def get_payouts_with_http_info(self, request_id, **kwargs):  # noqa: E501
        """Get payouts information  # noqa: E501

        :param str request_id: Request ID (required)
        :param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
        :param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
        :param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000, minimal value is 1)
        :param str merchant_order_id: Merchant order number from the merchant system
        :param str payment_method: Used payment method type name from payment methods list
        :param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
        :param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
        :return: PayoutsList
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = [
            "request_id",
            "currency",
            "end_time",
            "max_count",
            "merchant_order_id",
            "payment_method",
            "sort_order",
            "start_time",
        ]  # noqa: E501
        all_params.append("_return_http_data_only")
        all_params.append("_preload_content")
        all_params.append("_request_timeout")

        params = locals()
        # Reject keyword arguments that are not declared for this endpoint.
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_payouts" % key
                )
            params[key] = val
        del params["kwargs"]
        # verify the required parameter 'request_id' is set
        if "request_id" not in params or params["request_id"] is None:
            raise ValueError(
                "Missing the required parameter `request_id` when calling `get_payouts`"
            )  # noqa: E501

        # Client-side validation mirroring the OpenAPI spec constraints.
        if "request_id" in params and len(params["request_id"]) > 50:
            raise ValueError(
                "Invalid value for parameter `request_id` when calling `get_payouts`, length must be less than or equal to `50`"
            )  # noqa: E501
        if "request_id" in params and len(params["request_id"]) < 1:
            raise ValueError(
                "Invalid value for parameter `request_id` when calling `get_payouts`, length must be greater than or equal to `1`"
            )  # noqa: E501
        if "max_count" in params and params["max_count"] > 10000:  # noqa: E501
            raise ValueError(
                "Invalid value for parameter `max_count` when calling `get_payouts`, must be a value less than or equal to `10000`"
            )  # noqa: E501
        if "max_count" in params and params["max_count"] < 1:  # noqa: E501
            raise ValueError(
                "Invalid value for parameter `max_count` when calling `get_payouts`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if "merchant_order_id" in params and len(params["merchant_order_id"]) > 50:
            raise ValueError(
                "Invalid value for parameter `merchant_order_id` when calling `get_payouts`, length must be less than or equal to `50`"
            )  # noqa: E501
        if "merchant_order_id" in params and len(params["merchant_order_id"]) < 0:
            raise ValueError(
                "Invalid value for parameter `merchant_order_id` when calling `get_payouts`, length must be greater than or equal to `0`"
            )  # noqa: E501
        if "payment_method" in params and len(params["payment_method"]) > 50:
            raise ValueError(
                "Invalid value for parameter `payment_method` when calling `get_payouts`, length must be less than or equal to `50`"
            )  # noqa: E501
        if "payment_method" in params and len(params["payment_method"]) < 0:
            raise ValueError(
                "Invalid value for parameter `payment_method` when calling `get_payouts`, length must be greater than or equal to `0`"
            )  # noqa: E501
        if "sort_order" in params and not re.search(
            r"asc|desc", params["sort_order"]
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for parameter `sort_order` when calling `get_payouts`, must conform to the pattern `/asc|desc/`"
            )  # noqa: E501

        collection_formats = {}

        path_params = {}

        # All filters for this endpoint travel in the query string.
        query_params = []
        if "currency" in params:
            query_params.append(("currency", params["currency"]))  # noqa: E501
        if "end_time" in params:
            query_params.append(("end_time", params["end_time"]))  # noqa: E501
        if "max_count" in params:
            query_params.append(("max_count", params["max_count"]))  # noqa: E501
        if "merchant_order_id" in params:
            query_params.append(
                ("merchant_order_id", params["merchant_order_id"])
            )  # noqa: E501
        if "payment_method" in params:
            query_params.append(
                ("payment_method", params["payment_method"])
            )  # noqa: E501
        if "request_id" in params:
            query_params.append(("request_id", params["request_id"]))  # noqa: E501
        if "sort_order" in params:
            query_params.append(("sort_order", params["sort_order"]))  # noqa: E501
        if "start_time" in params:
            query_params.append(("start_time", params["start_time"]))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )  # noqa: E501

        return self.api_client.call_api(
            "/api/payouts",
            "GET",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="PayoutsList",  # noqa: E501
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )

    def update_payout(self, payout_id, payout_update_request, **kwargs):  # noqa: E501
        """Update payout  # noqa: E501

        Synchronous wrapper around update_payout_with_http_info.

        :param str payout_id: Payout ID (required)
        :param PayoutUpdateRequest payout_update_request: payoutUpdateRequest (required)
        :return: PayoutUpdateResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs["_return_http_data_only"] = True
        (data) = self.update_payout_with_http_info(
            payout_id, payout_update_request, **kwargs
        )  # noqa: E501
        return data

    def update_payout_with_http_info(
        self, payout_id, payout_update_request, **kwargs
    ):  # noqa: E501
        """Update payout  # noqa: E501

        :param str payout_id: Payout ID (required)
        :param PayoutUpdateRequest payout_update_request: payoutUpdateRequest (required)
        :return: PayoutUpdateResponse
            If the method is called asynchronously,
            returns the request thread.
        """
        all_params = ["payout_id", "payout_update_request"]  # noqa: E501
        all_params.append("_return_http_data_only")
        all_params.append("_preload_content")
        all_params.append("_request_timeout")

        params = locals()
        # Reject keyword arguments that are not declared for this endpoint.
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_payout" % key
                )
            params[key] = val
        del params["kwargs"]
        # verify the required parameter 'payout_id' is set
        if "payout_id" not in params or params["payout_id"] is None:
            raise ValueError(
                "Missing the required parameter `payout_id` when calling `update_payout`"
            )  # noqa: E501
        # verify the required parameter 'payout_update_request' is set
        if (
            "payout_update_request" not in params
            or params["payout_update_request"] is None
        ):
            raise ValueError(
                "Missing the required parameter `payout_update_request` when calling `update_payout`"
            )  # noqa: E501

        collection_formats = {}

        # payout_id is interpolated into the URL path, not the query string.
        path_params = {}
        if "payout_id" in params:
            path_params["payoutId"] = params["payout_id"]  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The update object is sent as the JSON body.
        body_params = None
        if "payout_update_request" in params:
            body_params = params["payout_update_request"]
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            "Content-Type"
        ] = self.api_client.select_header_content_type(  # noqa: E501
            ["application/json"]
        )  # noqa: E501

        return self.api_client.call_api(
            "/api/payouts/{payoutId}",
            "PATCH",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="PayoutUpdateResponse",  # noqa: E501
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
|
nilq/baby-python
|
python
|
import numpy as np
## Wan-Ting borrowed this function from the io module of the stmpy package.
def _make_attr(self, attr, names, data):
'''
Trys to give object an attribute from self.data by looking through
each key in names. It will add only the fist match, so the order of
names dictates the preferences.
Inputs:
attr - Required : Name of new attribute
names - Required : List of names to search for
data - Required : Name of a current attribute in which the new
attribute is stored.
Returns:
1 - If successfully added the attribute
0 - If name is not found.
History:
2017-08-11 - HP : Initial commit.
2017-08-24 - HP : Now uses grid z value for Z attribute.
'''
dat = getattr(self, data)
for name in names:
if name in dat.keys():
setattr(self, attr, dat[name])
return 1
return 0
def loadsm4(filePath):
    '''
    The load_sm4 can now output several attributes: including I, iv, LIY, didv, didvStd, Z, en

    Inputs:
        filePath- Required : Name of the file

    Returns:
        self.info     - information of the pages
        self.header   - details of the pages
        self.data     - all the data from all of the pages
        self.en       - x axis for the spectropscopy data
        self.Z        - Topography of the data
        self.I        - Spectropscopy of the current data
        self.iv       - Average of the current spectroscopy data
        self.LIY      - Spectropscopy of the didv data
        self.didv     - Average of the didv spectroscopy data
        self.didvStd  - Standard deviation of all the didv spectropscopy data

    History:
        2020-07-15    - WT : Initial commit.
    '''
    # Local import keeps the rhk_stmpy dependency optional for the module.
    import rhk_stmpy.rhk_sm4 as sm4
    f = sm4.load_sm4(filePath)
    # "self" is a plain Spy container here, not a method receiver -- results
    # are attached to it dynamically and it is returned at the end.
    self = Spy()
    self.info = {}
    self.info = f.print_info()
    # Build per-page labels as "<stripped type><name>", e.g. "LINECurrent".
    name = f.print_info().iloc[:, 0].to_numpy()
    it = f.print_info().iloc[:, 1].to_numpy()
    namef = np.char.strip(it.astype(str), 'DATA_')
    names = namef + name
    label = {}
    for ix, item in zip(range(0,len(names)), names):
        label[ix] = item
    self.data = {}
    for ix, line in enumerate(f):
        self.data[ix] = f[ix].data
    self.header = {}
    for ix, line in enumerate(f):
        self.header[ix] = f[ix].attrs
    def getf(channel):
        # Index of the page whose label matches `channel`; 100 is a sentinel
        # for "not found" (assumes fewer than 100 pages -- TODO confirm).
        res = 100
        for key in label:
            if(label[key] == channel):
                res = list(label.values()).index(channel)
        return(res)
    liy = getf('LINELIA Current')
    i = getf('LINECurrent')
    z = getf('IMAGETopography')
    self.en = {}
    # Energy axis: taken from the LIY page when present, else from page 0
    # (presumably coords[1][1] holds the bias axis -- verify against rhk_sm4).
    if liy < 100:
        self.en = f[liy].coords[1][1]
    else:
        self.en = f[0].coords[1][1]
    # _make_attr looks the integer page index up in self.data.
    if _make_attr(self, 'LIY', [liy], 'data'):
        self.didv = np.mean(self.LIY, axis=0)
        self.didvStd = np.std(self.LIY, axis=0)
    else:
        print('ERR: LIY channel not found')
    if _make_attr(self, 'I', [i], 'data'):
        self.iv = np.mean(self.I, axis=0)
    else:
        print('ERR: Current not found')
    if _make_attr(self, 'Z', [z], 'data'):
        self.Z = self.Z
    else:
        print('ERR: Z channel not found')
    return self
class Spy(object):
    """Lightweight empty container; loadsm4 attaches result attributes to it
    dynamically."""
    pass
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Author: Ali Assaf <ali.assaf.mail@gmail.com>
# Copyright: (C) 2010 Ali Assaf
# License: GNU General Public License <http://www.gnu.org/licenses/>
from itertools import product
def solve_sudoku(size, grid):
    """ An efficient Sudoku solver using Algorithm X.

    >>> grid = [
    ...     [5, 3, 0, 0, 7, 0, 0, 0, 0],
    ...     [6, 0, 0, 1, 9, 5, 0, 0, 0],
    ...     [0, 9, 8, 0, 0, 0, 0, 6, 0],
    ...     [8, 0, 0, 0, 6, 0, 0, 0, 3],
    ...     [4, 0, 0, 8, 0, 3, 0, 0, 1],
    ...     [7, 0, 0, 0, 2, 0, 0, 0, 6],
    ...     [0, 6, 0, 0, 0, 0, 2, 8, 0],
    ...     [0, 0, 0, 4, 1, 9, 0, 0, 5],
    ...     [0, 0, 0, 0, 8, 0, 0, 7, 9]]
    >>> for solution in solve_sudoku((3, 3), grid):
    ...     print(*solution, sep='\\n')
    [5, 3, 4, 6, 7, 8, 9, 1, 2]
    [6, 7, 2, 1, 9, 5, 3, 4, 8]
    [1, 9, 8, 3, 4, 2, 5, 6, 7]
    [8, 5, 9, 7, 6, 1, 4, 2, 3]
    [4, 2, 6, 8, 5, 3, 7, 9, 1]
    [7, 1, 3, 9, 2, 4, 8, 5, 6]
    [9, 6, 1, 5, 3, 7, 2, 8, 4]
    [2, 8, 7, 4, 1, 9, 6, 3, 5]
    [3, 4, 5, 2, 8, 6, 1, 7, 9]
    """
    R, C = size
    N = R * C
    # Exact-cover columns, one per constraint: every cell filled ("rc"), and
    # each number appearing once per row ("rn"), column ("cn") and box ("bn").
    X = ([("rc", rc) for rc in product(range(N), range(N))] +
         [("rn", rn) for rn in product(range(N), range(1, N + 1))] +
         [("cn", cn) for cn in product(range(N), range(1, N + 1))] +
         [("bn", bn) for bn in product(range(N), range(1, N + 1))])
    # Exact-cover rows, one per candidate placement (row, col, number),
    # listing the four constraints that placement satisfies.
    Y = dict()
    for r, c, n in product(range(N), range(N), range(1, N + 1)):
        b = (r // R) * R + (c // C) # Box number
        Y[(r, c, n)] = [
            ("rc", (r, c)),
            ("rn", (r, n)),
            ("cn", (c, n)),
            ("bn", (b, n))]
    X, Y = exact_cover(X, Y)
    # Pre-select the given clues so the search only fills the blanks.
    for i, row in enumerate(grid):
        for j, n in enumerate(row):
            if n:
                select(X, Y, (i, j, n))
    # Each exact-cover solution is written back into grid and yielded;
    # note the same grid object is mutated for every solution.
    for solution in solve(X, Y, []):
        for (r, c, n) in solution:
            grid[r][c] = n
        yield grid
def exact_cover(X, Y):
    """Index the exact-cover matrix: turn the iterable of columns *X* into a
    mapping {column: set of rows that cover it}, derived from the row
    definitions in *Y*. Returns the new column index and *Y* unchanged."""
    columns = {column: set() for column in X}
    for row_id, covered in Y.items():
        for column in covered:
            columns[column].add(row_id)
    return columns, Y
def solve(X, Y, solution):
    # Knuth's Algorithm X: when no columns remain, every constraint is
    # covered and `solution` is complete.
    if not X:
        yield list(solution)
    else:
        # Choose the most-constrained column (fewest candidate rows).
        c = min(X, key=lambda c: len(X[c]))
        # Iterate over a copy: select() mutates X[c] while we loop.
        for r in list(X[c]):
            solution.append(r)
            cols = select(X, Y, r)
            for s in solve(X, Y, solution):
                yield s
            # Backtrack: restore the columns removed by select().
            deselect(X, Y, r, cols)
            solution.pop()
def select(X, Y, r):
    """Cover row *r*: remove every column it satisfies from X, and remove all
    conflicting rows from the remaining columns. Returns the removed column
    sets so deselect() can restore them."""
    removed_columns = []
    for column in Y[r]:
        for other_row in X[column]:
            for other_column in Y[other_row]:
                if other_column != column:
                    X[other_column].remove(other_row)
        removed_columns.append(X.pop(column))
    return removed_columns
def deselect(X, Y, r, cols):
    """Undo a prior select(X, Y, r), consuming the saved column sets in
    *cols* in reverse order and re-adding the rows that were removed."""
    for column in reversed(Y[r]):
        X[column] = cols.pop()
        for row in X[column]:
            for other_column in Y[row]:
                if other_column != column:
                    X[other_column].add(row)
if __name__ == "__main__":
    # Running as a script executes the doctest embedded in solve_sudoku.
    import doctest
    doctest.testmod()
|
nilq/baby-python
|
python
|
import unittest
from game_classes import Card
class TestCard(unittest.TestCase):
    """Unit tests for game_classes.Card."""

    def test_init(self):
        # A freshly constructed card starts with no matches, 15 drawn
        # numbers, and a 3-row layout.
        test_card = Card()
        self.assertEqual(test_card.counter, 0)
        self.assertEqual(len(test_card.selected_numbers), 15)
        self.assertEqual(len(test_card.card), 3)

    def test_print_card(self):
        # NOTE(review): this only exercises print_card for crashes; the final
        # assertion compares a local constant with itself and can never fail.
        test_card = Card()
        player = '#1'
        print()
        test_card.print_card(player)
        self.assertEqual(player, '#1')

    def test_check_number_in_card(self):
        # NOTE(review): assumes 90 is never among the card's drawn numbers,
        # so the counter stays 0 -- confirm selected_numbers excludes 90,
        # otherwise this test is flaky.
        test_card = Card()
        number = 90
        player = '#1'
        test_card.check_number_in_card(number, player)
        self.assertEqual(test_card.counter, 0)
|
nilq/baby-python
|
python
|
# An empty class has a dictionary that ...
# holds the attributes of the object.
class A(object):
    """An empty class: instances keep their attributes in ``__dict__``."""
    pass

# Rebind the name to an instance and replace its attribute dictionary
# wholesale; mutating __dict__ afterwards is equivalent to attribute
# assignment.
A = A()
A.__dict__ = dict(key11=1, key2=2)
A.__dict__['key2'] = 3
print(A.__dict__['key2'])  # 3
|
nilq/baby-python
|
python
|
# Simple checkout script (prompts in Portuguese): reads a quantity and a
# unit price, then prints the total price.
qtde = int(input('Qual a Qtde: '))
valor = float(input('Qual valor unitário desse produto: '))
preco_total = qtde * valor
print('O preço total é: {}'.format(preco_total))
|
nilq/baby-python
|
python
|
"""Application settings."""
import os
import pydantic
class Settings(pydantic.BaseSettings):
    """Main application config.

    It takes settings from environment variables.
    """

    # Database connection string; read eagerly so the app fails fast at
    # import time when SQLALCHEMY_URI is missing.
    sqlalchemy_uri: str = os.environ['SQLALCHEMY_URI']
    # Token required by the import endpoint (AUTH_IMPORT_TOKEN env var).
    import_token: str = os.environ['AUTH_IMPORT_TOKEN']
nilq/baby-python
|
python
|
import math
import requests
from typing import Tuple, List
# Placeholder -- supply a real Google API key at deploy time; never commit one.
AUTH_KEY = 'GOOGLE API KEY'
PI = math.pi  # local alias used by the math helpers below

# Type aliases for readability.
LatLng = Tuple[float, float]  # (latitude, longitude) pair in degrees
Polygon = List[LatLng]        # ordered list of polygon vertices
"""
Various mathematical formulas for use in Google's isLocationOnEdge and containsLocation algorithms.
Unless otherwise specified all math utilities have been ported from:
Google's android-map-utils PolyUtil class:
https://github.com/googlemaps/android-maps-utils/blob/master/library/src/main/java/com/google/maps/android/PolyUtil.java
Google's android-map-utils MathUtil class:
https://github.com/googlemaps/android-maps-utils/blob/master/library/src/main/java/com/google/maps/android/MathUtil.java
"""
def decode(point_str: str) -> Polygon:
    """
    Decode a polyline encoded with Google's algorithm
    (http://code.google.com/apis/maps/documentation/polylinealgorithm.html).

    Ported from tuvtran's PopMap placerequest.py
    (https://github.com/tuvtran/PopMap); the same logic exists in Google's
    android-map-utils PolyUtil class.

    NOTE(review): tuples are appended as (x, y) = (longitude, latitude) --
    see the (prev_x, prev_y) append below and the swap in get_accidents --
    despite the original docstring claiming (latitude, longitude).
    """
    # Split the character stream into 5-bit chunks; a chunk whose 0x20 bit
    # is clear terminates the current coordinate value.
    coord_chunks = [[]]
    for char in point_str:
        value = ord(char) - 63

        split_after = not (value & 0x20)

        value &= 0x1F

        coord_chunks[-1].append(value)

        if split_after:
            coord_chunks.append([])

    # The loop always leaves one trailing empty chunk -- drop it.
    del coord_chunks[-1]

    coords = []

    for coord_chunk in coord_chunks:
        coord = 0

        # Chunks are little-endian, 5 bits each.
        for i, chunk in enumerate(coord_chunk):
            coord |= chunk << (i * 5)

        # Odd values encode negatives (zig-zag); then scale from
        # 1e-5-degree integer units back to degrees.
        if coord & 0x1:
            coord = ~coord  # invert
        coord >>= 1
        coord /= 100000.0

        coords.append(coord)

    points = []
    prev_x = 0
    prev_y = 0

    # Decoded values are deltas from the previous point, alternating
    # latitude (y) then longitude (x); a (0, 0) delta is skipped.
    for i in range(0, len(coords) - 1, 2):
        if coords[i] == 0 and coords[i + 1] == 0:
            continue

        prev_x += coords[i + 1]
        prev_y += coords[i]

        points.append((round(prev_x, 6), round(prev_y, 6)))

    return points
def intersects(lat1, lat2, lng2, lat3, lng3, geodesic):
    # Port of PolyUtil.intersects: does the segment from (lat1, lng 0) to
    # (lat2, lng2) cross the vertical ray north from (lat3, lng3)?
    # All angles are radians; the first endpoint is normalized to lng == 0.
    if (lng3 >= 0 and lng3 >= lng2) or (lng3 < 0 and lng3 < lng2):
        return False
    # Point is at the south pole: never intersects.
    if lat3 <= -PI / 2:
        return False
    # Either endpoint at a pole: no well-defined crossing.
    if lat1 <= -PI / 2 or lat2 <= -PI / 2 or lat1 >= PI / 2 or lat2 >= PI / 2:
        return False
    if lng2 <= -PI:
        return False
    # Latitude of the straight (linearly interpolated) chord at lng3.
    linear_lat = (lat1 * (lng2 - lng3) + lat2 * lng3) / lng2
    # Northern-hemisphere segment with the point below the chord: no crossing.
    if lat1 >= 0 and lat2 >= 0 and lat3 < linear_lat:
        return False
    # Southern-hemisphere segment with the point at/above the chord: crossing.
    if lat1 <= 0 and lat2 <= 0 and lat3 >= linear_lat:
        return True
    # Point above the north-pole latitude of the chord: crossing.
    if lat3 >= PI / 2:
        return True
    # Otherwise compare against the great-circle (geodesic) latitude or the
    # rhumb (Mercator-interpolated) latitude at lng3.
    return math.tan(lat3) >= tan_lat_gc(lat1, lat2, lng2, lng3) if geodesic else mercator(lat3) >= mercator_rhumb(
        lat1, lat2, lng2, lng3)
def mercator_rhumb(lat1, lat2, lng2, lng3):
    """Mercator y-value of a rhumb line from (lat1, 0) to (lat2, lng2),
    linearly interpolated at longitude lng3 (all radians)."""
    weighted = mercator(lat1) * (lng2 - lng3) + mercator(lat2) * lng3
    return weighted / lng2
def mercator(lat):
    """Project a latitude in radians onto the Mercator y-axis."""
    half_angle = lat * 0.5 + PI / 4
    return math.log(math.tan(half_angle))
def tan_lat_gc(lat1, lat2, lng2, lng3):
    """Tangent of the great-circle latitude at longitude lng3, for the
    circle through (lat1, 0) and (lat2, lng2) (all radians)."""
    numerator = math.tan(lat1) * math.sin(lng2 - lng3) + math.tan(lat2) * math.sin(lng3)
    return numerator / math.sin(lng2)
def to_radians(degrees):
    """Convert an angle from degrees to radians."""
    radians = degrees * PI / 180
    return radians
def wrap(n, minimum, maximum):
    """Wrap *n* into the half-open interval [minimum, maximum)."""
    if minimum <= n < maximum:
        return n
    return mod(n - minimum, maximum - minimum) + minimum
def mod(x, m):
    """Floored modulo: result always has the sign of *m*."""
    shifted = (x % m) + m
    return shifted % m
def hav(x):
    """Haversine of the angle *x* in radians: sin^2(x / 2)."""
    half_sin = math.sin(x * 0.5)
    return half_sin * half_sin
def clamp(x, low, high):
    """Restrict *x* to the closed interval [low, high]."""
    if x < low:
        return low
    if x > high:
        return high
    return x
def hav_distance(lat1, lat2, d_lng):
    """Haversine of the central angle between two points at latitudes
    lat1/lat2 separated by d_lng in longitude (all radians)."""
    lat_term = hav(lat1 - lat2)
    lng_term = hav(d_lng) * math.cos(lat1) * math.cos(lat2)
    return lat_term + lng_term
def inverse_mercator(y):
    """Invert the Mercator projection: map a y-coordinate back to a
    latitude in radians."""
    gudermann = 2.0 * math.atan(math.exp(y))
    return gudermann - 1.5707963267948966
def sin_delta_bearing(lat1, lng1, lat2, lng2, lat3, lng3):
    # Port of MathUtil.sinDeltaBearing: sine of the difference between the
    # initial bearing from point 1 to point 3 and from point 1 to point 2
    # (all inputs in radians).
    sin_lat1 = math.sin(lat1)
    cos_lat2 = math.cos(lat2)
    cos_lat3 = math.cos(lat3)
    lat31 = lat3 - lat1
    lng31 = lng3 - lng1
    lat21 = lat2 - lat1
    lng21 = lng2 - lng1
    # (a, b) and (c, d) are the unnormalized (sin, cos) of the two bearings.
    a = math.sin(lng31) * cos_lat3
    c = math.sin(lng21) * cos_lat2
    b = math.sin(lat31) + 2.0 * sin_lat1 * cos_lat3 * hav(lng31)
    d = math.sin(lat21) + 2.0 * sin_lat1 * cos_lat2 * hav(lng21)
    denom = (a * a + b * b) * (c * c + d * d)
    # Degenerate (zero-length) bearing vectors: fall back to 1.0.
    return 1.0 if denom <= 0.0 else (a * d - b * c) / math.sqrt(denom)
def sin_sum_from_hav(x, y):
    """Sine of the sum of two angles given their haversines *x* and *y*."""
    root_x = math.sqrt(x * (1.0 - x))
    root_y = math.sqrt(y * (1.0 - y))
    return 2.0 * (root_x + root_y - 2.0 * (root_x * y + root_y * x))
def hav_from_sin(x):
    """Haversine of an angle given its sine *x* (angle assumed in
    [0, pi/2] so the cosine is non-negative)."""
    squared = x * x
    return squared / (1.0 + math.sqrt(1.0 - squared)) * 0.5
def sin_from_hav(h):
    """Sine of an angle given its haversine *h*."""
    root = math.sqrt(h * (1.0 - h))
    return 2.0 * root
"""
Methods below have not been imported from any standalone API or package and simply
exist to aide in the function of this entire package
"""
def within_city_bounds(origin: LatLng, destination: LatLng) -> bool:
    """Return True when both *origin* and *destination* lie inside the
    hard-coded Los Angeles boundary polygon (vertices as (lat, lng) in
    degrees -- presumably hand-digitized; TODO confirm provenance)."""
    la_bounds = [(33.8641899712294, -118.281468637671), (33.8627792, -118.2814372),
                 (33.862734758137, -118.281534783721),
                 (33.8415, -118.2825), (33.8415, -118.2965), (33.8135, -118.293), (33.803, -118.2965),
                 (33.803, -118.2685), (33.81, -118.265), (33.81, -118.2545), (33.803, -118.251), (33.7995, -118.23),
                 (33.81, -118.2265), (33.824, -118.2335), (33.8345, -118.23), (33.8345, -118.223), (33.824, -118.2195),
                 (33.789, -118.223), (33.7855, -118.216), (33.7785, -118.216), (33.7645, -118.2405), (33.754, -118.237),
                 (33.754, -118.244), (33.7155, -118.2265), (33.6875, -118.223), (33.6875, -118.237), (33.67, -118.251),
                 (33.6595, -118.272), (33.656, -118.321), (33.6595, -118.349), (33.67, -118.3665), (33.7295, -118.335),
                 (33.733, -118.3245), (33.7505, -118.321), (33.7505, -118.314), (33.8695, -118.314),
                 (33.873, -118.2965),
                 (33.9465, -118.2965), (33.936, -118.3035), (33.936, -118.3175), (33.9675, -118.321),
                 (33.964, -118.335),
                 (33.978, -118.3385), (33.978, -118.3665), (33.9605, -118.3665), (33.957, -118.3735),
                 (33.957, -118.3665),
                 (33.9325, -118.363), (33.9255, -118.3665), (33.929, -118.4225), (33.9115, -118.419),
                 (33.9115, -118.503),
                 (33.9535, -118.5275), (33.964, -118.5415), (33.971, -118.5415), (34.0165, -118.4505),
                 (34.0235, -118.454), (34.041, -118.475), (34.0375, -118.4855), (34.0445, -118.4925),
                 (33.9815, -118.552),
                 (33.985, -118.573), (34.041, -118.5695), (34.0655, -118.573), (34.069, -118.601), (34.076, -118.6045),
                 (34.1285, -118.5695), (34.1425, -118.608), (34.1425, -118.6325), (34.16, -118.6465),
                 (34.167, -118.664),
                 (34.174, -118.664), (34.1775, -118.671), (34.2125, -118.671), (34.216, -118.664), (34.2405, -118.65),
                 (34.2405, -118.636), (34.272, -118.636), (34.279, -118.629), (34.279, -118.5975), (34.307, -118.5905),
                 (34.3, -118.5485), (34.3105, -118.552), (34.321, -118.5485), (34.3175, -118.5345), (34.342, -118.5065),
                 (34.335, -118.4925), (34.335, -118.405), (34.3245, -118.4015), (34.321, -118.391),
                 (34.3035, -118.4015),
                 (34.3035, -118.384), (34.2895, -118.377), (34.2895, -118.3665), (34.2825, -118.3595),
                 (34.2895, -118.3035), (34.2965, -118.3035), (34.2965, -118.2825), (34.2825, -118.2825),
                 (34.286, -118.2755), (34.2825, -118.2335), (34.265, -118.2335), (34.2615, -118.251),
                 (34.251, -118.251),
                 (34.2475, -118.2615), (34.2195, -118.2615), (34.216, -118.3315), (34.202, -118.3385),
                 (34.1985, -118.3595), (34.167, -118.3525), (34.1495, -118.342), (34.16, -118.3245), (34.16, -118.314),
                 (34.167, -118.3105), (34.16, -118.2755), (34.125, -118.258), (34.1285, -118.2405), (34.139, -118.2405),
                 (34.139, -118.2335), (34.153, -118.23), (34.1495, -118.209), (34.1565, -118.195), (34.153, -118.181),
                 (34.141965071875, -118.181), (34.1418339, -118.180908), (34.1412999, -118.180757),
                 (34.1412019, -118.180646), (34.1411289, -118.180513), (34.1410909, -118.180082),
                 (34.1408809, -118.180097), (34.1408179, -118.180198), (34.1407129, -118.180766),
                 (34.1407352709369, -118.181), (34.132, -118.181), (34.1285, -118.1635), (34.118, -118.1635),
                 (34.111, -118.167), (34.111, -118.174), (34.104, -118.174), (34.104, -118.153), (34.0585, -118.16),
                 (34.0585, -118.188), (34.0095, -118.188), (34.0095, -118.237), (33.985, -118.2335), (33.985, -118.251),
                 (33.957, -118.251), (33.957, -118.23), (33.95, -118.223), (33.9255, -118.2265), (33.9255, -118.251),
                 (33.9185, -118.251), (33.9185, -118.279)]
    # Both endpoints must fall inside the polygon.
    return inside_polygon(origin, la_bounds) and inside_polygon(destination, la_bounds)
def find_distance(latlng1: LatLng, latlng2: LatLng) -> float:
    """Great-circle (haversine) distance in meters between two
    (latitude, longitude) tuples given in degrees."""
    lat_a, lng_a = latlng1[0], latlng1[1]
    lat_b, lng_b = latlng2[0], latlng2[1]
    earth_radius = 6371.00  # mean Earth radius in kilometers
    phi_a = to_radians(lat_a)
    phi_b = to_radians(lat_b)
    delta_phi = to_radians(lat_b - lat_a)
    delta_lambda = to_radians(lng_b - lng_a)
    # Haversine formula.
    a = math.sin(delta_phi / 2) * math.sin(delta_phi / 2) + math.cos(phi_a) * math.cos(phi_b) * math.sin(
        delta_lambda / 2) * math.sin(delta_lambda / 2)
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    # Kilometers -> meters.
    return (earth_radius * central_angle) * 1000
def inside_polygon(point, polygon):
    """Ray-casting point-in-polygon test.

    *point* is an (x, y) pair and *polygon* an ordered vertex list; an odd
    number of edge crossings of a horizontal ray means the point is inside.
    """
    x, y = point[0], point[1]
    vertex_count = len(polygon)
    inside = False
    prev_x, prev_y = polygon[0]
    for idx in range(vertex_count + 1):
        cur_x, cur_y = polygon[idx % vertex_count]
        # The ray can only cross edges that straddle y and start left of x.
        if min(prev_y, cur_y) < y <= max(prev_y, cur_y) and x <= max(prev_x, cur_x):
            if prev_y != cur_y:
                crossing_x = (y - prev_y) * (cur_x - prev_x) / (cur_y - prev_y) + prev_x
            else:
                crossing_x = 0
            if prev_x == cur_x or x <= crossing_x:
                inside = not inside
        prev_x, prev_y = cur_x, cur_y
    return inside
def get_accidents(lat: float, lng: float, radius: float, buckets: dict, decoded_polyline: List) -> List[dict]:
    """Return the accidents within *radius* meters of (lat, lng), limited to
    the spatial buckets touched by *decoded_polyline*.

    *buckets* maps keys to {'bucket_border': polygon, 'accidents': [...]};
    bucket 'b0' is always included (presumably a catch-all -- TODO confirm).
    """
    near_accidents = []
    tagged_accidents = []
    tagged_buckets_keys = []
    tagged_accidents.extend(buckets['b0']['accidents'])
    # Tag every bucket whose border contains some polyline point; each
    # bucket is added at most once.
    for point in decoded_polyline:
        for key in buckets:
            if key not in tagged_buckets_keys and key != 'b0':
                bucket = buckets[key]
                # decode() yields (lng, lat) points; swap to (lat, lng)
                # for the polygon containment test.
                if inside_polygon((point[1], point[0]), bucket['bucket_border']):
                    tagged_buckets_keys.append(key)
                    tagged_accidents.extend(bucket['accidents'])
                    break
    # Distance filter against the query point.
    for tagged_accident in tagged_accidents:
        if find_distance((lat, lng), (tagged_accident['lat'], tagged_accident['lng'])) <= radius:
            near_accidents.append(tagged_accident)
    return near_accidents
def find_directions(origin: LatLng, destination: LatLng, method: str) -> dict:
    """Query the Google Directions API for routes between two points.

    :param origin: (lat, lng) start point in degrees
    :param destination: (lat, lng) end point in degrees
    :param method: travel mode passed through to the API (e.g. 'walking')
    :returns: dict with 'status' and either 'routes' (on OK) or
              'user_error_msg' / 'log_error_google' describing the failure
    """
    parameters = {
        "origin": f'{origin[0]},{origin[1]}',
        "destination": f'{destination[0]},{destination[1]}',
        "mode": method,
        "alternatives": "true",
        "key": AUTH_KEY
    }
    response = requests.get(
        "https://maps.googleapis.com/maps/api/directions/json?", params=parameters)
    json_data = response.json()
    status = json_data["status"]
    # Map Google's status codes onto this API's response envelope.
    if status == 'OK':
        return {'status': 'OK', 'routes': json_data['routes']}
    elif status == 'ZERO_RESULTS':
        return {'status': 'ZERO_RESULTS',
                'user_error_msg': 'SafeWays API Found No SafePaths for the Origin-Destination Combination',
                'log_error_google': 'Google Directions API found zero results'}
    elif status == 'REQUEST_DENIED':
        return {'status': 'REQUEST_DENIED',
                'user_error_msg': 'SafeWays API Encountered an Internal Key Validation Error',
                'log_error_google': json_data["error_message"]}
    else:
        return {'status': 'SERVER_SIDE_ERROR', 'user_error_msg': 'SafeWays API Encountered a Internal Server Error',
                'log_error_google': json_data["error_message"]}
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamlit/proto/DeckGlJsonChart.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

# NOTE: generated by protoc from streamlit/proto/DeckGlJsonChart.proto.
# Do not edit by hand — regenerate from the .proto source instead.

DESCRIPTOR = _descriptor.FileDescriptor(
  name='streamlit/proto/DeckGlJsonChart.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n%streamlit/proto/DeckGlJsonChart.proto\"M\n\x0f\x44\x65\x63kGlJsonChart\x12\x0c\n\x04json\x18\x01 \x01(\t\x12\x0f\n\x07tooltip\x18\x02 \x01(\t\x12\x1b\n\x13use_container_width\x18\x04 \x01(\x08\x62\x06proto3'
)

# Message descriptor for DeckGlJsonChart: json spec (field 1), tooltip spec
# (field 2) and a use_container_width flag (field 4; field 3 is reserved/skipped
# in the .proto).
_DECKGLJSONCHART = _descriptor.Descriptor(
  name='DeckGlJsonChart',
  full_name='DeckGlJsonChart',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='json', full_name='DeckGlJsonChart.json', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='tooltip', full_name='DeckGlJsonChart.tooltip', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='use_container_width', full_name='DeckGlJsonChart.use_container_width', index=2,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=41,
  serialized_end=118,
)

DESCRIPTOR.message_types_by_name['DeckGlJsonChart'] = _DECKGLJSONCHART
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class materialised from the descriptor above.
DeckGlJsonChart = _reflection.GeneratedProtocolMessageType('DeckGlJsonChart', (_message.Message,), {
  'DESCRIPTOR' : _DECKGLJSONCHART,
  '__module__' : 'streamlit.proto.DeckGlJsonChart_pb2'
  # @@protoc_insertion_point(class_scope:DeckGlJsonChart)
  })
_sym_db.RegisterMessage(DeckGlJsonChart)

# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
import os
def skip_if_environ(name):
    """Decorator factory that disables a callable when env var *name* is set.

    If *name* is present in ``os.environ`` the decorated callable is replaced
    by a one-argument no-op returning ``None`` (suitable for test methods
    taking ``self``); otherwise the callable is returned unchanged.
    """
    if name not in os.environ:
        def passthrough(func):
            return func
        return passthrough

    def disabled(func):
        return lambda x: None
    return disabled
|
nilq/baby-python
|
python
|
from selenium import webdriver
import pandas as pd
import time
import os
# load product file
# Scrape description/tags for every product previously collected in
# glowpick_products.csv, resuming from an existing glowpick_info.csv if found.

# load product file
product = pd.read_csv('../dataset/glowpick_products.csv')

# Unique product page paths (relative); joined onto the site root below.
product_urls = product.product_url.unique()
url = 'https://www.glowpick.com'

# Chrome must be available on PATH for this driver to start.
driver = webdriver.Chrome()

# information dataframe — accumulates one row per product
info_df = pd.DataFrame()

# if there's file, load file and concatenate (lets the crawl resume after a crash)
if os.path.isfile('../dataset/glowpick_info.csv'):
    df = pd.read_csv('../dataset/glowpick_info.csv')
    info_df = pd.concat([info_df, df], axis=0)
print('out info_df.shape: ', info_df.shape)

# crawling information of product
for p_url in product_urls:
    print('='*100)
    print('in info_df.shape: ', info_df.shape)
    print('product url: ', p_url)
    driver.get(url + p_url)
    driver.implicitly_wait(5)

    # Skip products that were already crawled in a previous run.
    if info_df.shape[0] > 0:
        if p_url in info_df.product_url.unique():
            continue

    # name (displayed only; not persisted to the CSV)
    name = driver.find_element_by_xpath('//*[@id="gp-product-detail"]/div/ul[1]/li[2]/div/section[1]/h1/span').text
    print('product: ', name)

    # description
    describe = driver.find_element_by_css_selector('.product-detail__description-box.product-detail__tr > td > div').text
    print('describe: ', describe)

    # tags — individual <span> texts joined with '/'
    tags = driver.find_element_by_css_selector('.product-detail__tag-list.product-detail__tr > td > p')
    spans = tags.find_elements_by_tag_name('span')
    t_lst = []
    for span in spans:
        t_lst.append(span.text)
    tags = '/'.join(t_lst)
    print('tags: ', tags)

    # Append this product and checkpoint the whole frame to disk each loop,
    # so progress survives crashes/interruptions.
    df = pd.DataFrame({'product_url': [p_url],
                       'description': [describe],
                       'tag': [tags]})
    info_df = pd.concat([info_df, df], axis=0)
    info_df.to_csv('../dataset/glowpick_info.csv', index=False)
    print()
|
nilq/baby-python
|
python
|
import os
import sys
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
# Django settings for the picker demo project.
# NOTE(review): hard-coded SECRET_KEY and DEBUG=True are acceptable only
# because this is a demo; never ship these to production.
SECRET_KEY = '@$n=(b+ih211@e02_kup2i26e)o4ovt6ureh@xbkfz!&@b(hh*'

DEBUG = True

ALLOWED_HOSTS = []

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap3',
    'django_extensions',
    'picker.apps.PickerConfig',
    'demo',
)

MIDDLEWARE = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [os.path.join(BASE_DIR, 'demo/templates')],
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.debug',
            'django.template.context_processors.request',
            'django.contrib.auth.context_processors.auth',
            'django.contrib.messages.context_processors.messages',
            'demo.context_processors.demo',
        ],
    },
}]

WSGI_APPLICATION = 'demo.wsgi.application'

# SQLite database; path can be overridden via the DEMO_DB_NAME env var.
DATABASES = {'default': {
    'ENGINE': 'django.db.backends.sqlite3',
    'NAME': os.environ.get('DEMO_DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
}}

SITE_ID = 1

ROOT_URLCONF = 'demo.urls'

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, 'static'),
)

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')

# App-specific settings.
DEMO = {
    'dump_post_data': True
}

PICKER = {
    'FAKE_DATETIME_NOW': None,
    'NFL': {
        'TEAM_PICKER_WIDGET': 'demo.forms.TemplateTeamChoice',
    },
    'HQ': {
        'TEAM_PICKER_WIDGET': 'demo.forms.TemplateTeamChoice',
    }
}

# Freeze "now" for the whole demo process so fixture data (game schedules,
# picks) always lines up with a known date. Started here deliberately and
# never stopped.
from freezegun import freeze_time
freezer = freeze_time("2019-09-14 12:00:01")
freezer.start()
|
nilq/baby-python
|
python
|
from django import forms
from .models import AddressEntry
class AddressEntryForm(forms.ModelForm):
    """ModelForm exposing only the ``address`` field of :class:`AddressEntry`."""

    class Meta:
        model = AddressEntry
        fields = [
            'address',
        ]
|
nilq/baby-python
|
python
|
"""
Lines 5 and 6 were adapted from SO code:
http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python
"""
import sys
sys.path.insert(0, '..')
""" END """
import main as program
import pytest
def test_int_0():
    """Zero rendered at width 1 is the single digit '0'."""
    assert program._get_binary(0, 1) == '0'
def test_int_5():
    """5 rendered at width 3 is '101'."""
    assert program._get_binary(5, 3) == '101'
def test_int_1_with_larger_r():
    """1 rendered at width 5 is zero-padded to '00001'."""
    assert program._get_binary(1, 5) == '00001'
|
nilq/baby-python
|
python
|
from enum import Enum, auto
class DatabaseActionType(Enum):
    """Kinds of work items the database layer can be asked to perform."""

    # Writes do not require a response on the request.
    WRITE_DATA_STORAGE = auto()
    WRITE_STORAGE_INDEX = auto()
    # Reads need a response carrying the requested data.
    READ_CONNECTED_DEVICES = auto()
    READ_DEVICE = auto()  # RPC CALL
    DELETE_OLD_DATA = auto()
|
nilq/baby-python
|
python
|
# Build script for the python-ssh package.
import os
from importlib.util import module_from_spec, spec_from_file_location

import setuptools


def _load_version():
    """Return the ``version`` string defined in ssh2/version.py.

    The historical ``imp.load_source`` call used here was deprecated since
    Python 3.4 and removed in 3.12; importlib is the supported replacement
    and avoids importing the (possibly not-yet-installed) package itself.
    """
    spec = spec_from_file_location(
        "ssh2.version", os.path.join("ssh2", "version.py"))
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.version


version = _load_version()

setuptools.setup(
    name="python-ssh",
    version=version,
    packages=setuptools.find_packages(include=["ssh2", "ssh2.*"]),
    package_dir={"ssh2": "ssh2"},
    license="MIT",
    author="Deric Degagne",
    author_email="deric.degagne@gmail.com",
    description="A library to execute commands on remote hosts.",
    url="https://github.com/degagne/python-ssh",
    project_urls={
        "Bug Tracker": "https://github.com/degagne/python-ssh/issues",
        "Documentation": "https://python-ssh.readthedocs.io/en/latest/index.html"
    },
    install_requires=[
        "paramiko",
        "rich"
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Demo on how to run the simulation using the Gym environment
This demo creates a SimRearrangeDiceEnv environment and runs one episode using
a dummy policy.
"""
from rrc_example_package import rearrange_dice_env
from rrc_example_package.example import PointAtDieGoalPositionsPolicy
def main():
    """Run one episode in the simulated rearrange-dice environment using the
    example point-at-goal-positions policy with a randomly sampled goal."""
    env = rearrange_dice_env.SimRearrangeDiceEnv(
        goal=None,  # None -> the environment samples a random goal
        action_type=rearrange_dice_env.ActionType.POSITION,
        visualization=True,
    )

    observation = env.reset()
    policy = PointAtDieGoalPositionsPolicy(env.action_space, env.current_goal)

    step = 0
    done = False
    while not done:
        observation, _reward, done, info = env.step(
            policy.predict(observation, step))
        step = info["time_index"]
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#
# Copyright (c) 2019-2021 Ruben Perez Hidalgo (rubenperez038 at gmail dot com)
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
import requests
from bs4 import BeautifulSoup
import os
from os import path
REPO_BASE = path.abspath(path.join(path.dirname(__file__), '..'))
DOC_PATH = path.join(REPO_BASE, 'doc', 'html')
def list_doc_files():
    """Return the absolute paths of every .html file under the doc tree."""
    return [
        path.join(base_dir, name)
        for base_dir, _, files in os.walk(DOC_PATH)
        for name in files
        if name.endswith('.html')
    ]
def get_href(elm, current_file):
    """Extract and normalise the link target of an anchor element.

    Returns ``None`` when the element carries no href. Absolute http(s) URLs
    are returned as-is, except that '#error_er_*' fragments are stripped.
    Relative hrefs are resolved (fragment dropped) against the directory of
    *current_file* and returned as a real filesystem path.
    """
    try:
        href = elm['href']
    except KeyError:
        return None

    is_absolute = href.startswith('http://') or href.startswith('https://')
    if not is_absolute:
        base = path.dirname(current_file)
        return path.realpath(path.join(base, href.split('#')[0]))

    marker = '#error_er_'
    return href.split(marker)[0] if marker in href else href
def extract_links():
    """Scan every generated doc page and collect its anchors.

    Returns ``(external, internal)``: dicts mapping a link target to one of
    the files referencing it. External targets are http(s) URLs; internal
    targets are absolute filesystem paths.
    """
    external_links = {}
    internal_links = {}
    for fname in list_doc_files():
        with open(fname, 'rt') as f:
            soup = BeautifulSoup(f.read(), 'html.parser')
        for anchor in soup.find_all('a'):
            target = get_href(anchor, fname)
            if target is None:
                continue
            if target.startswith('http://') or target.startswith('https://'):
                external_links[target] = fname
            elif target.startswith('/'):
                internal_links[target] = fname
    return (external_links, internal_links)
def check_external_links(links):
    """HEAD-request each external URL and report non-200 final responses."""
    session = requests.Session()
    for url in sorted(links):
        print('Checking ', url)
        code = session.head(url, allow_redirects=True).status_code
        if code != 200:
            print('    ++++ {} response code: {}'.format(url, code))
def check_internal_links(links):
    """Report internal link targets that do not exist on disk."""
    for target, link_file in links.items():
        if path.exists(target):
            continue
        print('    ++++ Link {} in file {} does not exist'.format(target, link_file))
def main():
    """Collect all documentation links and validate both kinds."""
    external_links, internal_links = extract_links()
    check_external_links(external_links)
    check_internal_links(internal_links)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
Copyright 2016 Stephen Boyd, Enzo Busseti, Steven Diamond, BlackRock Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import cvxpy as cvx
import numpy as np
import pandas as pd
from ..policies import SinglePeriodOpt
from ..costs import HcostModel, TcostModel
from ..returns import ReturnsForecast
from ..risks import FullSigma
from .base_test import BaseTest
DIR = os.path.dirname(__file__) + os.path.sep
class TestOptimizer(BaseTest):
    """Tests for the single-period portfolio optimization policy."""

    def setUp(self):
        """Load the CSV market-data fixtures shipped next to this module."""
        self.sigma = pd.read_csv(DIR+'sigmas.csv',
                                 index_col=0, parse_dates=[0])
        self.returns = pd.read_csv(DIR+'returns.csv',
                                   index_col=0, parse_dates=[0])
        self.volume = pd.read_csv(DIR+'volumes.csv',
                                  index_col=0, parse_dates=[0])
        # Linear t-cost coeff, quadratic t-cost coeff, holding cost.
        self.a, self.b, self.s = 0.0005, 1., 0.
        self.s = self.s + 1e-3
        self.universe = self.returns.columns
        self.times = self.returns.index

    def test_single_period_opt(self):
        """Single-period optimizer must match the closed-form KKT solution."""
        gamma = 100.
        n = len(self.universe)
        alpha_model = ReturnsForecast(self.returns)
        # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and
        # removed in 1.0; .values is the backward-compatible equivalent.
        emp_Sigma = np.cov(self.returns.values.T) + np.eye(n)*1e-3
        risk_model = FullSigma(emp_Sigma)
        tcost_model = TcostModel(0, self.b, self.sigma, self.volume, power=2)
        hcost_model = HcostModel(self.s*0, self.s)
        pol = SinglePeriodOpt(alpha_model,
                              [gamma*risk_model, tcost_model, hcost_model],
                              [], solver=cvx.ECOS)
        t = self.times[1]
        p_0 = pd.Series(index=self.universe, data=1E6)
        z = pol.get_trades(p_0, t)
        # Trades must be self-financing (weights sum to zero).
        self.assertAlmostEqual(z.sum(), 0)
        # Compare with the analytic solution of the same QP (KKT system).
        h = z + p_0
        rho = self.b*self.sigma.loc[t]*(sum(p_0)/self.volume.loc[t])
        rho = np.hstack([rho, 0])
        A = 2*gamma*emp_Sigma + 2*np.diag(rho)
        s_val = pd.Series(index=self.returns.columns, data=self.s)
        s_val['cash'] = 0.
        b = self.returns.loc[t] + 2*rho*(p_0/sum(p_0)) + s_val
        h0 = np.linalg.solve(A, b)
        offset = np.linalg.solve(A, np.ones(n))
        nu = (1 - h0.sum())/offset.sum()
        hstar = h0 + nu*offset
        self.assertAlmostEqual(hstar.sum(), 1)
        self.assertItemsAlmostEqual(h/sum(p_0), hstar, places=4)

    # NOTE: a fully commented-out multi-period test ("test_multi_period")
    # previously lived here. It referenced APIs that have since drifted
    # (as_matrix, old MultiPeriodOpt signatures); recover it from version
    # control if it is ever revived.
nilq/baby-python
|
python
|
# Generated by Django 3.2.1 on 2021-05-09 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Customer table with
    # churn-model features plus simple userid/password credentials.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tenure', models.FloatField()),
                ('preferredlogindevice', models.CharField(max_length=20)),
                ('citytier', models.FloatField()),
                ('warehousetohome', models.FloatField()),
                ('preferredpaymenthome', models.CharField(max_length=20)),
                ('gender', models.CharField(max_length=10)),
                ('hourspendonapp', models.FloatField()),
                ('numberofdeviceregistered', models.FloatField()),
                ('preferedordercat', models.CharField(max_length=20)),
                ('satisfactionscore', models.FloatField()),
                ('maritalstatus', models.CharField(max_length=20)),
                ('noofaaddress', models.FloatField()),
                ('complain', models.FloatField()),
                ('orderamounthikefromlastyear', models.FloatField()),
                ('couponused', models.FloatField()),
                ('ordercount', models.FloatField()),
                ('daysincelastorder', models.FloatField()),
                ('cashbackamount', models.FloatField()),
                ('userid', models.CharField(max_length=30)),
                ('password', models.CharField(max_length=20)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
from .BaseNeuralBatch import BaseNeuralBatch
from ..nu import v1
from .. import Ports
import numpy as np
class CubicBatch(BaseNeuralBatch):
    """A batch of neural units laid out on a regular 3-D lattice.

    The ``shape`` grid is centred on ``cell_pos`` with ``unit_distance``
    spacing along each axis. Units can be fully or probabilistically
    interconnected with synapses.
    """

    def __init__(
            self,
            name,
            parent,
            cell_pos,
            shape,
            unit_distance,
            nu_type=v1,
            receive_modulators=False,
            nu_params=None):
        # BUG FIX: mutable default argument ({}) replaced with a None
        # sentinel; passing a dict explicitly behaves exactly as before.
        nu_params = {} if nu_params is None else nu_params
        super().__init__(
            name,
            parent,
            cell_pos,
            nu_type,
            receive_modulators,
            nu_params)
        self.unit_distance = unit_distance
        self.shape = shape
        # Lower corner of the lattice so the grid is centred on cell_pos.
        start_pos = (
            self.cell_pos[0] - (self.shape[0] - 1) / 2 * self.unit_distance,
            self.cell_pos[1] - (self.shape[1] - 1) / 2 * self.unit_distance,
            self.cell_pos[2] - (self.shape[2] - 1) / 2 * self.unit_distance)
        # Units are created in x-fastest order; index i matches
        # get_index_from_pos().
        i = 0
        for z in range(self.shape[2]):
            for y in range(self.shape[1]):
                for x in range(self.shape[0]):
                    pos = (start_pos[0] + x * self.unit_distance,
                           start_pos[1] + y * self.unit_distance,
                           start_pos[2] + z * self.unit_distance)
                    unit_name = self.name + "-NU-" + str(i)
                    temp_nu = self.nu_type(unit_name, self, pos, **self.nu_params)
                    if self.receive_modulators:
                        self.couple(
                            self.in_ports[Ports.NEUROMODULATORS],
                            temp_nu.in_ports[Ports.NEUROMODULATORS])
                    i += 1

    def interconnect_full(self, synapse_type, synapse_params):
        """Create a synapse between every ordered pair of distinct units."""
        for src_unit in self.children:
            for target_unit in self.children:
                if src_unit is not target_unit:
                    new_synapse = target_unit.connect(
                        src_unit, synapse_type, synapse_params)
                    self.couple(
                        src_unit.out_ports[Ports.AP],
                        target_unit.in_ports[new_synapse.name])

    def interconnect_prob(self, prob, synapse_type, synapse_params):
        """Like interconnect_full, but each pair is connected with
        probability *prob* (independent uniform draws)."""
        for src_unit in self.children:
            for target_unit in self.children:
                if src_unit is not target_unit:
                    if np.random.uniform() <= prob:
                        new_synapse = target_unit.connect(
                            src_unit, synapse_type, synapse_params)
                        self.couple(
                            src_unit.out_ports[Ports.AP],
                            target_unit.in_ports[new_synapse.name])

    def get_index_from_pos(self, pos):
        """Map an (x, y, z) grid coordinate to the flat child index."""
        return pos[0] + pos[1] * self.shape[0] + pos[2] * self.shape[0] * self.shape[1]

    def __getitem__(self, index):
        """Index by flat int, or by a 3-tuple of slices (start/stop/step
        must all be explicit) returning the units in that sub-grid."""
        if type(index) == int:
            return self.children[index]
        elif len(index) == 3:
            output = []
            for z in range(index[2].start, index[2].stop, index[2].step):
                for y in range(index[1].start, index[1].stop, index[1].step):
                    for x in range(index[0].start, index[0].stop, index[0].step):
                        output.append(self[self.get_index_from_pos((x, y, z))])
            return output
        else:
            # Message corrected: an int or a length-3 tuple is accepted.
            raise ValueError(
                "CubicBatch only receives an int or a tuple of length 3.")

    def __str__(self):
        output = self.name + "\n"
        output += "unit_distance: " + str(self.unit_distance) + "\n"
        output += "cell_pos: " + str(self.cell_pos) + "\n"
        # BUG FIX: previously printed cell_pos under the "shape" label.
        output += "shape: " + str(self.shape) + "\n"
        # BUG FIX: previously referenced self.membrane_type/membrane_params,
        # which are never set; the attributes stored for this class are
        # nu_type/nu_params (used in __init__).
        temp = str(self.nu_type).split(".")
        output += "nu_type: " + temp[len(temp) - 1][:-2] + "\n"
        for key in self.nu_params:
            if type(self.nu_params[key]) == type:
                temp = str(self.nu_params[key]).split(".")
                output += key + ": " + temp[len(temp) - 1][:-2] + "\n"
            else:
                output += key + ": " + str(self.nu_params[key]) + "\n"
        return output

    def asNumpyArr(self):
        """Return a shape-sized array of each unit's mean synaptic weight
        (0.0 for units with no synapses)."""
        arr = np.empty(self.shape)
        for z in range(0, self.shape[2]):
            for y in range(0, self.shape[1]):
                for x in range(0, self.shape[0]):
                    weights = [
                        s.w for s in self[self.get_index_from_pos((x, y, z))].synapses]
                    avg = sum(weights)/len(weights) if not len(weights) == 0 else 0
                    arr[x, y, z] = avg
        return arr
|
nilq/baby-python
|
python
|
from datetime import datetime
import discord
from discord.ext import commands
class cat_debug(commands.Cog, name="Debug commands"):
    """Cog with simple diagnostic commands (bot, author and server info)."""

    def __init__(self, bot):
        # Bot reference kept for potential future use by commands.
        self.bot = bot

    @commands.command()
    async def tell_me_about_yourself(self, ctx):
        """Reply with a short, static self-introduction blurb."""
        print(
            f"[{datetime.now()}] Command Issued: tell_me_about_yourself\n - message: {ctx.message.content}\n - debug: {ctx.message}"
        )
        text = "My name is XikoBot!\n. My creator is XikoCat. Check him out on twitter: https://twitter.com/xikocat\nType %help, to get a list of commands.\n :)"
        await ctx.send(text)

    @commands.command(help="Prints details of Author")
    async def whats_my_name(self, ctx):
        """Greet the invoking user by their account name."""
        print(
            f"[{datetime.now()}] Command Issued: whats_my_name\n - message: {ctx.message.content}\n - debug: {ctx.message}"
        )
        await ctx.send(f"Hello {ctx.author.name}")

    @commands.command(help="Prints details of Server")
    async def where_am_i(self, ctx):
        """Post an embed summarising the current guild (owner, id, region,
        member count, icon and description)."""
        print(
            f"[{datetime.now()}] Command Issued: where_am_i\n - message: {ctx.message.content}\n - debug: {ctx.message}"
        )
        owner = str(ctx.guild.owner)
        region = str(ctx.guild.region)
        guild_id = str(ctx.guild.id)
        memberCount = str(ctx.guild.member_count)
        icon = str(ctx.guild.icon_url)
        desc = ctx.guild.description
        embed = discord.Embed(
            title=ctx.guild.name + " Server Information",
            description=desc,
            color=discord.Color.blue(),
        )
        embed.set_thumbnail(url=icon)
        embed.add_field(name="Owner", value=owner, inline=True)
        embed.add_field(name="Server ID", value=guild_id, inline=True)
        embed.add_field(name="Region", value=region, inline=True)
        embed.add_field(name="Member Count", value=memberCount, inline=True)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: registers this cog when the
    # extension is loaded.
    bot.add_cog(cat_debug(bot))
|
nilq/baby-python
|
python
|
'''This module computes '''
import argparse
import csv
import io
import os.path
from datetime import datetime
from urllib.request import urlopen
from stockjournal.operator import gmean
csv_header = "Date,Open,High,Low,Close,Volume,Adj Close"
parser = argparse.ArgumentParser(description='Stock stats tool using data \
from Yahoo Finance service or local file.')
parser.add_argument('src',
help="csv file with Yahoo Finance format (%s) or \
a valid stock symbol name to fetch from Yahoo Finance" % csv_header)
# months in yahoo finance starts from 0
# http://chart.finance.yahoo.com/table.csv?s=JPM&a=11&b=30&c=1983&d=1&e=16&f=2017&g=d&ignore=.csv
# all:
## "http://chart.finance.yahoo.com/table.csv?s=JPM&d=1&e=16&f=2017&g=d&ignore=.csv"
def read_from_yahoo(name):
    """Download daily quotes for stock *name* from Yahoo Finance (up to
    today) and return the list of closing prices.

    NOTE(review): chart.finance.yahoo.com has since been discontinued, so
    this request is expected to fail; the caller already handles that.
    """
    today = datetime.now()
    # Yahoo Finance months are zero-based, hence month - 1.
    query = "s={}&d={}&e={}&f={}&g=d&ignore=.csv".format(
        name.upper(), today.month - 1, today.day, today.year)
    with urlopen('http://chart.finance.yahoo.com/table.csv?' + query) as resp:
        return get_values(io.TextIOWrapper(resp, encoding='ascii'))
def read_from_file(filename):
    """Parse a local Yahoo-format CSV file and return the closing prices."""
    with open(filename) as source:
        return get_values(source)
def get_values(resource):
    """Read a Yahoo-Finance CSV stream and return the Close column as floats.

    The first line must match the module-level ``csv_header``; an
    AssertionError is raised otherwise.
    """
    h = resource.readline()[:-1]
    assert h == csv_header,\
        'csv header must be:\n%s got:\n%s' % (csv_header, h)
    return [float(row[4]) for row in csv.reader(resource)]
def main():
    """Resolve the source (local file path or stock symbol), load the
    closing prices and print their geometric mean."""
    args = parser.parse_args()
    if os.path.exists(args.src):
        vals = read_from_file(args.src)
    else:
        try:
            vals = read_from_yahoo(args.src)
        except Exception as e:
            print("Can't get the stock data from Yahoo Finance service.\
 Probably the stock code is wrong: %s\n" % args.src, e)
            return
    print(gmean(vals))


# BUG FIX: previously called main() unconditionally, so merely importing
# this module parsed argv and ran the tool; guard it as a script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# Author: Smit Patel
# Date: 25/07/2018
# File: chatbot_trainer.py
# Licence: MIT
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
import os
# Interactive console chat loop; type "Bye" to exit.
bot = ChatBot('Bot')
# NOTE(review): set_trainer() belongs to pre-1.0 ChatterBot releases —
# confirm the pinned chatterbot version before upgrading.
bot.set_trainer(ListTrainer)
while True:
    message = input('You:')
    if message.strip() != 'Bye':
        reply = bot.get_response(message)
        print('ChatBot :', reply)
    if message.strip() == 'Bye':
        print('ChatBot : Bye, see u again')
        break
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#
'''
### Desafio de request de url ###
Extrair o nono e o quarto campos do arquivo CSV
sobre região de influencia das Cidades
Ignorar a primeira linha que é o cabechalho do arquivo
dados = entrada.read().decode('latin1')
Arquivo IBGE esta no formato ISO-8859-1 (aka latin1)
Essa linha baixa o arquivo para a memoria do computador
for cidade in csv.reader(dados.splitlines()):
Sem o uso do splitlines,
o csv.reader vai processar caracter por caracter (e não linha por linha),
desde forma a variável linhas sempre terá apenas um elemento,
e por isso linhas[8] ou linhas[3] vai levantar a exceção:
list index out of range
9 Coluna = Indice 8
4 Coluna = Indice 3
read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv')
Faz com que o python nao interprete de forma indevida os caracteres
da url exemplo de uso, imprimindo o caractere \n
print(\\n\\n\\n) # OU
print(r'\n\n\n')
'''
import csv
from urllib import request
def read(url):
    """Stream the IBGE CSV at *url* and print, for every row, the ninth
    column (index 8) followed by the fourth (index 3)."""
    with request.urlopen(url) as entrada:
        print('Baixando o CSV...')
        # The IBGE file is encoded in ISO-8859-1 (latin1), not UTF-8.
        conteudo = entrada.read().decode('latin1')
        print('Download completo!')
        # splitlines() is required so csv.reader iterates rows, not chars.
        for cidade in csv.reader(conteudo.splitlines()):
            print(f'{cidade[8]}: {cidade[3]}')
if __name__ == '__main__':
read(r'http://files.cod3r.com.br/curso-python/desafio-ibge.csv')
# Fontes:
# Curso Python 3 - Curso Completo do Básico ao Avançado Udemy Aula 97 a 107
# https://github.com/cod3rcursos/curso-python/tree/master/manipulacao_arquivos
|
nilq/baby-python
|
python
|
class TennisGame():
    """Scores a single tennis game between two players.

    Scores are plain integers (0..5); ``score()`` renders the current state
    as the conventional call ("Fifteen-Love", "Deuce", "player1 Adv", ...).
    """

    def __init__(self, first_player_name="player1", second_player_name="player2"):
        self.first_player_name = first_player_name
        self.second_player_name = second_player_name
        self.first_player_score = 0
        self.second_player_score = 0

    @property
    def first_player_score(self):
        return self._first_player_score

    @first_player_score.setter
    def first_player_score(self, score):
        self._first_player_score = score

    @property
    def second_player_score(self):
        return self._second_player_score

    @second_player_score.setter
    def second_player_score(self, score):
        self._second_player_score = score

    def score(self):
        """Return the spoken form of the current game score."""
        # Rebuilt on every call, preserving the original contract that the
        # lookup appears as an instance attribute after scoring.
        self.score_lookup = {
            "0": "Love",
            "1": "Fifteen",
            "2": "Thirty",
            "3": "Forty",
            "4": "Adv",
            "5": "Win",
        }
        if self.is_same_score():
            if self.is_both_score_high_than_forty():
                return self.get_high_same_score_result()
            return self.get_low_same_score_result()
        if self.is_both_score_high_than_forty():
            return self.get_high_diff_score_result()
        return self.get_low_diff_score_result()

    def get_low_same_score_result(self):
        return "{}-All".format(self.score_lookup[str(self.first_player_score)])

    def get_low_diff_score_result(self):
        first = self.score_lookup[str(self.first_player_score)]
        second = self.score_lookup[str(self.second_player_score)]
        return "{}-{}".format(first, second)

    def get_high_same_score_result(self):
        return "Deuce"

    def get_high_diff_score_result(self):
        return "{} {}".format(self.get_winner(), self.get_adv_statue_by_max_score())

    def get_winner(self):
        if self.first_player_score > self.second_player_score:
            return self.first_player_name
        return self.second_player_name

    def get_adv_statue_by_max_score(self):
        top = max(self.first_player_score, self.second_player_score)
        return self.score_lookup[str(top)]

    def is_both_score_high_than_forty(self):
        return min(self.first_player_score, self.second_player_score) >= 3

    def is_same_score(self):
        return self.first_player_score == self.second_player_score
|
nilq/baby-python
|
python
|
# @Time : 2020/11/14
# @Author : Gaole He
# @Email : hegaole@ruc.edu.cn
# UPDATE:
# @Time : 2020/12/3
# @Author : Tianyi Tang
# @Email : steventang@ruc.edu.cn
# UPDATE
# @Time : 2021/4/12
# @Author : Lai Xu
# @Email : tsui_lai@163.com
"""
textbox.evaluator.bleu_evaluator
#######################################
"""
import numpy as np
from fast_bleu import BLEU
from textbox.evaluator.sentence_bleu import sentence_bleu, SmoothingFunction
from textbox.evaluator.abstract_evaluator import AbstractEvaluator
class BleuEvaluator(AbstractEvaluator):
    r"""BLEU evaluator supporting per-n-gram and cumulative (averaged) scores.

    Computes 'bleu-n' and 'bleu-n-avg' for n in 1..4, either corpus-level via
    fast_bleu (when ``task_type`` is truthy) or sentence-level with smoothing.
    """

    def __init__(self, task_type):
        self.n_grams = [1, 2, 3, 4]
        self.task_type = task_type
        self.weights = self._generate_weights()

    def _generate_weights(self):
        """Build the BLEU weight tuples.

        'bleu-n' weights isolate the n-gram precision (one-hot at position
        n-1); 'bleu-n-avg' weights average precisions 1..n uniformly, padded
        with zeros up to the maximum order.
        """
        max_order = max(self.n_grams)
        weights = {}
        for n_gram in self.n_grams:
            one_hot = [0] * max_order
            one_hot[n_gram - 1] = 1.0
            weights['bleu-{}'.format(n_gram)] = tuple(one_hot)
            cumulative = [1. / n_gram] * n_gram + [0.] * (max_order - n_gram)
            weights['bleu-{}-avg'.format(n_gram)] = tuple(cumulative)
        return weights

    def _calc_fast_bleu(self, generate_corpus, reference_corpus):
        r"""Score the whole generated corpus against the references at once.

        Args:
            generate_corpus (List[List[str]]): the generated corpus
            reference_corpus (List[List[str]]): the referenced corpus

        Returns:
            dict: per-weight lists of BLEU scores from fast_bleu.
        """
        return BLEU(reference_corpus, self.weights).get_score(generate_corpus)

    def _calc_metrics_info(self, generate_corpus, reference_corpus):
        r"""Aggregate BLEU results for the corpus.

        Returns:
            dict: metric name -> list with the mean score, for every
            'bleu-n' and 'bleu-n-avg' key.
        """
        # Preserve key order: plain metrics first, then the averages.
        bleu_dict = {'bleu-{}'.format(n): [] for n in self.n_grams}
        for n in self.n_grams:
            bleu_dict['bleu-{}-avg'.format(n)] = []

        if self.task_type:
            results = self._calc_fast_bleu(
                generate_corpus=generate_corpus, reference_corpus=reference_corpus)
            for n in self.n_grams:
                bleu_dict['bleu-{}'.format(n)].append(
                    np.array(results['bleu-{}'.format(n)]).mean())
                bleu_dict['bleu-{}-avg'.format(n)].append(
                    np.array(results['bleu-{}-avg'.format(n)]).mean())
            return bleu_dict

        # Sentence-level fallback: score each hypothesis/reference pair.
        for pred_sent, gold_sent in zip(generate_corpus, reference_corpus):
            results = sentence_bleu(
                hypothesis=pred_sent,
                references=[gold_sent],
                weights=self.weights,
                smoothing_function=SmoothingFunction().method1
            )
            for n in self.n_grams:
                bleu_dict['bleu-{}'.format(n)].append(
                    np.array(results['bleu-{}'.format(n)]).mean())
                bleu_dict['bleu-{}-avg'.format(n)].append(
                    np.array(results['bleu-{}-avg'.format(n)]).mean())
        return bleu_dict
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.