# -*- coding: utf-8 -*-
"""
scraper manager module.
"""
import requests
from bs4 import BeautifulSoup
import pyrin.configuration.services as config_services
from pyrin.core.structs import Manager
from pyrin.processor.request.enumerations import RequestHeaderEnum
from charma.scraper import ScraperPackage
class ScraperManager(Manager):
"""
scraper manager class.
"""
package_class = ScraperPackage
def __init__(self):
"""
initializes an instance of ScraperManager.
"""
super().__init__()
self._user_agent = config_services.get('scraper', 'general', 'user_agent')
self._parser = config_services.get('scraper', 'general', 'parser')
def get(self, url, **options):
"""
        gets the result of the given url and returns a `Response` object.
:param str url: url to be fetched.
:keyword bool add_user_agent: add user agent into request headers.
defaults to True if not provided.
:keyword bool allow_redirects: allow redirects.
defaults to True if not provided.
:keyword dict headers: headers to be sent with request.
:rtype: requests.Response
"""
headers = options.get('headers') or {}
options.setdefault('allow_redirects', True)
add_user_agent = options.pop('add_user_agent', True)
if add_user_agent is True:
headers[RequestHeaderEnum.USER_AGENT] = self._user_agent
# we have to set 'Accept-Language' header to 'en-US'
# to get consistent results for any movie on any client.
headers[RequestHeaderEnum.ACCEPT_LANGUAGE] = 'en-US'
options.update(headers=headers)
response = requests.get(url, **options)
response.raise_for_status()
return response
def get_soup(self, url, **options):
"""
        gets the result of the given url and returns a `BeautifulSoup` object.
:param str url: url to be fetched.
:keyword bool add_user_agent: add user agent into request headers.
defaults to True if not provided.
:keyword bool allow_redirects: allow redirects.
defaults to True if not provided.
:keyword dict headers: headers to be sent with request.
:rtype: bs4.BeautifulSoup
"""
response = self.get(url, **options)
return BeautifulSoup(response.text, self._parser)
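# Usage sketch (illustrative, not part of the original package): once the pyrin
# application and its 'scraper' config store are initialised, the manager is used
# roughly like this. The URL and the selector below are hypothetical.
#
#   manager = ScraperManager()
#   soup = manager.get_soup('https://example.com/some-movie-page',
#                           add_user_agent=True, allow_redirects=True)
#   title_tag = soup.select_one('h1')
#   title = title_tag.get_text(strip=True) if title_tag is not None else None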
|
# coding: utf-8
# Moving-average strategy on industry-leading stocks
# The following stock pools are needed:
# the CSI 300 pool,
# the pool of stocks that are currently not suspended,
# the pool of stocks that have enough historical data,
# the intersection of the latter two gives the usable stock pool
# the pool of currently held stocks
# the usable pool minus the held stocks (the pool eligible for buy orders)
# the pool of stocks to buy: stocks from the pool above that emit a buy signal
# the pool of stocks to sell: held stocks that are not suspended and emit a sell signal
enable_profile()
import random
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import scipy.stats as stats
import math
def initialize(context):
    # initialize the CSI 300 universe
#g.stocks_ttl = get_index_stocks('000300.XSHE')
g.stocks_ttl = ['000651.XSHE', '601318.XSHG', '000012.XSHE', '002299.XSHE',
'600150.XSHG', '600027.XSHG', '600900.XSHG', '600406.XSHG',
'600151.XSHG', '002241.XSHE', '000563.XSHE', '000002.XSHE',
'601668.XSHG', '002269.XSHE', '600019.XSHG', '600089.XSHG',
'600362.XSHG', '002005.XSHE', '600115.XSHG', '601866.XSHG',
'600352.XSHG', '600004.XSHG', '601186.XSHG', '002415.XSHE',
'002489.XSHE', '002340.XSHE', '601088.XSHG', '000581.XSHE',
'000625.XSHE', '600917.XSHG', '600415.XSHG', '600196.XSHG',
'600256.XSHG', '600887.XSHG', '600585.XSHG', '601006.XSHG',
'000063.XSHE', '300027.XSHE', '600456.XSHG', '600511.XSHG',
'600036.XSHG', '600519.XSHG', '600663.XSHG', '600030.XSHG',
'000538.XSHE', '600108.XSHG', '600655.XSHG']
set_universe(g.stocks_ttl)
set_commission(PerTrade(buy_cost=0.0005, sell_cost=0.0013, min_cost=5))
    # initialize the tradable stock pool (stocks that are not suspended)
    g.stocks_trading = []
    # initialize the pool of stocks with historical data
    g.stocks_hist = []
    # initialize the pool of currently held stocks
    g.stocks_hold = []
    # initialize the candidate stock pool
    g.stocks_toChoose = []
    # stocks to be bought
    g.stocks_toBuy = []
    # stocks to be sold
    g.stocks_toSell = []
    # window lengths of the short and long moving averages
    g.shortMA = 1
    g.longMA = 60
    # maximum number of stocks to hold
    g.numHoldmax = 10
    # number of positions rotated on each rebalance when the portfolio is full
    g.num_of_change = 2
    # number of days over which returns are computed
    g.period = 10
def before_trading_start(context):
    # build the currently usable stock pool
#g.stocks_ttl = get_index_stocks('000300.XSHE')
#set_universe(g.stocks_ttl)
current_data = get_current_data(g.stocks_ttl)
g.stocks_trading = []
length = len(g.stocks_ttl)
    # build the pool of stocks with enough history, fetching that history along the way
operate_buy = {}
g.stocks_hist = []
for i in range(0, length):
        # keep stocks that are not suspended today
if not current_data[g.stocks_ttl[i]].paused:
hdict = attribute_history(
g.stocks_ttl[i],
g.longMA + 1,
'1d', ('close'),
skip_paused=True,
df=False)
            # keep stocks whose history is long enough to support the calculation;
            # while we are at it, compute the buy signal
temp_hist = hdict['close']
temp_hist = np.array([x for x in temp_hist if str(x) != 'nan'])
if len(temp_hist) >= g.longMA:
g.stocks_hist.append(g.stocks_ttl[i])
signal = cal_signal(temp_hist)
operate_buy.update({g.stocks_ttl[i]: signal})
    # final candidate pool: stocks with enough history, excluding current holdings
    g.stocks_toChoose = list(set(g.stocks_hist).difference(set(g.stocks_hold)))
    # final actionable pool: candidates that emit a buy signal
g.stocks_toBuy = []
length = len(g.stocks_toChoose)
for i in range(0, length):
if operate_buy[g.stocks_toChoose[i]] == 1:
g.stocks_toBuy.append(g.stocks_toChoose[i])
    # pool of held stocks that emit a sell signal
g.stocks_toSell = []
length = len(g.stocks_hold)
if length > 0:
current_hold = get_current_data(g.stocks_hold)
for i in range(0, length):
            # only evaluate stocks that are not suspended today
if not current_hold[g.stocks_hold[i]].paused:
temp_hist = attribute_history(
g.stocks_hold[i],
g.longMA + 1,
'1d', ('close'),
skip_paused=True,
df=False)
signal = cal_signal(temp_hist['close'])
if signal == -1:
g.stocks_toSell.append(g.stocks_hold[i])
# Given one stock's price history (array-like of closes), decide whether a buy or
# sell signal is emitted: returns 1 for buy, -1 for sell and 0 for no change.
def cal_signal(data_withHist):
MA_shortYesterday = calMA(data_withHist, g.shortMA)
MA_shortBeforeYesterday = calMA(data_withHist[:-1], g.shortMA)
MA_longYesterday = calMA(data_withHist, g.longMA)
MA_longBeforeYesterday = calMA(data_withHist[:-1], g.longMA)
signal = 0
if MA_shortBeforeYesterday < MA_longBeforeYesterday and MA_shortYesterday > MA_longYesterday:
signal = 1
elif MA_shortYesterday < MA_longYesterday:
signal = -1
return signal
# Given one stock's price history, compute the moving average over the last `num` days and return it.
def calMA(data_withHist, num):
ma = data_withHist[-1 * num:].mean()
return ma
# Build a dataframe indexed by stock code with a 'return' column holding each
# stock's return over `g.period` days, and return that dataframe.
def calreturn(stocklist):
    # fetch the last `period` days of closing prices for each stock
    stocks_info = history(g.period, '1d', 'close', security_list=stocklist)
    # drop rows with missing data (assign the result back, dropna is not in-place)
    stocks_info = stocks_info.dropna(axis=0, how='any')
    # take yesterday's close and the close `period` days ago to compute the return
a1 = list(stocks_info.iloc[0])
a2 = list(stocks_info.iloc[g.period - 1])
a1 = np.array(a1)
a2 = np.array(a2)
    # store every stock's return in a dataframe
stocks_return = DataFrame(
a2 / a1, columns=['return'], index=stocks_info.columns)
stocks_info = stocks_info.T
    # append each stock's return as a new column
    stocks_info = pd.concat([stocks_info, stocks_return], axis=1)
    # sort the stocks by return in ascending order
    stocks_info = stocks_info.sort_values(by='return', ascending=True)
    # return the processed dataframe
return stocks_info
def handle_data(context, data):
    # first, sell the held stocks flagged for selling
for i in range(0, len(g.stocks_toSell)):
order_target(g.stocks_toSell[i], 0, LimitOrderStyle(0.01))
g.stocks_hold = list(set(g.stocks_hold).difference(set(g.stocks_toSell)))
    # count the remaining open position slots
num_canBuy = g.numHoldmax - len(g.stocks_hold)
    # if all slots are full, drop some of the worst-performing holdings and buy
    # better-performing candidates from the candidate pool
if num_canBuy == 0 and len(g.stocks_toBuy) > 0:
stocks_holdreturnup = calreturn(g.stocks_hold)
stocks_tobuyreturnup = calreturn(g.stocks_toBuy)
for i in range(0, min(len(g.stocks_toBuy), g.num_of_change)):
order_target(stocks_holdreturnup.index[i], 0,
LimitOrderStyle(0.01))
g.stocks_hold.remove(stocks_holdreturnup.index[i])
num_canBuy = g.numHoldmax - len(g.stocks_hold)
cash_ttl = context.portfolio.cash
cash_perShare = cash_ttl / num_canBuy
for i in range(0, num_canBuy):
order_target_value(
stocks_tobuyreturnup.index[len(stocks_tobuyreturnup) - i - 1],
cash_perShare, LimitOrderStyle(9999))
    # if there are still open slots
elif num_canBuy > 0:
cash_ttl = context.portfolio.cash
cash_perShare = cash_ttl / num_canBuy
        # if there are fewer candidates than open slots, buy them all
if len(g.stocks_toBuy) <= num_canBuy:
for i in range(0, len(g.stocks_toBuy)):
order_target_value(g.stocks_toBuy[i], cash_perShare,
LimitOrderStyle(9999))
        # if there are more candidates than open slots, buy the best performers first
elif len(g.stocks_toBuy) > num_canBuy:
stocks_tobuyreturnup = calreturn(g.stocks_toBuy)
for i in range(0, num_canBuy):
order_target_value(stocks_tobuyreturnup.index[len(
stocks_tobuyreturnup) - i - 1], cash_perShare,
LimitOrderStyle(9999))
# after orders have been executed, refresh g.stocks_hold
# record the actual holdings:
def after_trading_end(context):
    g.stocks_hold = list(context.portfolio.positions.keys())
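# Illustrative helper (not part of the original strategy): the same moving-average
# crossover rule as cal_signal(), restated with explicit window arguments and plain
# numpy so it can be sanity-checked outside the JoinQuant runtime. The default
# windows mirror g.shortMA / g.longMA above.
def crossover_signal(closes, short_window=1, long_window=60):
    closes = np.asarray(closes, dtype=float)
    ma_short_yday = closes[-short_window:].mean()
    ma_short_prev = closes[:-1][-short_window:].mean()
    ma_long_yday = closes[-long_window:].mean()
    ma_long_prev = closes[:-1][-long_window:].mean()
    if ma_short_prev < ma_long_prev and ma_short_yday > ma_long_yday:
        return 1   # golden cross: buy
    if ma_short_yday < ma_long_yday:
        return -1  # short MA below long MA: sell
    return 0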
|
# Updated 2018
# This module is based on the below cited resources, which are all
# based on the documentation as provided in the Bosch Data Sheet and
# the sample implementation provided therein.
#
# Final Document: BST-BME280-DS002-15
#
# Authors: Paul Cunnane 2016, Peter Dahlebrg 2016
#
# This module borrows from the Adafruit BME280 Python library. Original
# Copyright notices are reproduced below.
#
# Those libraries were written for the Raspberry Pi. This modification is
# intended for the MicroPython and esp8266 boards.
#
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Based on the BMP280 driver with BME280 changes provided by
# David J Taylor, Edinburgh (www.satsignal.eu)
#
# Based on Adafruit_I2C.py created by Kevin Townsend.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address.
BME280_I2CADDR = 0x76
# Operating Modes
BME280_OSAMPLE_1 = 1
BME280_OSAMPLE_2 = 2
BME280_OSAMPLE_4 = 3
BME280_OSAMPLE_8 = 4
BME280_OSAMPLE_16 = 5
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL = 0xF4
class BME280:
def __init__(self,
mode=BME280_OSAMPLE_8,
address=BME280_I2CADDR,
i2c=None,
**kwargs):
# Check that mode is valid.
if mode not in [BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4,
BME280_OSAMPLE_8, BME280_OSAMPLE_16]:
            raise ValueError(
                'Unexpected mode value {0}. Set mode to one of '
                'BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4, '
                'BME280_OSAMPLE_8 or BME280_OSAMPLE_16'.format(mode))
self._mode = mode
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
self.__sealevel = 101325
# load calibration data
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3, self.dig_H4,\
self.dig_H5, self.dig_H6 = unpack("<hBbhb", dig_e1_e7)
# unfold H4, H5, keeping care of a potential sign
self.dig_H4 = (self.dig_H4 * 16) + (self.dig_H5 & 0xF)
self.dig_H5 //= 16
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
bytearray([0x3F]))
self.t_fine = 0
# temporary data holders which stay allocated
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
def read_raw_data(self, result):
""" Reads the raw (uncompensated) data from the sensor.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order
Returns:
None
"""
self._l1_barray[0] = self._mode
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
self._l1_barray[0] = self._mode << 5 | self._mode << 2 | 1
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
self._l1_barray)
# Wait for conversion to complete
while self.i2c.readfrom_mem(self.address, BME280_REGISTER_STATUS, 1)[0] & 0x08:
time.sleep_ms(5)
# burst readout from 0xF7 to 0xFE, recommended by datasheet
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
# pressure(0xF7): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
# temperature(0xFA): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
# humidity(0xFD): (msb << 8) | lsb
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Reads the data from the sensor and returns the compensated data.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order. You may use
this to read out the sensor without allocating heap memory
Returns:
array with temperature, pressure, humidity. Will be the one
from the result parameter if not None
"""
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
# temperature
var1 = (raw_temp/16384.0 - self.dig_T1/1024.0) * self.dig_T2
var2 = raw_temp/131072.0 - self.dig_T1/8192.0
var2 = var2 * var2 * self.dig_T3
self.t_fine = int(var1 + var2)
temp = (var1 + var2) / 5120.0
temp = max(-40, min(85, temp))
# pressure
var1 = (self.t_fine/2.0) - 64000.0
var2 = var1 * var1 * self.dig_P6 / 32768.0 + var1 * self.dig_P5 * 2.0
var2 = (var2 / 4.0) + (self.dig_P4 * 65536.0)
var1 = (self.dig_P3 * var1 * var1 / 524288.0 + self.dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.dig_P1
if (var1 == 0.0):
pressure = 30000 # avoid exception caused by division by zero
else:
p = ((1048576.0 - raw_press) - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.dig_P9 * p * p / 2147483648.0
var2 = p * self.dig_P8 / 32768.0
pressure = p + (var1 + var2 + self.dig_P7) / 16.0
pressure = max(30000, min(110000, pressure))
# humidity
h = (self.t_fine - 76800.0)
h = ((raw_hum - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * h)) *
(self.dig_H2 / 65536.0 * (1.0 + self.dig_H6 / 67108864.0 * h *
(1.0 + self.dig_H3 / 67108864.0 * h))))
humidity = h * (1.0 - self.dig_H1 * h / 524288.0)
# humidity = max(0, min(100, humidity))
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("f", (temp, pressure, humidity))
@property
def sealevel(self):
return self.__sealevel
@sealevel.setter
def sealevel(self, value):
if 30000 < value < 120000: # just ensure some reasonable value
self.__sealevel = value
@property
def altitude(self):
'''
Altitude in m.
'''
from math import pow
try:
p = 44330 * (1.0 - pow(self.read_compensated_data()[1] /
self.__sealevel, 0.1903))
except:
p = 0.0
return p
@property
def dew_point(self):
"""
        Compute the dew point temperature for the currently measured
        temperature and humidity pair.
"""
from math import log
t, p, h = self.read_compensated_data()
h = (log(h, 10) - 2) / 0.4343 + (17.62 * t) / (243.12 + t)
return 243.12 * h / (17.62 - h)
@property
def values(self):
""" human readable values """
t, p, h = self.read_compensated_data()
return ("{:.2f}C".format(t), "{:.2f}hPa".format(p/100),
"{:.2f}%".format(h))
|
"""The sample class"""
from datamodel.submittable import AccessionedSubmittable
class Sample(AccessionedSubmittable):
"""
:param alias: string, unique sample name in the experiment
:param accession: string, BioSamples accession
:param taxon: string, latin species name
:param taxonId: str, NCBI taxonomy identifier for the species
    :param attributes: dictionary with attribute categories as keys and Attribute class objects as values
:param material_type: string, (optional) one of: whole organism, organism part, cell, RNA, DNA
:param description: string, (optional) free-text description of sample
"""
def __init__(self, **kwargs):
AccessionedSubmittable.__init__(self, **kwargs)
self.taxon: str = kwargs.get("taxon")
self.taxonId: str = kwargs.get("taxonId")
self.material_type: str = kwargs.get("material_type")
self.attributes: dict = kwargs.get("attributes", {})
        # Remove material_type from attributes if it is already set as a dedicated field
if self.material_type and "material_type" in self.attributes:
del self.attributes["material_type"]
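# Construction sketch; the keyword names mirror the docstring above and the values
# are hypothetical (AccessionedSubmittable may accept further kwargs such as
# alias, accession or description):
#
#   sample = Sample(alias='sample_1',
#                   taxon='Homo sapiens',
#                   taxonId='9606',
#                   material_type='RNA',
#                   attributes={})
#   assert sample.taxonId == '9606'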
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face Recognition dataset."""
import sys
import os
import math
import pickle
from collections import defaultdict
import numpy as np
from PIL import Image, ImageFile
from model_utils.config import config
from mindspore.communication.management import get_group_size, get_rank
ImageFile.LOAD_TRUNCATED_IMAGES = True
__all__ = ['DistributedCustomSampler', 'CustomDataset']
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
class DistributedCustomSampler:
    '''Distributed sampler that draws k samples per identity for each replica.'''
    def __init__(self, dataset, num_replicas=None, rank=None, is_distributed=1, shuffle=True, k=2):
        assert isinstance(dataset, CustomDataset), 'DistributedCustomSampler only supports CustomDataset!'
if is_distributed:
if num_replicas is None:
num_replicas = get_group_size()
if rank is None:
rank = get_rank()
else:
if num_replicas is None:
num_replicas = 1
if rank is None:
rank = 0
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.ratio = 4.0
self.data_len = len(self.dataset.classes)
self.num_ids = int(math.ceil(self.data_len * 1.0 / self.num_replicas))
self.total_ids = self.num_ids * self.num_replicas
self.num_samples = math.ceil(len(self.dataset) * 1.0 / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.k = k
self.epoch_gen = 1
def _sample_(self, indices):
"""sample"""
sampled = []
for indice in indices:
sampled_id = indice
            if config.device_target == 'CPU':
                # skip identities that do not provide at least k images
                if self.k >= len(self.dataset.id2range[sampled_id]):
                    continue
sampled.extend(np.random.choice(self.dataset.id2range[sampled_id][:], self.k).tolist())
return sampled
def __iter__(self):
if self.shuffle:
# Note, the self.epoch parameter does not get updated in DE
self.epoch_gen = (self.epoch_gen + 1) & 0xffffffff
np.random.seed(self.epoch_gen)
indices = np.random.permutation(len(self.dataset.classes))
indices = indices.tolist()
else:
indices = list(range(len(self.dataset.classes)))
indices += indices[:(self.total_ids - len(indices))]
assert len(indices) == self.total_ids
indices = indices[self.rank*self.num_ids:(self.rank+1)*self.num_ids]
assert len(indices) == self.num_ids
sampled_idxs = self._sample_(indices)
return iter(sampled_idxs)
def __len__(self):
return self.num_ids * self.k
def set_epoch(self, epoch):
self.epoch = epoch
def merge_indices(self, list1, list2):
'''merge_indices'''
list_result = []
ct_1, ct_2 = 0, 0
for i in range(self.data_len):
if (i+1) % int(self.ratio+1) == 0:
list_result.append(list2[ct_2])
ct_2 += 1
else:
list_result.append(list1[ct_1])
ct_1 += 1
return list_result
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def make_dataset(dir_1, class_to_idx, extensions=None, is_valid_file=None):
'''make_dataset'''
images = []
dir_1 = os.path.expanduser(dir_1)
if not (extensions is None) ^ (is_valid_file is None):
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def f(x):
return has_file_allowed_extension(x, extensions)
is_valid_file = f
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir_1, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = (path, class_to_idx[target])
images.append(item)
return images
class ImageFolderDataset:
'''ImageFolderDataset'''
def __init__(self, root, cache_path, is_distributed):
if not os.path.isfile(cache_path):
self.classes, self.classes_to_idx = self._find_classes(root)
self.samples = make_dataset(root, self.classes_to_idx, IMG_EXTENSIONS, None)
self.id2range = self._build_id2range()
cache = dict()
cache['classes'] = self.classes
cache['classes_to_idx'] = self.classes_to_idx
cache['samples'] = self.samples
cache['id2range'] = self.id2range
if is_distributed:
print("******* TODO: All workers will write cache... Need to only dump when rank == 0 ******")
if get_rank() == 0:
with open(cache_path, 'wb') as fw:
pickle.dump(cache, fw)
print('local dump cache:{}'.format(cache_path))
else:
with open(cache_path, 'wb') as fw:
pickle.dump(cache, fw)
print('local dump cache:{}'.format(cache_path))
else:
print('loading cache from %s'%cache_path)
with open(cache_path, 'rb') as fr:
cache = pickle.load(fr)
self.classes, self.classes_to_idx, self.samples, self.id2range = cache['classes'], \
cache['classes_to_idx'], \
cache['samples'], cache['id2range']
self.all_image_idxs = range(len(self.samples))
self.classes = list(self.id2range.keys())
def _find_classes(self, dir_1):
"""
Finds the class folders in a dataset.
Args:
            dir_1 (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir_1) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir_1) if os.path.isdir(os.path.join(dir_1, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def _build_id2range(self):
'''_build_id2range'''
id2range = defaultdict(list)
ret_range = defaultdict(list)
for idx, sample in enumerate(self.samples):
label = sample[1]
id2range[label].append((sample, idx))
for key in id2range:
id2range[key].sort(key=lambda x: int(os.path.basename(x[0][0]).split('.')[0]))
for item in id2range[key]:
ret_range[key].append(item[1])
return ret_range
def __getitem__(self, index):
return self.samples[index]
def __len__(self):
return len(self.samples)
def pil_loader(path):
"""
Loads the image
Args:
path: path to the image
Returns:
Object: pil_loader
"""
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
class CustomDataset:
'''CustomDataset'''
def __init__(self, root, cache_path, is_distributed=1, transform=None, target_transform=None,
loader=pil_loader):
self.dataset = ImageFolderDataset(root, cache_path, is_distributed)
print('CustomDataset len(dataset):{}'.format(len(self.dataset)))
self.loader = loader
self.transform = transform
self.target_transform = target_transform
self.classes = self.dataset.classes
self.id2range = self.dataset.id2range
def __getitem__(self, index):
path, target = self.dataset[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.dataset)
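# Usage sketch (assumes a directory tree of per-identity folders and a writable
# cache path; both paths below are placeholders). It only exercises the pure
# Python side: the sampler yields dataset indices, k per identity, which can then
# be fed into the actual training data pipeline.
#
#   dataset = CustomDataset('/path/to/faces', '/path/to/faces.cache', is_distributed=0)
#   sampler = DistributedCustomSampler(dataset, num_replicas=1, rank=0,
#                                      is_distributed=0, shuffle=True, k=2)
#   for idx in sampler:
#       image, label = dataset[idx]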
|
import matplotlib.pyplot as plt
from matplotlib import colors
import subprocess
import sys
def create_colormap(file_data, numRows, numCols, gen_number):
data = []
for i in range(0, numRows):
temp = []
for j in range(0, numCols):
if file_data[i][j] == '0':
temp.append(0)
else:
temp.append(1)
data.append(temp)
# create colormap
cmap = colors.ListedColormap(['#777777', '#28F1B3'])
bounds = [0,1,2]
norm = colors.BoundaryNorm(bounds, cmap.N)
fig, ax = plt.subplots(1, 1, figsize=(numRows/4, numCols/4), dpi=1200/(numRows/4))
ax.imshow(data, cmap=cmap, norm=norm)
# draw gridlines
ax.grid(which='major', axis='both', linestyle='-', color='k', linewidth=2)
# ax.set_xticks(np.arange(-.5, 10, 1))
# ax.set_yticks(np.arange(-.5, 10, 1))
for x in range(numRows + 1):
ax.axhline(x-0.5, lw=1.1, color='k', zorder=5)
for x in range(numCols + 1):
ax.axvline(x-0.5, lw=1.1, color='k', zorder=5)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
plt.savefig("results/gen" + str(gen_number) + ".png", transparent=True)
if __name__ == "__main__":
generation_number = int(sys.argv[1])
file_data = []
with open("results/gen" + str(generation_number) + ".txt") as f:
for i, line in enumerate(f):
if i == 0:
num_rows, num_cols = line.strip("\n").split(" ")
else:
row = line.strip("\n").split(" ")
file_data.append(row)
create_colormap(file_data, int(num_rows), int(num_cols), generation_number)
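# Expected input format for results/gen<N>.txt, as implied by the parsing above:
# the first line holds "<num_rows> <num_cols>", and every following line holds a
# space-separated row of cell states, where '0' is drawn in grey and any other
# value in green. A hypothetical 3x4 grid would look like:
#
#   3 4
#   0 1 0 0
#   1 1 0 1
#   0 0 1 0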
|
"""
Define facilities for automatically upgrading databases.
"""
# NOTE: This code is written to be slightly more generic than we currently
# need. In particular, we maintain multiple migration lists keyed by a 'schema
# version'. This was done in case we need to add some kind of migration
# functionality for the individual test suites, which is not unreasonable.
import os
import re
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
from sqlalchemy import Column, String, Integer
from lnt.util import logger
import lnt.server.db.util
###
# Schema for in-database version information.
Base = sqlalchemy.ext.declarative.declarative_base()
class SchemaVersion(Base):
__tablename__ = 'SchemaVersion'
name = Column("Name", String(256), primary_key=True, unique=True)
version = Column("Version", Integer)
def __init__(self, name, version):
self.name = name
self.version = version
def __repr__(self):
return '%s%r' % (self.__class__.__name__, (self.name, self.version))
###
# Migrations auto-discovery.
def _load_migrations():
"""
Load available migration scripts from a directory.
Migrations are organized as:
<current dir>/migrations/
<current dir>/migrations/upgrade_<N>_to_<N+1>.py
...
"""
upgrade_script_rex = re.compile(
r'^upgrade_(0|[1-9][0-9]*)_to_([1-9][0-9]*)\.py$')
migrations = {}
# Currently, we only load migrations for a '__core__' schema, and only from
# the migrations directory. One idea if we need to eventually support
# migrations for the per-testsuite tables is to add subdirectories keyed on
# the testsuite.
for schema_name in ('__core__',):
schema_migrations_path = os.path.join(os.path.dirname(__file__),
'migrations')
schema_migrations = {}
for item in os.listdir(schema_migrations_path):
# Ignore certain known non-scripts.
if item in ('README.txt', '__init__.py', 'new_suite.py',
'util.py', '__pycache__') or item.endswith('.pyc'):
continue
# Ignore non-matching files.
m = upgrade_script_rex.match(item)
if m is None:
logger.warning(
"ignoring item %r in schema migration directory: %r",
item, schema_migrations_path)
continue
# Check the version numbers for validity.
version, next_version = map(int, m.groups())
if next_version != version + 1:
logger.error(
"invalid script name %r in schema migration directory: %r",
item, schema_migrations_path)
continue
schema_migrations[version] = os.path.join(
schema_migrations_path, item)
# Ignore directories with no migrations.
if not schema_migrations:
logger.warning("ignoring empty migrations directory: %r",
schema_migrations_path)
continue
# Check the provided versions for sanity.
current_version = max(schema_migrations) + 1
for i in range(current_version):
if i not in schema_migrations:
logger.error("schema %r is missing migration for version: %r",
schema_name, i)
# Store the current version as another item in the per-schema migration
# dictionary.
schema_migrations['current_version'] = current_version
# Store the schema migrations.
migrations[schema_name] = schema_migrations
return migrations
###
# Auto-upgrading support.
def _set_schema_version(engine, schema_name, new_version):
# Keep the updating to a single transaction that is immediately committed.
session = sqlalchemy.orm.sessionmaker(engine)()
schema_version = session.query(SchemaVersion) \
.filter(SchemaVersion.name == schema_name) \
.first()
if schema_version is None:
schema_version = SchemaVersion(schema_name, new_version)
else:
schema_version.version = new_version
session.add(schema_version)
session.commit()
session.close()
def update_schema(engine, versions, available_migrations, schema_name):
schema_migrations = available_migrations[schema_name]
# Get the current schema version.
db_version = versions.get(schema_name, None)
current_version = schema_migrations['current_version']
# If there was no previous version, initialize the version.
if db_version is None:
logger.info("assigning initial version for schema %r",
schema_name)
_set_schema_version(engine, schema_name, 0)
db_version = 0
# If we are up-to-date, do nothing.
if db_version == current_version:
return False
# Otherwise, update the database.
if db_version > current_version:
logger.error("invalid schema %r version %r (greater than current)",
schema_name, db_version)
return False
logger.info("updating schema %r from version %r to current version %r",
schema_name, db_version, current_version)
while db_version < current_version:
# Lookup the upgrade function for this version.
upgrade_script = schema_migrations[db_version]
        upgrade_globals = {}
        with open(upgrade_script) as f:
            exec(compile(f.read(), upgrade_script, 'exec'), upgrade_globals)
        upgrade_method = upgrade_globals['upgrade']
# Execute the upgrade.
#
# FIXME: Backup the database here.
#
# FIXME: Execute this inside a transaction?
logger.info("applying upgrade for version %d to %d" % (
db_version, db_version+1))
upgrade_method(engine)
# Update the schema version.
db_version += 1
_set_schema_version(engine, schema_name, db_version)
return True
def update(engine):
any_changed = False
# Load the available migrations.
available_migrations = _load_migrations()
Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(engine)()
version_list = session.query(SchemaVersion).all()
session.close()
versions = dict((v.name, v.version)
for v in version_list)
# Update the core schema.
any_changed |= update_schema(engine, versions,
available_migrations, '__core__')
if any_changed:
logger.info("database auto-upgraded")
def update_path(path):
# If the path includes no database type, assume sqlite.
if lnt.server.db.util.path_has_no_database_type(path):
path = 'sqlite:///' + path
engine = sqlalchemy.create_engine(path)
update(engine)
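# Sketch of what a migration script is expected to look like, based on how the
# loader above executes it: a file named migrations/upgrade_<N>_to_<N+1>.py that
# defines an `upgrade(engine)` callable. The schema change shown is hypothetical.
#
#   # migrations/upgrade_0_to_1.py
#   import sqlalchemy
#
#   def upgrade(engine):
#       with engine.begin() as conn:
#           conn.execute(sqlalchemy.text(
#               "ALTER TABLE SchemaVersion ADD COLUMN Comment VARCHAR(256)"))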
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StorageFlexUtilPhysicalDriveRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
StorageFlexUtilPhysicalDriveRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this StorageFlexUtilPhysicalDriveRef.
The Object Type of the referenced REST resource.
:return: The object_type of this StorageFlexUtilPhysicalDriveRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this StorageFlexUtilPhysicalDriveRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this StorageFlexUtilPhysicalDriveRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this StorageFlexUtilPhysicalDriveRef.
The Moid of the referenced REST resource.
:return: The moid of this StorageFlexUtilPhysicalDriveRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this StorageFlexUtilPhysicalDriveRef.
The Moid of the referenced REST resource.
:param moid: The moid of this StorageFlexUtilPhysicalDriveRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this StorageFlexUtilPhysicalDriveRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this StorageFlexUtilPhysicalDriveRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this StorageFlexUtilPhysicalDriveRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this StorageFlexUtilPhysicalDriveRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StorageFlexUtilPhysicalDriveRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
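# Usage sketch: the class is a plain generated model, so it can be constructed and
# serialized without any Intersight connection. The object_type value below is
# hypothetical; the selector string is the example given in the property docstring.
#
#   ref = StorageFlexUtilPhysicalDriveRef(object_type='storage.FlexUtilPhysicalDrive',
#                                         selector="Serial eq '3AA8B7T11'")
#   print(ref.to_dict())   # {'object_type': ..., 'moid': None, 'selector': ...}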
|
import os
import pytest
import six
MAX_ALLOWED_LINKS_COUNT = 10
@pytest.fixture
def metrics(request):
class Metrics(object):
@classmethod
def set(cls, name, value):
            assert len(name) <= 128, "Metric name must be at most 128 characters long"
assert type(value) in [int, float], "Metric value must be of type int or float"
test_name = request.node.nodeid
if test_name not in request.config.test_metrics:
request.config.test_metrics[test_name] = {}
request.config.test_metrics[test_name][name] = value
@classmethod
def set_benchmark(cls, benchmark_values):
            # Google Benchmark reports have a 'benchmarks' key holding a list of results,
            # while Yandex benchmark reports use a 'benchmark' key for the same purpose.
            # Use that difference to tell which kind of report this is.
if 'benchmarks' in benchmark_values:
cls.set_gbenchmark(benchmark_values)
else:
cls.set_ybenchmark(benchmark_values)
@classmethod
def set_ybenchmark(cls, benchmark_values):
for benchmark in benchmark_values["benchmark"]:
name = benchmark["name"]
                for key, value in six.iteritems(benchmark):
if key != "name":
cls.set("{}_{}".format(name, key), value)
@classmethod
def set_gbenchmark(cls, benchmark_values):
time_unit_multipliers = {"ns": 1, "us": 1000, "ms": 1000000}
time_keys = {"real_time", "cpu_time"}
ignore_keys = {"name", "run_name", "time_unit", "run_type", "repetition_index"}
for benchmark in benchmark_values["benchmarks"]:
name = benchmark["name"]
time_unit_mult = time_unit_multipliers[benchmark.get("time_unit", "ns")]
for k, v in six.iteritems(benchmark):
if k in time_keys:
cls.set("{}_{}".format(name, k), v * time_unit_mult)
elif k not in ignore_keys and isinstance(v, (float, int)):
cls.set("{}_{}".format(name, k), v)
return Metrics
@pytest.fixture
def links(request):
class Links(object):
@classmethod
def set(cls, name, path):
if len(request.config.test_logs[request.node.nodeid]) >= MAX_ALLOWED_LINKS_COUNT:
raise Exception("Cannot add more than {} links to test".format(MAX_ALLOWED_LINKS_COUNT))
reserved_names = ["log", "logsdir", "stdout", "stderr"]
if name in reserved_names:
raise Exception("Attachment name should not belong to the reserved list: {}".format(", ".join(reserved_names)))
output_dir = request.config.ya.output_dir
if not os.path.exists(path):
raise Exception("Path to be attached does not exist: {}".format(path))
if os.path.isabs(path) and ".." in os.path.relpath(path, output_dir):
raise Exception("Test attachment must be inside yatest.common.output_path()")
request.config.test_logs[request.node.nodeid][name] = path
@classmethod
def get(cls, name):
if name not in request.config.test_logs[request.node.nodeid]:
raise KeyError("Attachment with name '{}' does not exist".format(name))
return request.config.test_logs[request.node.nodeid][name]
return Links
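# Usage sketch inside a test module; the fixtures are requested by name, the metric
# name is made up, and the attachment path must live under the test output directory:
#
#   def test_sorting_speed(metrics, links):
#       metrics.set("sort_time_ms", 12.5)
#       links.set("profile", "/path/inside/output_dir/profile.svg")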
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJellyfish(PythonPackage):
"""a library for doing approximate and phonetic matching of strings."""
pypi = "jellyfish/jellyfish-0.6.1.tar.gz"
version('0.6.1', sha256='5104e45a2b804b48a46a92a5e6d6e86830fe60ae83b1da32c867402c8f4c2094')
version('0.5.6', sha256='887a9a49d0caee913a883c3e7eb185f6260ebe2137562365be422d1316bd39c9')
depends_on('py-setuptools', type='build')
|
"""
This is an implementation of Function Secret Sharing
Useful papers are:
- Function Secret Sharing- Improvements and Extensions, Boyle 2017
Link: https://eprint.iacr.org/2018/707.pdf
- Secure Computation with Preprocessing via Function Secret Sharing, Boyle 2019
Link: https://eprint.iacr.org/2019/1095
Note that the protocols implemented here differ in several respects from those papers
"""
import math
import numpy as np
import shaloop
import multiprocessing
import asyncio
import torch as th
import syft as sy
from syft.exceptions import EmptyCryptoPrimitiveStoreError
from syft.workers.websocket_client import WebsocketClientWorker
from syft.generic.utils import allow_command
from syft.generic.utils import remote
λ = 127 # security parameter
n = 32 # bit precision
λs = math.ceil(λ / 64) # how many int64 are needed to store λ, here 2
if λs != 2:
raise ValueError("Check the value of security parameter")
no_wrap = {"no_wrap": True}
def full_name(f):
return f"syft.frameworks.torch.mpc.fss.{f.__name__}"
# internal codes
EQ = 0
COMP = 1
# number of processes
N_CORES = max(4, multiprocessing.cpu_count())
MULTI_LIMIT = 50_000
def _get_items(partitions):
list_items = [[] for _ in range(len(partitions[0]))]
for partition in partitions:
for i, item in enumerate(partition):
if isinstance(item, tuple):
if len(list_items[i]) == 0:
list_items[i] = [[] for _ in range(len(item))]
for j, it in enumerate(item):
list_items[i][j].append(it)
else:
list_items[i].append(item)
return list_items
def _get_primitives(list_items):
primitives = []
for items in list_items:
if isinstance(items[0], np.ndarray):
primitive = concat(*items, axis=-1)
else:
primitive = tuple(concat(*its, axis=-1) for its in items)
primitives.append(primitive)
return primitives
def keygen(n_values, op):
"""
Run FSS keygen in parallel to accelerate the offline part of the protocol
Args:
n_values (int): number of primitives to generate
op (str): eq or comp <=> DPF or DIF
"""
if op == "eq":
return DPF.keygen(n_values=n_values)
if op == "comp":
if n_values <= MULTI_LIMIT:
return DIF.keygen(n_values)
multiprocessing_args = []
slice_size = math.ceil(n_values / N_CORES)
for j in range(N_CORES):
n_instances = min((j + 1) * slice_size, n_values) - j * slice_size
process_args = (n_instances,) # TODO add a seed element for the PRG?
multiprocessing_args.append(process_args)
with multiprocessing.Pool() as p:
partitions = p.starmap(DIF.keygen, multiprocessing_args)
list_items = _get_items(partitions)
return _get_primitives(list_items)
raise ValueError(f"{op} is an unsupported operation.")
def fss_op(x1, x2, op="eq"):
"""
Define the workflow for a binary operation using Function Secret Sharing
Currently supported operand are = & <=, respectively corresponding to
op = 'eq' and 'comp'
Args:
x1: first AST
x2: second AST
op: type of operation to perform, should be 'eq' or 'comp'
Returns:
shares of the comparison
"""
if isinstance(x1, sy.AdditiveSharingTensor):
locations = x1.locations
class_attributes = x1.get_class_attributes()
else:
locations = x2.locations
class_attributes = x2.get_class_attributes()
dtype = class_attributes.get("dtype")
asynchronous = isinstance(locations[0], WebsocketClientWorker)
workers_args = [
(
x1.child[location.id]
if isinstance(x1, sy.AdditiveSharingTensor)
else (x1 if i == 0 else 0),
x2.child[location.id]
if isinstance(x2, sy.AdditiveSharingTensor)
else (x2 if i == 0 else 0),
op,
)
for i, location in enumerate(locations)
]
try:
shares = []
for i, location in enumerate(locations):
share = remote(mask_builder, location=location)(*workers_args[i], return_value=True)
shares.append(share)
except EmptyCryptoPrimitiveStoreError as e:
if sy.local_worker.crypto_store.force_preprocessing:
raise
sy.local_worker.crypto_store.provide_primitives(workers=locations, **e.kwargs_)
return fss_op(x1, x2, op)
# async has a cost which is too expensive for this command
# shares = asyncio.run(sy.local_worker.async_dispatch(
# workers=locations,
# commands=[
# (full_name(mask_builder), None, workers_args[i], {})
# for i in [0, 1]
# ],
# return_value=True
# ))
mask_value = sum(shares) % 2 ** n
for location, share in zip(locations, shares):
location.de_register_obj(share)
del share
workers_args = [(th.IntTensor([i]), mask_value, op, dtype) for i in range(2)]
if not asynchronous:
shares = []
for i, location in enumerate(locations):
share = remote(evaluate, location=location)(*workers_args[i], return_value=False)
shares.append(share)
else:
print("async")
shares = asyncio.run(
sy.local_worker.async_dispatch(
workers=locations,
commands=[(full_name(evaluate), None, workers_args[i], {}) for i in [0, 1]],
)
)
shares = {loc.id: share for loc, share in zip(locations, shares)}
response = sy.AdditiveSharingTensor(shares, **class_attributes)
return response
# share level
@allow_command
def mask_builder(x1, x2, op):
if not isinstance(x1, int):
worker = x1.owner
numel = x1.numel()
else:
worker = x2.owner
numel = x2.numel()
x = x1 - x2
# Keep the primitive in store as we use it after
# you actually get a share of alpha
alpha, s_0, *CW = worker.crypto_store.get_keys(f"fss_{op}", n_instances=numel, remove=False)
r = x + th.tensor(alpha.astype(np.int64)).reshape(x.shape)
return r
# share level
@allow_command
def evaluate(b, x_masked, op, dtype):
if op == "eq":
return eq_evaluate(b, x_masked)
elif op == "comp":
numel = x_masked.numel()
if numel > MULTI_LIMIT:
# print('MULTI EVAL', numel, x_masked.owner)
owner = x_masked.owner
multiprocessing_args = []
original_shape = x_masked.shape
x_masked = x_masked.reshape(-1)
slice_size = math.ceil(numel / N_CORES)
for j in range(N_CORES):
x_masked_slice = x_masked[j * slice_size : (j + 1) * slice_size]
x_masked_slice.owner = owner
process_args = (b, x_masked_slice, owner.id, j, j * slice_size, dtype)
multiprocessing_args.append(process_args)
p = multiprocessing.Pool()
partitions = p.starmap(comp_evaluate, multiprocessing_args)
p.close()
partitions = sorted(partitions, key=lambda k: k[0])
partitions = [partition[1] for partition in partitions]
result = th.cat(partitions)
# Burn the primitives (copies of the workers were sent)
owner.crypto_store.get_keys(f"fss_{op}", n_instances=numel, remove=True)
return result.reshape(*original_shape)
else:
# print('EVAL', numel)
return comp_evaluate(b, x_masked, dtype=dtype)
else:
raise ValueError
# process level
def eq_evaluate(b, x_masked):
alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(
op="fss_eq", n_instances=x_masked.numel(), remove=True
)
result_share = DPF.eval(b.numpy().item(), x_masked.numpy(), s_0, *CW)
return th.tensor(result_share)
# process level
def comp_evaluate(b, x_masked, owner_id=None, core_id=None, burn_offset=0, dtype=None):
if owner_id is not None:
x_masked.owner = x_masked.owner.get_worker(owner_id)
if burn_offset > 0:
_ = x_masked.owner.crypto_store.get_keys(
op="fss_comp", n_instances=burn_offset, remove=True
)
alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(
op="fss_comp", n_instances=x_masked.numel(), remove=True
)
result_share = DIF.eval(b.numpy().item(), x_masked.numpy(), s_0, *CW)
dtype_options = {None: th.long, "int": th.int32, "long": th.long}
result = th.tensor(result_share, dtype=dtype_options[dtype])
if core_id is None:
return result
else:
return core_id, result
def eq(x1, x2):
return fss_op(x1, x2, "eq")
def le(x1, x2):
return fss_op(x1, x2, "comp")
class DPF:
"""Distributed Point Function - used for equality"""
@staticmethod
def keygen(n_values=1):
alpha = np.random.randint(0, 2 ** n, size=(n_values,), dtype=np.uint64)
beta = np.array([1])
α = bit_decomposition(alpha)
s, t, CW = (
Array(n + 1, 2, λs, n_values),
Array(n + 1, 2, n_values),
Array(n, 2, (λs + 1), n_values),
)
_CW = []
s[0] = randbit(shape=(2, λ, n_values))
t[0] = np.array([[0, 1]] * n_values).T
for i in range(0, n):
g0 = G(s[i, 0])
g1 = G(s[i, 1])
# Re-use useless randomness
sL_0, _, sR_0, _ = split(g0, (EQ, λs, 1, λs, 1))
sL_1, _, sR_1, _ = split(g1, (EQ, λs, 1, λs, 1))
s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])
cw_i = SwitchTableDPF(s_rand, α[i])
CW[i] = cw_i ^ g0 ^ g1
_CW.append(compress(CW[i], α[i], op=EQ))
CWi = uncompress(_CW[i])
for b in (0, 1):
dual_state = [g0, g1][b] ^ (t[i, b] * CWi)
state = multi_dim_filter(dual_state, α[i])
s[i + 1, b], t[i + 1, b] = split(state, (EQ, λs, 1))
CW_n = (-1) ** t[n, 1] * (beta - convert(s[n, 0]) + convert(s[n, 1]))
CW_n = CW_n.astype(np.int64)
return (alpha, s[0][0], s[0][1], *_CW, CW_n)
@staticmethod
def eval(b, x, *k_b):
x = x.astype(np.uint64)
original_shape = x.shape
x = x.reshape(-1)
n_values = x.shape[0]
x = bit_decomposition(x)
s, t = Array(n + 1, λs, n_values), Array(n + 1, 1, n_values)
s[0], *_CW, _CWn = k_b
t[0] = b
for i in range(0, n):
CWi = uncompress(_CW[i])
dual_state = G(s[i]) ^ (t[i] * CWi)
state = multi_dim_filter(dual_state, x[i])
s[i + 1], t[i + 1] = split(state, (EQ, λs, 1))
flat_result = (-1) ** b * (t[n].squeeze() * _CWn + convert(s[n]))
return flat_result.astype(np.int64).reshape(original_shape)
class DIF:
"""Distributed Point Function - used for comparison"""
@staticmethod
def keygen(n_values=1):
alpha = np.random.randint(0, 2 ** n, size=(n_values,), dtype=np.uint64)
α = bit_decomposition(alpha)
s, σ, t, τ, CW, CW_leaf = (
Array(n + 1, 2, λs, n_values),
Array(n + 1, 2, λs, n_values),
Array(n + 1, 2, n_values),
Array(n + 1, 2, n_values),
Array(n, 2, 2 * (λs + 1), n_values),
Array(n + 1, n_values),
)
_CW = []
s[0] = randbit(shape=(2, λ, n_values))
t[0] = np.array([[0, 1]] * n_values).T
for i in range(0, n):
h0 = H(s[i, 0], 0)
h1 = H(s[i, 1], 1)
# Re-use useless randomness
σL_0, _, sL_0, _, σR_0, _, sR_0, _ = split(h0, (COMP, λs, 1, λs, 1, λs, 1, λs, 1))
σL_1, _, sL_1, _, σR_1, _, sR_1, _ = split(h1, (COMP, λs, 1, λs, 1, λs, 1, λs, 1))
s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])
σ_rand = (σL_0 ^ σL_1) * α[i] + (σR_0 ^ σR_1) * (1 - α[i])
cw_i = SwitchTableDIF(s_rand, σ_rand, α[i])
CW[i] = cw_i ^ h0 ^ h1
_CW.append(compress(CW[i], α[i], op=COMP))
CWi = uncompress(_CW[i], op=COMP)
for b in (0, 1):
dual_state = [h0, h1][b] ^ (t[i, b] * CWi)
# the state obtained by following the special path
state = multi_dim_filter(dual_state, α[i])
_, _, s[i + 1, b], t[i + 1, b] = split(state, (COMP, λs, 1, λs, 1))
# the state obtained by leaving the special path
anti_state = multi_dim_filter(dual_state, 1 - α[i])
σ[i + 1, b], τ[i + 1, b], _, _ = split(anti_state, (COMP, λs, 1, λs, 1))
if b:
# note that we subtract (1 - α[i]), so that leaving the special path can't lead
# to an output == 1 when α[i] == 0 (because it means that your bit is 1 so your
# value is > α)
CW_leaf[i] = (-1) ** τ[i + 1, 1] * (
1 - convert(σ[i + 1, 0]) + convert(σ[i + 1, 1]) - (1 - α[i])
)
CW_leaf[n] = (-1) ** t[n, 1] * (1 - convert(s[n, 0]) + convert(s[n, 1]))
CW_leaf = CW_leaf.astype(np.int32)
return (alpha, s[0][0], s[0][1], *_CW, CW_leaf)
@staticmethod
def eval(b, x, *k_b):
x = x.astype(np.uint64)
original_shape = x.shape
x = x.reshape(-1)
n_values = x.shape[0]
x = bit_decomposition(x)
s, σ, t, τ, out = (
Array(n + 1, λs, n_values),
Array(n + 1, λs, n_values),
Array(n + 1, 1, n_values),
Array(n + 1, 1, n_values),
Array(n + 1, n_values),
)
s[0], *_CW, CW_leaf = k_b
CW_leaf = CW_leaf.astype(np.int64)
t[0] = b
for i in range(0, n):
CWi = uncompress(_CW[i], op=COMP)
dual_state = H(s[i]) ^ (t[i] * CWi)
state = multi_dim_filter(dual_state, x[i])
σ[i + 1], τ[i + 1], s[i + 1], t[i + 1] = split(state, (COMP, λs, 1, λs, 1))
out[i] = (-1) ** b * (τ[i + 1] * CW_leaf[i] + convert(σ[i + 1]))
# Last node, the other σ is also a leaf
out[n] = (-1) ** b * (t[n].squeeze() * CW_leaf[n] + convert(s[n]))
return out.sum(axis=0).astype(np.int64).reshape(original_shape)
def compress(CWi, alpha_i, op=EQ):
"""Compression technique which reduces the size of the CWi by dropping some
non-necessary randomness.
The original paper on FSS explains that this trick doesn't affect the security.
"""
if op == EQ:
sL, tL, sR, tR = split(CWi, (op, λs, 1, λs, 1))
return (tL.astype(np.bool), tR.astype(np.bool), (1 - alpha_i) * sR + alpha_i * sL)
else:
σL, τL, sL, tL, σR, τR, sR, tR = split(CWi, (op, λs, 1, λs, 1, λs, 1, λs, 1))
return (
τL.astype(np.bool),
tL.astype(np.bool),
τR.astype(np.bool),
tR.astype(np.bool),
alpha_i * σR + (1 - alpha_i) * σL,
(1 - alpha_i) * sR + alpha_i * sL,
)
def uncompress(_CWi, op=EQ):
"""Decompress the compressed CWi by duplicating the randomness to recreate
the original shape."""
if op == EQ:
CWi = concat(
_CWi[2],
_CWi[0].reshape(1, -1).astype(np.uint64),
_CWi[2],
_CWi[1].reshape(1, -1).astype(np.uint64),
).reshape(2, 3, -1)
else:
CWi = concat(
_CWi[4],
_CWi[0].reshape(1, -1).astype(np.uint64),
_CWi[5],
_CWi[1].reshape(1, -1).astype(np.uint64),
_CWi[4],
_CWi[2].reshape(1, -1).astype(np.uint64),
_CWi[5],
_CWi[3].reshape(1, -1).astype(np.uint64),
).reshape(2, 6, -1)
return CWi
def Array(*shape):
return np.empty(shape, dtype=np.uint64)
def bit_decomposition(x):
x = x.astype(np.uint32)
n_values = x.shape[0]
x = x.reshape(-1, 1).view(np.uint8)
x = x.reshape(n_values, 4, 1)
x = x >> np.arange(8, dtype=np.uint8)
x = x & 0b1
x = np.flip(x.reshape(n_values, -1)[:, :n], axis=1).T
return x
def randbit(shape):
if len(shape) != 3:
raise ValueError("size of shape is not 3")
byte_dim = shape[-2]
shape_with_bytes = shape[:-2] + (math.ceil(byte_dim / 64), shape[-1])
randvalues = np.random.randint(0, 2 ** 64, size=shape_with_bytes, dtype=np.uint64)
randvalues[:, 0] = randvalues[:, 0] % 2 ** (byte_dim % 64)
return randvalues
def concat(*args, **kwargs):
return np.concatenate(args, **kwargs)
def split_last_bit(buffer):
# Numbers are on 64 bits
return buffer & 0b1111111111111111111111111111111111111111111111111111111111111110, buffer & 0b1
def G(seed):
"""Pseudo Random Generator λ -> 2(λ + 1)"""
if len(seed.shape) != 2:
raise ValueError("size of seed shape needs to be 2")
n_values = seed.shape[1]
if seed.shape[0] != λs:
raise ValueError("check the security parameter and seed shape")
x = seed
x = x.T
dt1 = np.dtype((np.uint64, [("uint8", np.uint8, 8)]))
x2 = x.view(dtype=dt1)
x = x2["uint8"].reshape(*x.shape[:-1], -1)
if x.shape != (n_values, 2 * 8):
raise ValueError(f"shape of x needs to be ({n_values}, 16)")
out = np.empty((n_values, 4 * 8), dtype=np.uint8)
shaloop.sha256_loop_func(x, out)
buffer = out.view(np.uint64).T
valuebits = np.empty((2, 3, n_values), dtype=np.uint64)
# [λ, 1, λ, 1]
# [λ - 64, 64, 1, λ - 64, 64, 1]
valuebits[0, 0], last_bit = split_last_bit(buffer[0])
valuebits[0, 1] = buffer[1]
valuebits[0, 2] = last_bit
valuebits[1, 0], last_bit = split_last_bit(buffer[2])
valuebits[1, 1] = buffer[3]
valuebits[1, 2] = last_bit
return valuebits
empty_dict = {}
def H(seed, idx=0):
"""
Pseudo Random Generator λ -> 4(λ + 1)
    idx is here to avoid reusing the same cached buffers: otherwise, during key
    generation, h0 would be overwritten by h1.
"""
if len(seed.shape) != 2:
raise ValueError("size of seed shape needs to be 2")
n_values = seed.shape[1]
if seed.shape[0] != λs:
raise ValueError("check the security parameter and seed shape")
x = seed
x = x.T
dt1 = np.dtype((np.uint64, [("uint8", np.uint8, 8)]))
x2 = x.view(dtype=dt1)
x = x2["uint8"].reshape(*x.shape[:-1], -1)
if x.shape != (n_values, 2 * 8):
raise ValueError(f"shape of x needs to be ({n_values}, 16)")
if (n_values, idx) not in empty_dict:
# 64 bytes are needed to store a sha512
empty_dict[(n_values, idx)] = (
np.empty((n_values, 64), dtype=np.uint8),
np.empty((2, 6, n_values), dtype=np.uint64),
)
out, valuebits = empty_dict[(n_values, idx)]
shaloop.sha512_loop_func(x, out)
buffer = out.view(np.uint64).T # is of size 8 * 64 bits
# [λ, 1, λ, 1, λ, 1, λ, 1]
# [λ - 64, 64, 1, λ - 64, 64, 1, λ - 64, 64, 1, λ - 64, 64, 1]
valuebits[0, 0], last_bit = split_last_bit(buffer[0])
valuebits[0, 1] = buffer[1]
valuebits[0, 2] = last_bit
valuebits[0, 3], last_bit = split_last_bit(buffer[2])
valuebits[0, 4] = buffer[3]
valuebits[0, 5] = last_bit
valuebits[1, 0], last_bit = split_last_bit(buffer[4])
valuebits[1, 1] = buffer[5]
valuebits[1, 2] = last_bit
valuebits[1, 3], last_bit = split_last_bit(buffer[6])
valuebits[1, 4] = buffer[7]
valuebits[1, 5] = last_bit
return valuebits
split_helpers = {
(EQ, 2, 1): lambda x: (x[:2], x[2]),
(EQ, 2, 1, 2, 1): lambda x: (x[0, :2], x[0, 2], x[1, :2], x[1, 2]),
(COMP, 2, 1, 2, 1): lambda x: (x[:2], x[2], x[3:5], x[5]),
(COMP, 2, 1, 2, 1, 2, 1, 2, 1): lambda x: (
x[0, :2],
x[0, 2],
x[0, 3:5],
x[0, 5],
x[1, :2],
x[1, 2],
x[1, 3:5],
x[1, 5],
),
}
def split(list_, idx):
return split_helpers[idx](list_)
ones_dict2 = {}
def SwitchTableDPF(s, α_i):
one = np.ones((1, s.shape[1]), dtype=np.uint64)
s_one = concat(s, one)
if s_one.shape not in ones_dict2:
ones_dict2[s_one.shape] = np.ones((1, *s_one.shape), dtype=np.uint64)
ones = ones_dict2[s_one.shape]
pad = (α_i * ones).astype(np.uint64)
pad = concat(1 - pad, pad, axis=0)
Table = pad * s_one
return Table
def SwitchTableDIF(s, σ, α_i):
leafTable = SwitchTableDPF(σ, 1 - α_i)
nextTable = SwitchTableDPF(s, α_i)
Table = concat(leafTable, nextTable, axis=1)
return Table
def multi_dim_filter(τ, idx):
filtered_τ = (1 - idx) * τ[0] + idx * τ[1]
return filtered_τ
def convert(x):
"""
convert a multi dim big tensor to a "random" single tensor
"""
    # Select the 31 least significant bits
r = x[-1] & 0b1111_1111_1111_1111_1111_1111_1111_111
return r.astype(np.int64)
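# End-to-end sketch of the DPF primitives alone (no PySyft workers involved).
# Party b holds key (s0_b, *_CW, CW_n); evaluating both keys on the same public
# input and summing the outputs mod 2**n should reconstruct 1 where the input
# equals alpha and 0 elsewhere. This is a reading of DPF.keygen/DPF.eval above,
# not a tested snippet.
#
#   alpha, s0_0, s0_1, *cw = DPF.keygen(n_values=4)
#   x_public = alpha.astype(np.uint64)            # evaluate exactly at alpha
#   y0 = DPF.eval(0, x_public, s0_0, *cw)
#   y1 = DPF.eval(1, x_public, s0_1, *cw)
#   assert (((y0 + y1) % 2 ** n) == 1).all()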
|
"""Helpers for config validation using voluptuous."""
from datetime import timedelta
import jinja2
import voluptuous as vol
from homeassistant.loader import get_platform
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE)
from homeassistant.helpers.entity import valid_entity_id
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
msg='invalid longitude')
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys):
"""Validator that at least one key exists."""
def validate(obj):
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
for k in obj.keys():
if k in keys:
return obj
raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
return validate
def boolean(value):
"""Validate and coerce a boolean value."""
if isinstance(value, str):
value = value.lower()
if value in ('1', 'true', 'yes', 'on', 'enable'):
return True
if value in ('0', 'false', 'no', 'off', 'disable'):
return False
raise vol.Invalid('invalid boolean value {}'.format(value))
return bool(value)
def isfile(value):
"""Validate that the value is an existing file."""
return vol.IsFile('not a file')(value)
def ensure_list(value):
"""Wrap value in list if it is not one."""
return value if isinstance(value, list) else [value]
def entity_id(value):
"""Validate Entity ID."""
value = string(value).lower()
if valid_entity_id(value):
return value
raise vol.Invalid('Entity ID {} does not match format <domain>.<object_id>'
.format(value))
def entity_ids(value):
"""Validate Entity IDs."""
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
def icon(value):
"""Validate icon."""
value = str(value)
if value.startswith('mdi:'):
return value
raise vol.Invalid('Icons should start with prefix "mdi:"')
time_period_dict = vol.All(
dict, vol.Schema({
'days': vol.Coerce(int),
'hours': vol.Coerce(int),
'minutes': vol.Coerce(int),
'seconds': vol.Coerce(int),
'milliseconds': vol.Coerce(int),
}),
has_at_least_one_key('days', 'hours', 'minutes',
'seconds', 'milliseconds'),
lambda value: timedelta(**value))
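# For example, {'minutes': 5, 'seconds': 30} validates to timedelta(minutes=5, seconds=30).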
def time_period_str(value):
"""Validate and transform time offset."""
if isinstance(value, int):
raise vol.Invalid('Make sure you wrap time values in quotes')
elif not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith('-'):
negative_offset = True
value = value[1:]
elif value.startswith('+'):
value = value[1:]
try:
parsed = [int(x) for x in value.split(':')]
except ValueError:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
if len(parsed) == 2:
hour, minute = parsed
second = 0
elif len(parsed) == 3:
hour, minute, second = parsed
else:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
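# Illustrative examples of the accepted formats:
#   time_period_str('01:30')      -> timedelta(hours=1, minutes=30)
#   time_period_str('-00:05:30')  -> timedelta(minutes=-5, seconds=-30)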
time_period = vol.Any(time_period_str, timedelta, time_period_dict)
def log_exception(logger, ex, domain, config):
"""Generate log exception for config validation."""
message = 'Invalid config for [{}]: '.format(domain)
if 'extra keys not allowed' in ex.error_message:
message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
.format(ex.path[-1], domain, domain,
'->'.join('%s' % m for m in ex.path))
else:
message += str(ex)
if hasattr(config, '__line__'):
message += " (See {}:{})".format(config.__config_file__,
config.__line__ or '?')
logger.error(message)
def match_all(value):
"""Validator that matches all values."""
return value
def platform_validator(domain):
"""Validate if platform exists for given domain."""
def validator(value):
"""Test if platform exists."""
if value is None:
raise vol.Invalid('platform cannot be None')
if get_platform(domain, str(value)):
return value
raise vol.Invalid(
'platform {} does not exist for {}'.format(value, domain))
return validator
def positive_timedelta(value):
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid('Time period should be positive')
return value
def service(value):
"""Validate service."""
# Services use same format as entities so we can use same helper.
if valid_entity_id(value):
return value
raise vol.Invalid('Service {} does not match format <domain>.<name>'
.format(value))
def slug(value):
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
value = str(value)
slg = slugify(value)
if value == slg:
return value
raise vol.Invalid('invalid slug {} (try {})'.format(value, slg))
def string(value):
"""Coerce value to string, except for None."""
if value is not None:
return str(value)
raise vol.Invalid('string value is None')
def temperature_unit(value):
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == 'C':
return TEMP_CELSIUS
elif value == 'F':
return TEMP_FAHRENHEIT
raise vol.Invalid('invalid temperature unit (expected C or F)')
def template(value):
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid('template value is None')
value = str(value)
try:
jinja2.Environment().parse(value)
return value
except jinja2.exceptions.TemplateSyntaxError as ex:
raise vol.Invalid('invalid template ({})'.format(ex))
def time(value):
"""Validate time."""
time_val = dt_util.parse_time(value)
if time_val is None:
raise vol.Invalid('Invalid time specified: {}'.format(value))
return time_val
def time_zone(value):
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
'Invalid time zone passed in. Valid options can be found here: '
'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
# Validator helpers
def key_dependency(key, dependency):
"""Validate that all dependencies exist for key."""
def validator(value):
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid('key dependencies require a dict')
if key in value and dependency not in value:
raise vol.Invalid('dependency violation - key "{}" requires '
'key "{}" to exist'.format(key, dependency))
return value
return validator
# Schemas
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): string,
CONF_SCAN_INTERVAL: vol.All(vol.Coerce(int), vol.Range(min=1)),
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required('event'): string,
vol.Optional('event_data'): dict,
})
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Exclusive('service', 'service name'): service,
vol.Exclusive('service_template', 'service name'): template,
vol.Optional('data'): dict,
vol.Optional('data_template'): {match_all: template},
vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'numeric_state',
vol.Required(CONF_ENTITY_ID): entity_id,
CONF_BELOW: vol.Coerce(float),
CONF_ABOVE: vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'state',
vol.Required(CONF_ENTITY_ID): entity_id,
vol.Required('state'): str,
vol.Optional('for'): vol.All(time_period, positive_timedelta),
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'sun',
vol.Optional('before'): sun_event,
vol.Optional('before_offset'): time_period,
vol.Optional('after'): sun_event,
vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'template',
vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'time',
'before': time,
'after': time,
'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'zone',
vol.Required(CONF_ENTITY_ID): entity_id,
'zone': entity_id,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('event'): vol.Any('enter', 'leave'),
})
AND_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'and',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
OR_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'or',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
CONDITION_SCHEMA = vol.Any(
NUMERIC_STATE_CONDITION_SCHEMA,
STATE_CONDITION_SCHEMA,
SUN_CONDITION_SCHEMA,
TEMPLATE_CONDITION_SCHEMA,
TIME_CONDITION_SCHEMA,
ZONE_CONDITION_SCHEMA,
AND_CONDITION_SCHEMA,
OR_CONDITION_SCHEMA,
)
_SCRIPT_DELAY_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("delay"): vol.All(time_period, positive_timedelta)
})
SCRIPT_SCHEMA = vol.All(
ensure_list,
[vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA, EVENT_SCHEMA,
CONDITION_SCHEMA)],
)
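# Minimal usage sketch (hypothetical entity and service names, not part of this module):
#   action = SCRIPT_SCHEMA([
#       {'service': 'light.turn_on', 'entity_id': 'light.kitchen'},
#       {'delay': '00:00:05'},
#   ])
# Each list entry is validated against the service, delay, event and condition
# schemas defined above.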
|
"""pixel.py: Contains pixel class."""
# pylint: disable=E1101,R0902,C0103
__author__ = "Rajiv Giridharagopal"
__copyright__ = "Copyright 2021"
__maintainer__ = "Rajiv Giridharagopal"
__email__ = "rgiri@uw.edu"
__status__ = "Development"
import numpy as np
from scipy import signal as sps
from scipy import integrate as spg
from ffta.pixel_utils import noise
from ffta.pixel_utils import parab
from ffta.pixel_utils import dwavelet
from ffta.pixel_utils import tfp_calc
from ffta.nfmd import NFMD
from matplotlib import pyplot as plt
import pywt
import time
import warnings
class Pixel:
def __init__(self,
signal_array,
params={},
can_params={},
fit=True,
pycroscopy=False,
method='hilbert',
fit_form='product',
filter_amplitude=False,
filter_frequency=False,
recombination=False,
trigger=None,
total_time=None,
sampling_rate=None,
roi=None):
"""
Signal Processing to Extract Time-to-First-Peak.
Extracts Time-to-First-Peak (tFP) from digitized Fast-Free Time-Resolved
Electrostatic Force Microscopy (FF-trEFM) signals [1-2]. It includes a few
types of frequency analysis:
a) Hilbert Transform
b) Wavelet Transform
c) Short-Time Fourier Transform (STFT)
d) Nonstationary Fourier Mode Decomposition (NFMD)
Attributes
----------
n_points : int
Number of points in a signal.
n_signals : int
Number of signals to be averaged in a pixel.
signal_array : (n_signals, n_points) array_like
Array that contains original signals.
signal : (n_points,) array_like
Signal after phase-locking and averaging.
tidx : int
Index of trigger in time-domain.
amplitude : (n_points, ) array_like
Instantaneous amplitude of the signal
phase : (n_points,) array_like
Phase of the signal, only calculated with Hilbert Transform method.
cwt_matrix : (n_widths, n_points) array_like
Wavelet matrix for continuous wavelet transform.
inst_freq : (n_points,) array_like
Instantaneous frequency of the signal.
tfp : float
Time from trigger to first-peak, in seconds.
shift : float
Frequency shift from trigger to first-peak, in Hz.
Methods
-------
analyze()
Analyzes signals and returns tfp, shift and inst_freq.
Notes
-----
Frequency shift from wavelet analysis is not in Hertz. It should be used
with caution.
References
----------
.. [1] Giridharagopal R, Rayermann GE, Shao G, et al. Submicrosecond time
resolution atomic force microscopy for probing nanoscale dynamics.
Nano Lett. 2012;12(2):893-8.
.. [2] Karatay D, Harrison JA, et al. Fast time-resolved electrostatic
force microscopy: Achieving sub-cycle time resolution. Rev Sci Instrum.
2016;87(5):053702.
Examples
--------
>>> from ffta import pixel, pixel_utils
>>>
>>> signal_file = '../data/SW_0000.ibw'
>>> params_file = '../data/parameters.cfg'
>>>
>>> signal_array = pixel_utils.load.signal(signal_file)
>>> n_pixels, params = pixel_utils.load.configuration(params_file)
>>>
>>> p = pixel.Pixel(signal_array, params)
>>> tfp, shift, inst_freq = p.analyze()
>>>
>>> p.plot()
:param signal_array: 2D real-valued signal array, corresponds to a pixel.
:type signal_array: (n_points, n_signals) array_like
:param params: Parameters for processing, saved by the experiment. Required:
trigger = float (in seconds) (required)
total_time = float (in seconds) (either this or sampling rate required)
sampling_rate = int (in Hz) (see above)
These are often supplied but can be a default:
drive_freq = float (in Hz)
roi = float (in seconds)
window = string (see documentation of scipy.signal.get_window)
bandpass_filter = int (0: no filtering, 1: FIR filter, 2: IIR filter)
filter_bandwidth = float (default: 5kHz)
n_taps = integer (default: 1799)
wavelet_analysis = bool (0: Hilbert method, 1: Wavelet Method)
wavelet_parameter = int (default: 5)
recombination = bool (0: Data are for Charging up, 1: Recombination)
fit_phase = bool (0: fit to frequency, 1: fit to phase)
:type params: dict, optional
:param can_params: Contains the cantilever parameters (e.g. AMPINVOLS).
see ffta.pixel_utils.load.cantilever_params
:type can_params: dict, optional
:param fit: Find tFP by just raw minimum (False) or fitting product of 2 exponentials (True)
:type fit: bool, optional
:param pycroscopy: Pycroscopy requires different orientation, so this corrects for this effect.
:type pycroscopy: bool, optional
:param fit_form: Functional form used when fitting.
One of
product: product of two exponentials (default)
sum: sum of two exponentials
exp: single exponential decay
ringdown: single exponential decay of amplitude, not frequency, scaled to return Q
:type fit_form: str, optional
:param method: Method for generating instantaneous frequency, amplitude, and phase response
One of
hilbert: Hilbert transform method (default)
wavelet: Morlet CWT approach
stft: short time Fourier transform (sliding FFT)
nfmd: Nonstationary Fourier mode decomposition
:type method: str, optional
:param filter_amplitude: The Hilbert transform amplitude can contain a drive-frequency artifact; if True, filter it out.
:type filter_amplitude: bool, optional
:param filter_frequency: Filters the instantaneous frequency to remove noise peaks
:type filter_frequency: bool, optional
:param recombination: Whether to invert the frequency (during a recombination or positive frequency shift event)
:type recombination: bool, optional
The following are necessary to define a signal, either in params or explicitly:
trigger: float, optional
The point in time (seconds) where the event occurs.
total_time: float, optional
The total time of the signal, in seconds.
sampling_rate: int, optional
The sampling rate, in Hz. Note that sampling_rate * total_time must equal the number of samples.
roi: float, optional
The length of the window (seconds) in which to find the minimum frequency peak.
"""
# Create parameter attributes for optional parameters.
# These defaults are overwritten by values in 'params'
# FIR (Hilbert) filtering parameters
self.n_taps = 1499
self.filter_bandwidth = 5000
self.filter_frequency = filter_frequency
# Wavelet parameters
self.wavelet_analysis = False
self.wavelet = 'cmor1-1' # default complex Morlet wavelet
self.scales = np.arange(100, 2, -1)
self.wavelet_params = {} # currently just optimize flag is supported
# Short Time Fourier Transform
self.fft_analysis = False
self.fft_cycles = 2
self.fft_params = {} # for STFT
self.fft_time_res = 20e-6
# NFMD
self.nfmd_analysis = False
self.num_freqs = 2
self.window_size = 40
self.optimizer_opts = {'lr': 1e-4}
self.max_iters = 100
self.target_loss = 1e-4
self.update_freq = None
# Misc Settings
self.phase_fitting = False
self.check_drive = True
self.window = 'blackman'
self.bandpass_filter = 1
# Assign the fit parameter.
self.fit = fit
self.fit_form = fit_form
self.method = method
self.filter_amplitude = filter_amplitude
self.filter_frequency = filter_frequency
self.recombination = recombination
# Default Cantilever parameters, plugging in some reasonable defaults
self.AMPINVOLS = 122e-9
self.SpringConstant = 23.2
self.k = self.SpringConstant
self.DriveAmplitude = 1.7e-9
self.Mass = 4.55e-12
self.Beta = 3114
self.Q = 360
# Set up the array
self.signal_array = signal_array
self.signal_orig = None # used in amplitude calc to undo any Windowing beforehand
if len(signal_array.shape) == 2 and 1 not in signal_array.shape:
self.n_points, self.n_signals = self.signal_array.shape
else:
self.n_signals = 1
self.signal_array = self.signal_array.flatten()
self.n_points = self.signal_array.shape[0]
self._n_points_orig = self.signal_array.shape[0]
if pycroscopy:
self.signal_array = signal_array.T
# The copy of the signal we will manipulate
self.signal = np.copy(self.signal_array)
# Read parameter attributes from parameters dictionary.
for key, value in params.items():
setattr(self, key, value)
for key, value in can_params.items():
setattr(self, key, float(value))
# Overwrite parameters with explicitly passed parameters
for key, val in zip(['trigger', 'total_time', 'sampling_rate', 'roi'],
[trigger, total_time, sampling_rate, roi]):
if val:
setattr(self, key, val)
# Check for missing required parameters
if self.trigger is None:  # trigger can be 0, so test explicitly against None
raise KeyError('Trigger must be supplied')
if not self.total_time:
if not self.sampling_rate:
raise KeyError('total_time or sampling_rate must be supplied')
else:
self.total_time = self.n_points / self.sampling_rate
elif not hasattr(self, 'sampling_rate'):
self.sampling_rate = int(self.n_points / self.total_time)
elif self.total_time != self.n_points / self.sampling_rate:
print(self.n_points / self.sampling_rate)
print(self.total_time)
raise ValueError('total_time and sampling_rate mismatch')
if self.total_time < self.trigger:
self.trigger = 0.1 * self.total_time
if not self.roi:
self.roi = 0.3 * (self.total_time - self.trigger)
warnings.warn('ROI defaulting to 30% post-trigger')
elif self.roi > self.total_time - self.trigger:
print(self.roi)
print(self.total_time - self.trigger)
warnings.warn('roi must not extend beyond the total_time; setting to maximum')
self.roi = self.total_time - self.trigger
self.tidx = int(self.trigger * self.sampling_rate)
self._tidx_orig = self.tidx
self.tidx_orig = self.tidx
if not hasattr(self, 'drive_freq'):
self.average()
self.set_drive()
# Processing parameters
if self.filter_frequency:
self.bandpass_filter = 0 # turns off FIR
# Initialize attributes that are going to be assigned later.
self.signal = np.array([])
self.phase = None
self.inst_freq = None
self.tfp = None
self.shift = None
self.cwt_matrix = None
# For accidental passing ancillary datasets from Pycroscopy, otherwise
# this class will throw an error when pickling (e.g. in pyUSID.Process)
if hasattr(self, 'Position_Indices'):
del self.Position_Indices
if hasattr(self, 'Position_Values'):
del self.Position_Values
if hasattr(self, 'Spectroscopic_Indices'):
del self.Spectroscopic_Indices
if hasattr(self, 'Spectroscopic_Values'):
del self.Spectroscopic_Values
return
def update_parm(self, **kwargs):
"""
Update the parameters, see ffta.pixel.Pixel for details on what to update
e.g. to switch from default Hilbert to Wavelets, for example
:param kwargs:
:type kwargs:
"""
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
return
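# Illustrative call, assuming `p` is a Pixel instance:
#   p.update_parm(method='wavelet', wavelet='cmor1-1', scales=np.arange(200, 2, -1))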
def remove_dc(self, dc_width=10e3, plot=False):
"""
Removes DC components from each signal using FFT.
:param dc_width:
:type dc_width: float, optional
:param plot:
:type plot: bool, optional
"""
self.signal = np.copy(self.signal_array)
if self.n_signals == 1:
self.signal = np.reshape(self.signal, (self.n_points, self.n_signals))
for i in range(self.n_signals):
f_ax = np.linspace(-self.sampling_rate / 2, self.sampling_rate / 2, self.n_points)
mid = int(len(f_ax) / 2)
# drive_bin = np.searchsorted(f_ax[mid:], self.drive_freq) + mid
delta_freq = self.sampling_rate / self.n_points
SIG_DC = np.fft.fftshift(np.fft.fft(self.signal[:, i]))
SIG_DC[:mid - int(dc_width / delta_freq)] = 0
SIG_DC[mid + int(dc_width / delta_freq):] = 0
sig_dc = np.real(np.fft.ifft(np.fft.ifftshift(SIG_DC)))
self.signal[:, i] -= sig_dc
if plot:
fig, ax = plt.subplots(nrows=2, figsize=(6, 10))
ax[0].plot(np.arange(0, self.total_time, 1 / self.sampling_rate), sig_dc, 'b')
ax[1].plot(np.arange(0, self.total_time, 1 / self.sampling_rate), self.signal_array, 'b')
ax[1].plot(np.arange(0, self.total_time, 1 / self.sampling_rate), self.signal, 'r')
plt.title('DC Offset')
if self.n_signals == 1:
self.signal = self.signal[:, 0]
return
def phase_lock(self):
"""
Phase-locks signals in the signal array. This also cuts signals.
"""
# Phase-lock signals.
self.signal_array, self.tidx = noise.phase_lock(self.signal_array, self.tidx,
np.ceil(self.sampling_rate / self.drive_freq))
# Update number of points after phase-locking.
self.n_points = self.signal_array.shape[0]
return
def average(self):
"""
Averages signals.
"""
if self.n_signals != 1: # if not multi-signal, don't average
# self.signal = self.signal_array.mean(axis=1)
self.signal = self.signal.mean(axis=1)
return
def set_drive(self):
"""
Calculates drive frequency of averaged signals
"""
n_fft = 2 ** int(np.log2(self.tidx)) # For FFT, power of 2.
dfreq = self.sampling_rate / n_fft # Frequency separation.
# Calculate drive frequency from maximum power of the FFT spectrum.
signal = self.signal[:n_fft]
fft_amplitude = np.abs(np.fft.rfft(signal))
drive_freq = fft_amplitude.argmax() * dfreq
self.drive_freq = drive_freq
return
def check_drive_freq(self):
"""
Calculates drive frequency of averaged signals, and check against
the given drive frequency.
"""
n_fft = 2 ** int(np.log2(self.tidx)) # For FFT, power of 2.
dfreq = self.sampling_rate / n_fft # Frequency separation.
# Calculate drive frequency from maximum power of the FFT spectrum.
signal = self.signal[:n_fft]
fft_amplitude = np.abs(np.fft.rfft(signal))
drive_freq = fft_amplitude.argmax() * dfreq
# Difference between given and calculated drive frequencies.
difference = np.abs(drive_freq - self.drive_freq)
# If difference is too big, reassign. Otherwise, continue. != 0 for accidental DC errors
if difference >= dfreq and drive_freq != 0:
self.drive_freq = drive_freq
return
def apply_window(self):
"""
Applies the window given in parameters.
"""
self.signal *= sps.get_window(self.window, self.n_points)
return
def dwt_denoise(self):
"""
Uses DWT to denoise the signal prior to processing.
"""
rate = self.sampling_rate
lpf = self.drive_freq * 0.1
self.signal, _, _ = dwavelet.dwt_denoise(self.signal, lpf, rate / 2, rate)
return
def fir_filter(self):
"""
Filters signal with a FIR bandpass filter.
"""
# Calculate bandpass region from given parameters.
nyq_rate = 0.5 * self.sampling_rate
bw_half = self.filter_bandwidth / 2
freq_low = (self.drive_freq - bw_half) / nyq_rate
freq_high = (self.drive_freq + bw_half) / nyq_rate
band = [freq_low, freq_high]
# Create taps using window method.
try:
taps = sps.firwin(int(self.n_taps), band, pass_zero=False,
window='blackman')
except Exception:
# If the filter design fails, report the inputs and re-raise instead of
# silently deferring to a NameError on 'taps' below.
print('band=', band)
print('nyq=', nyq_rate)
print('drive=', self.drive_freq)
raise
self.signal = sps.fftconvolve(self.signal, taps, mode='same')
# Shifts trigger due to causal nature of FIR filter
self.tidx -= (self.n_taps - 1) // 2  # keep tidx an integer index
return
def iir_filter(self):
"""
Filters signal with two Butterworth filters (one lowpass,
one highpass) using filtfilt. This method has linear phase and no
time delay.
"""
# Calculate bandpass region from given parameters.
nyq_rate = 0.5 * self.sampling_rate
bw_half = self.filter_bandwidth / 2
freq_low = (self.drive_freq - bw_half) / nyq_rate
freq_high = (self.drive_freq + bw_half) / nyq_rate
# Do a high-pass filtfilt operation.
b, a = sps.butter(9, freq_low, btype='high')
self.signal = sps.filtfilt(b, a, self.signal)
# Do a low-pass filtfilt operation.
b, a = sps.butter(9, freq_high, btype='low')
self.signal = sps.filtfilt(b, a, self.signal)
return
def amplitude_filter(self):
'''
Filters the drive signal out of the amplitude response
'''
AMP = np.fft.fftshift(np.fft.fft(self.amplitude))
DRIVE = self.drive_freq / (self.sampling_rate / self.n_points) # drive location in frequency space
center = int(len(AMP) / 2)
# crude boxcar
AMP[:center - int(DRIVE / 2) + 1] = 0
AMP[center + int(DRIVE / 2) - 1:] = 0
self.amplitude = np.abs(np.fft.ifft(np.fft.ifftshift(AMP)))
return
def frequency_filter(self):
'''
Filters the instantaneous frequency around DC peak to remove noise
Uses self.filter_bandwidth for the frequency filter
'''
FREQ = np.fft.fftshift(np.fft.fft(self.inst_freq))
center = int(len(FREQ) / 2)
df = self.sampling_rate / self.n_points
drive_bin = int(np.ceil(self.drive_freq / df))
bin_width = int(self.filter_bandwidth / df)
if bin_width > drive_bin:
print('width exceeds first resonance')
bin_width = drive_bin - 1
FREQ[:center - bin_width] = 0
FREQ[center + bin_width:] = 0
self.inst_freq = np.real(np.fft.ifft(np.fft.ifftshift(FREQ)))
return
def frequency_harmonic_filter(self, width=5):
'''
Filters the instantaneous frequency to remove noise
Keeps the DC component and every harmonic of the drive frequency up to the Nyquist limit
:param width: Size of the boxcar around the various peaks
:type width: int, optional
'''
FREQ = np.fft.fftshift(np.fft.fft(self.inst_freq))
center = int(len(FREQ) / 2)
# Find drive_bin
df = self.sampling_rate / self.n_points
drive_bin = int(np.ceil(self.drive_freq / df))
bins = np.arange(len(FREQ) / 2)[::drive_bin]
bins = np.append(center - bins, center + bins)
FREQ_filt = np.zeros(len(FREQ), dtype='complex128')
for b in bins:
FREQ_filt[int(b) - width:int(b) + width] = FREQ[int(b) - width:int(b) + width]
self.inst_freq = np.real(np.fft.ifft(np.fft.ifftshift(FREQ_filt)))
return
def hilbert(self):
"""
Analytical signal and calculate phase/frequency via Hilbert transform
"""
self.hilbert_transform()
self.calculate_amplitude()
self.calculate_phase()
self.calculate_inst_freq()
return
def hilbert_transform(self):
"""
Gets the analytical signal doing a Hilbert transform.
"""
self.signal = sps.hilbert(self.signal)
return
def calculate_amplitude(self):
"""
Calculates the amplitude of the analytic signal. Uses pre-filter
signal to do this.
"""
#
if self.n_signals != 1:
signal_orig = self.signal_array.mean(axis=1)
else:
signal_orig = self.signal_array
self.amplitude = np.abs(sps.hilbert(signal_orig))
if not np.isnan(self.AMPINVOLS):
self.amplitude *= self.AMPINVOLS
return
def calculate_power_dissipation(self):
"""
Calculates the power dissipation using amplitude, phase, and frequency
and the Cleveland eqn (see DOI:10.1063/1.121434)
"""
phase = self.phase # + np.pi/2 #offsets the phase to be pi/2 at resonance
A = self.k / self.Q * self.amplitude ** 2 * (self.inst_freq + self.drive_freq)
B = self.Q * self.DriveAmplitude * np.sin(phase) / self.amplitude
C = self.inst_freq / self.drive_freq
self.power_dissipated = A * (B - C)
return
def calculate_phase(self, correct_slope=True):
"""
Gets the phase of the signal and correct the slope by removing
the drive phase.
:param correct_slope:
:type correct_slope: bool, optional
"""
# Unwrap the phase.
self.phase = np.unwrap(np.angle(self.signal))
try:
if correct_slope:
# Remove the drive from phase.
# self.phase -= (2 * np.pi * self.drive_freq *
# np.arange(self.n_points) / self.sampling_rate)
# A curve fit on the initial part to make sure that it worked.
start = int(0.3 * self.tidx)
end = int(0.7 * self.tidx)
fit = self.phase[start:end]
xfit = np.polyfit(np.arange(start, end), fit, 1)
# Remove the fit from phase.
self.phase -= (xfit[0] * np.arange(self.n_points)) + xfit[1]
except:
self.phase -= (2 * np.pi * self.drive_freq *
np.arange(self.n_points) / self.sampling_rate)
self.phase = -self.phase # need to correct for negative in DDHO solution
self.phase += np.pi / 2 # corrects to be at resonance pre-trigger
return
def calculate_inst_freq(self):
"""
Calculates the first derivative of the phase using Savitzky-Golay
filter.
"""
dtime = 1 / self.sampling_rate # Time step.
# Do a Savitzky-Golay smoothing derivative
# using 5 point 1st order polynomial.
# -self.phase to correct for sign in DDHO solution
self.inst_freq_raw = sps.savgol_filter(-self.phase, 5, 1, deriv=1,
delta=dtime)
# Bring trigger to zero.
self.tidx = int(self.tidx)
self.inst_freq = self.inst_freq_raw - self.inst_freq_raw[self.tidx]
return
def calculate_cwt(self, f_center=None, verbose=False, optimize=False, fit=False,
calc_phase=False):
'''
Calculate instantaneous frequency using a continuous wavelet transform with the
wavelet specified in self.wavelet. See the PyWavelets CWT documentation.
:param f_center:
:type f_center:
:param verbose:
:type verbose: bool, optional
:param optimize: Currently a placeholder for iteratively determining wavelet scales
:type optimize: bool, optional
:param fit: Whether to curve-fit for ridge finding or use a parabolic approximation
:type fit: bool, optional
:param calc_phase: Calculates the phase (not usually needed)
:type calc_phase: bool, optional
'''
# wavlist = pywt.wavelist(kind='continuous')
# w0, wavelet_increment, cwt_scale = self.__get_cwt__()
# determine if scales will capture the relevant frequency
if not f_center:
f_center = self.drive_freq
dt = 1 / self.sampling_rate
sc = pywt.scale2frequency(self.wavelet, self.scales) / dt
if verbose:
print('Wavelet scale from', np.min(sc), 'to', np.max(sc))
if f_center < np.min(sc) or f_center > np.max(sc):
raise ValueError('Choose a scale that captures frequency of interest')
if optimize:
print('!')
drive_bin = self.scales[np.searchsorted(sc, f_center)]
hi = int(1.2 * drive_bin)
lo = int(0.8 * drive_bin)
self.scales = np.arange(hi, lo, -0.1)
spectrogram, freq = pywt.cwt(self.signal, self.scales, self.wavelet, sampling_period=dt)
if not fit:
inst_freq, amplitude, _ = parab.ridge_finder(np.abs(spectrogram), np.arange(len(freq)))
# slow serial curve fitting
else:
inst_freq = np.zeros(self.n_points)
amplitude = np.zeros(self.n_points)
for c in range(spectrogram.shape[1]):
SIG = spectrogram[:, c]
if fit:
pk = np.argmax(np.abs(SIG))
popt = np.polyfit(np.arange(20),
np.abs(SIG[pk - 10:pk + 10]), 2)
inst_freq[c] = -0.5 * popt[1] / popt[0]
amplitude[c] = np.abs(SIG)[pk]
# rescale to correct frequency
inst_freq = pywt.scale2frequency(self.wavelet, inst_freq + self.scales[0]) / dt
if calc_phase:
phase = spg.cumtrapz(inst_freq)
phase = np.append(phase, phase[-1])
else:
phase = np.zeros(len(inst_freq))
tidx = int(self.tidx * len(inst_freq) / self.n_points)
self.amplitude = amplitude
self.inst_freq_raw = inst_freq
self.inst_freq = -1 * (inst_freq - inst_freq[tidx]) # -1 due to way scales are ordered
self.spectrogram = np.abs(spectrogram)
self.wavelet_freq = freq # the wavelet frequencies
# subtract the w*t line (drive frequency line) from phase
if calc_phase:
start = int(0.3 * tidx)
end = int(0.7 * tidx)
xfit = np.polyfit(np.arange(start, end), phase[start:end], 1)
phase -= (xfit[0] * np.arange(len(inst_freq))) + xfit[1]
self.phase = phase
return
def calculate_stft(self, nfft=200, calc_phase=False):
'''
Sliding FFT approach
:param nfft: Length of the FFT calculated in the spectrogram. More points is much slower,
but a longer FFT gives finer frequency bin spacing
:type nfft: int
:param calc_phase: Calculates the phase (not usually needed)
:type calc_phase: bool, optional
'''
pts_per_ncycle = int(self.fft_time_res * self.sampling_rate)
if nfft < pts_per_ncycle:
print('Error with nfft setting')
nfft = pts_per_ncycle
if pts_per_ncycle > len(self.signal):
pts_per_ncycle = len(self.signal)
# drivebin = int(self.drive_freq / (self.sampling_rate / nfft ))
freq, times, spectrogram = sps.spectrogram(self.signal,
self.sampling_rate,
nperseg=pts_per_ncycle,
noverlap=pts_per_ncycle - 1,
nfft=nfft,
window=self.window,
mode='magnitude')
# Parabolic ridge finder
inst_freq, amplitude, _ = parab.ridge_finder(spectrogram, freq)
# Correctly pad the signals
_pts = self.n_points - len(inst_freq)
_pre = int(np.floor(_pts / 2))
_post = int(np.ceil(_pts / 2))
inst_freq = np.pad(inst_freq, (_pre, _post))
amplitude = np.pad(amplitude, (_pre, _post))
if calc_phase:
phase = spg.cumtrapz(inst_freq)
phase = np.append(phase, phase[-1])
else:
phase = np.zeros(len(inst_freq))
tidx = int(self.tidx * len(inst_freq) / self.n_points)
self.amplitude = amplitude
self.inst_freq_raw = inst_freq
self.inst_freq = inst_freq - inst_freq[tidx]
self.spectrogram = spectrogram
self.stft_freq = freq
self.stft_times = times
# subtract the w*t line (drive frequency line) from phase
if calc_phase:
start = int(0.3 * tidx)
end = int(0.7 * tidx)
xfit = np.polyfit(np.arange(start, end), phase[start:end], 1)
phase -= (xfit[0] * np.arange(len(inst_freq))) + xfit[1]
self.phase = phase
return
def calculate_nfmd(self, calc_phase=False, override_window=True, verbose=False):
'''
Nonstationary Fourier Mode Decomposition Approach
:param calc_phase: Calculates the Phase (not usually needed)
:type calc_phase: bool, optional
:param override_window: Automatically adjusts the window to an integer number of drive cycles
:type override_window: bool, optional
:param verbose: Console feedback
:type verbose: bool, optional
'''
if not self.signal.any():
self.signal = self.signal_array
self.average()
z = self.signal
if override_window:
win_size_cycle = int(self.sampling_rate / self.drive_freq)
self.window_size = (self.window_size // win_size_cycle) * win_size_cycle
if verbose:
print('window size automatically adjusted to ', self.window_size)
nfmd = NFMD(z / np.std(z),
num_freqs=self.num_freqs,
window_size=self.window_size,
optimizer_opts=self.optimizer_opts,
max_iters=self.max_iters,
target_loss=self.target_loss)
if verbose:
freqs, A, losses, indices = nfmd.decompose_signal(self.update_freq)
else:
freqs, A, losses, indices = nfmd.decompose_signal()
dt = 1 / self.sampling_rate
self.n_freqs = nfmd.correct_frequencies(dt=dt)
self.n_amps = nfmd.compute_amps()
self.n_mean = nfmd.compute_mean()
self.inst_freq = self.n_freqs[:, 0]
self.amplitude = self.n_amps[:, 0]
if calc_phase:
phase = spg.cumtrapz(self.inst_freq)
self.phase = np.append(phase, phase[-1])
else:
self.phase = self.n_mean
return
def find_tfp(self):
"""
Calculate tfp and shift based on the self.fit_form and self.fit selections
"""
ridx = int(self.roi * self.sampling_rate)
cut = np.copy(self.inst_freq[self.tidx:(self.tidx + ridx)])
cut -= self.inst_freq[self.tidx]
self.cut = cut
t = np.arange(cut.shape[0]) / self.sampling_rate
try:
if not self.fit:
tfp_calc.find_minimum(self, cut)
elif self.fit_form == 'sum':
tfp_calc.fit_freq_sum(self, cut, t)
elif self.fit_form == 'exp':
if self.method == 'nfmd':
cut = np.copy(self.phase[self.tidx:(self.tidx + ridx)])
cut -= self.phase[self.tidx]
self.cut = cut
tfp_calc.fit_freq_exp(self, cut, t)
elif self.fit_form == 'ringdown':
cut = self.amplitude[self.tidx:(self.tidx + ridx)]
tfp_calc.fit_ringdown(self, cut, t)
elif self.fit_form == 'product':
tfp_calc.fit_freq_product(self, cut, t)
elif self.fit_form == 'phase':
cut = -1 * (self.phase[self.tidx:(self.tidx + ridx)] - self.phase[self.tidx])
tfp_calc.fit_phase(self, cut, t)
except:
self.tfp = np.nan
self.shift = np.nan
self.best_fit = np.zeros(cut.shape[0])
print('error with fitting')
if not (self.method == 'nfmd' and self.fit_form == 'exp'):
self.cut += self.inst_freq[self.tidx]
self.best_fit += self.inst_freq[self.tidx]
else:
self.cut += self.phase[self.tidx]
self.best_fit += self.phase[self.tidx]
del cut
return
def restore_signal(self):
"""
Restores the signal length and position of trigger to original
values.
"""
# Difference between current and original values.
d_trig = int(self._tidx_orig - self.tidx)
d_points = int(self._n_points_orig - self.n_points)
# Check if the signal length can accommodate the shift or not.
if d_trig >= d_points:
# Pad from left and set the original length.
self.inst_freq = np.pad(self.inst_freq, (d_trig, 0), 'edge')
self.inst_freq = self.inst_freq[:self._n_points_orig]
self.phase = np.pad(self.phase, (d_trig, 0), 'edge')
self.phase = self.phase[:self._n_points_orig]
self.amplitude = np.pad(self.amplitude, (d_trig, 0), 'edge')
self.amplitude = self.amplitude[:self._n_points_orig]
else:
# Calculate how many points are needed for padding from the right.
pad_right = d_points - d_trig
self.inst_freq = np.pad(self.inst_freq, (d_trig, pad_right),
'edge')
self.phase = np.pad(self.phase, (d_trig, pad_right),
'edge')
self.amplitude = np.pad(self.amplitude, (d_trig, pad_right),
'edge')
# Set the public variables back to original values.
self.tidx = self._tidx_orig
self.n_points = self._n_points_orig
return
def plot(self, newplot=True, fit=True):
"""
Quick visualization of best_fit and cut.
:param newplot: generates a new plot (True) or plots on existing plot figure (False)
:type newplot: bool, optional
:param fit: Overlays fit on the instantaneous frequency image
:type fit: bool, optional
"""
if newplot:
fig, a = plt.subplots(nrows=3, figsize=(6, 9), facecolor='white')
dt = 1 / self.sampling_rate
ridx = int(self.roi * self.sampling_rate)
fidx = int(self.tidx)
cut = [fidx, (fidx + ridx)]
tx = np.arange(cut[0], cut[1]) * dt
a[0].plot(tx * 1e3, self.inst_freq[cut[0]:cut[1]], 'r-')
if fit:
if self.fit_form == 'ringdown':
a[1].plot(tx * 1e3, self.best_fit, 'g--')
elif self.fit_form == 'exp' and self.method == 'nfmd':
a[2].plot(tx * 1e3, self.best_fit + self.phase[self.tidx], 'g--')
else:
a[0].plot(tx * 1e3, self.best_fit, 'g--')
a[1].plot(tx * 1e3, self.amplitude[cut[0]:cut[1]], 'b')
if self.fit_form == 'exp' and self.method == 'nfmd':
a[2].plot(tx * 1e3, self.phase[cut[0]:cut[1]], 'm')
else:
a[2].plot(tx * 1e3, self.phase[cut[0]:cut[1]] * 180 / np.pi, 'm')
a[0].set_title('Instantaneous Frequency')
a[0].set_ylabel('Frequency Shift (Hz)')
a[1].set_ylabel('Amplitude (nm)')
a[2].set_ylabel('Phase (deg)')
a[2].set_xlabel('Time (ms)')
plt.tight_layout()
return
def generate_inst_freq(self, timing=False):
"""
Generates the instantaneous frequency
:param timing: prints the time to execute (for debugging)
:type timing: bool, optional
:returns: tuple (inst_freq, amplitude, phase)
WHERE
array_like inst_freq is the instantaneous frequency of the signal, in the format (n_points,)
array_like amplitude is the instantaneous amplitude of the signal, in the format (n_points,)
array_like phase is the instantaneous phase of the signal, in the format (n_points,)
"""
if timing:
t1 = time.time()
# Remove DC component, first.
if self.method != 'nfmd':
self.remove_dc()
# Phase-lock signals.
# self.phase_lock()
# Average signals.
self.average()
# Remove DC component again, introduced by phase-locking.
# self.remove_dc()
# Check the drive frequency.
if self.check_drive and self.method != 'nfmd':
self.check_drive_freq()
# DWT Denoise
# self.dwt_denoise()
if self.method == 'wavelet':
# Calculate instantaneous frequency using wavelet transform.
self.calculate_cwt(**self.wavelet_params)
elif self.method == 'stft':
# Calculate instantaneous frequency using sliding FFT
self.calculate_stft(**self.fft_params)
elif self.method == 'nfmd':
# Nonstationary Fourier Mode Decomposition
self.calculate_nfmd()
elif self.method == 'hilbert':
# Hilbert transform method
# Apply window.
if self.window != 0:
self.apply_window()
# Filter the signal with a filter, if wanted.
if self.bandpass_filter == 1:
self.fir_filter()
elif self.bandpass_filter == 2:
self.iir_filter()
# Get the analytical signal doing a Hilbert transform.
self.hilbert()
# Filter out oscillatory noise from amplitude
if self.filter_amplitude:
self.amplitude_filter()
else:
raise ValueError('Invalid analysis method! Valid options: hilbert, wavelet, stft, nfmd')
if timing:
print('Time:', time.time() - t1, 's')
# Filter out oscillatory noise from instantaneous frequency
if self.filter_frequency:
self.frequency_filter()
return self.inst_freq, self.amplitude, self.phase
def analyze(self):
"""
Analyzes the pixel with the given method.
:returns: tuple (tfp, shift, inst_freq)
WHERE
float tfp is time from trigger to first-peak, in seconds.
float shift is frequency shift from trigger to first-peak, in Hz.
array_like inst_freq is the instantaneous frequency of the signal, in the format (n_points,)
"""
self.inst_freq, self.amplitude, self.phase = self.generate_inst_freq()
# If it's a recombination image invert it to find minimum.
if self.recombination:
self.inst_freq = self.inst_freq * -1
# Find where the minimum is.
self.find_tfp()
# Restore the length due to FIR filter being causal
if self.method == 'hilbert':
self.restore_signal()
# If it's a recombination image invert it to find minimum.
if self.recombination:
self.inst_freq = self.inst_freq * -1
self.best_fit = self.best_fit * -1
self.cut = self.cut * -1
if self.phase_fitting:
return self.tfp, self.shift, self.phase
else:
return self.tfp, self.shift, self.inst_freq
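# Minimal end-to-end sketch (hypothetical file paths; mirrors the docstring example above):
#   signal_array = pixel_utils.load.signal('../data/SW_0000.ibw')
#   n_pixels, params = pixel_utils.load.configuration('../data/parameters.cfg')
#   p = Pixel(signal_array, params, method='hilbert', fit_form='product')
#   tfp, shift, inst_freq = p.analyze()
#   p.plot()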
|
""" Python Character Mapping Codec generated z 'VENDORS/MICSFT/PC/CP862.TXT' przy gencodec.py.
"""#"
zaimportuj codecs
### Codec APIs
klasa Codec(codecs.Codec):
def encode(self,input,errors='strict'):
zwróć codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
zwróć codecs.charmap_decode(input,errors,decoding_table)
klasa IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=Nieprawda):
zwróć codecs.charmap_encode(input,self.errors,encoding_map)[0]
klasa IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=Nieprawda):
zwróć codecs.charmap_decode(input,self.errors,decoding_table)[0]
klasa StreamWriter(Codec,codecs.StreamWriter):
dalej
klasa StreamReader(Codec,codecs.StreamReader):
dalej
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp862',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
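# Minimal usage sketch (assumes this module is registered with a codecs search
# function, as the stdlib does for encodings.cp862):
#   import codecs
#   codecs.register(lambda name: getregentry() if name == 'cp862' else None)
#   b'\x80\x81'.decode('cp862')    # -> '\u05d0\u05d1' (HEBREW ALEF, BET)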
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
'\u05d1' # 0x0081 -> HEBREW LETTER BET
'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
'\u05d3' # 0x0083 -> HEBREW LETTER DALET
'\u05d4' # 0x0084 -> HEBREW LETTER HE
'\u05d5' # 0x0085 -> HEBREW LETTER VAV
'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x0087 -> HEBREW LETTER HET
'\u05d8' # 0x0088 -> HEBREW LETTER TET
'\u05d9' # 0x0089 -> HEBREW LETTER YOD
'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
'\u05db' # 0x008b -> HEBREW LETTER KAF
'\u05dc' # 0x008c -> HEBREW LETTER LAMED
'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
'\u05de' # 0x008e -> HEBREW LETTER MEM
'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x0090 -> HEBREW LETTER NUN
'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x0094 -> HEBREW LETTER PE
'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
'\u05e7' # 0x0097 -> HEBREW LETTER QOF
'\u05e8' # 0x0098 -> HEBREW LETTER RESH
'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
'\u05ea' # 0x009a -> HEBREW LETTER TAV
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x05d0: 0x0080, # HEBREW LETTER ALEF
0x05d1: 0x0081, # HEBREW LETTER BET
0x05d2: 0x0082, # HEBREW LETTER GIMEL
0x05d3: 0x0083, # HEBREW LETTER DALET
0x05d4: 0x0084, # HEBREW LETTER HE
0x05d5: 0x0085, # HEBREW LETTER VAV
0x05d6: 0x0086, # HEBREW LETTER ZAYIN
0x05d7: 0x0087, # HEBREW LETTER HET
0x05d8: 0x0088, # HEBREW LETTER TET
0x05d9: 0x0089, # HEBREW LETTER YOD
0x05da: 0x008a, # HEBREW LETTER FINAL KAF
0x05db: 0x008b, # HEBREW LETTER KAF
0x05dc: 0x008c, # HEBREW LETTER LAMED
0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
0x05de: 0x008e, # HEBREW LETTER MEM
0x05df: 0x008f, # HEBREW LETTER FINAL NUN
0x05e0: 0x0090, # HEBREW LETTER NUN
0x05e1: 0x0091, # HEBREW LETTER SAMEKH
0x05e2: 0x0092, # HEBREW LETTER AYIN
0x05e3: 0x0093, # HEBREW LETTER FINAL PE
0x05e4: 0x0094, # HEBREW LETTER PE
0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
0x05e6: 0x0096, # HEBREW LETTER TSADI
0x05e7: 0x0097, # HEBREW LETTER QOF
0x05e8: 0x0098, # HEBREW LETTER RESH
0x05e9: 0x0099, # HEBREW LETTER SHIN
0x05ea: 0x009a, # HEBREW LETTER TAV
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
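# Illustrative sketch (assumption: this table belongs to a cp862-style
# single-byte codec module that registers itself through codecs.register,
# like the standard library encodings modules). With such a codec installed,
# the two tables above drive round-trip conversion, e.g.:
#
#   b'\x80'.decode('cp862')    # -> '\u05d0'  (HEBREW LETTER ALEF)
#   '\u2500'.encode('cp862')   # -> b'\xc4'   (BOX DRAWINGS LIGHT HORIZONTAL)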
|
import sys
# Let's do some dep checking and handle missing ones gracefully
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import Qt
import PyQt4.QtCore as QtCore
except ImportError:
print "You need to have PyQT installed to run Electrum-RADC in graphical mode."
print "If you have pip installed try 'sudo pip install pyqt' if you are on Debian/Ubuntu try 'sudo apt-get install python-qt4'."
sys.exit(0)
from decimal import Decimal as D
from electrum_doge.util import get_resource_path as rsrc
from electrum_doge.bitcoin import is_valid
from electrum_doge.i18n import _
import decimal
import json
import os.path
import random
import re
import time
from electrum_doge.wallet import Wallet, WalletStorage
import webbrowser
import history_widget
import receiving_widget
from electrum_doge import util
import datetime
from electrum_doge.version import ELECTRUM_VERSION as electrum_version
from electrum_doge.util import format_satoshis, age
from main_window import ElectrumWindow
import shutil
from util import *
bitcoin = lambda v: v * 100000000
def IconButton(filename, parent=None):
pixmap = QPixmap(filename)
icon = QIcon(pixmap)
return QPushButton(icon, "", parent)
def resize_line_edit_width(line_edit, text_input):
metrics = QFontMetrics(qApp.font())
# Create an extra character to add some space on the end
text_input += "A"
line_edit.setMinimumWidth(metrics.width(text_input))
def load_theme_name(theme_path):
try:
with open(os.path.join(theme_path, "name.cfg")) as name_cfg_file:
return name_cfg_file.read().rstrip("\n").strip()
except IOError:
return None
def theme_dirs_from_prefix(prefix):
if not os.path.exists(prefix):
return []
theme_paths = {}
for potential_theme in os.listdir(prefix):
theme_full_path = os.path.join(prefix, potential_theme)
theme_css = os.path.join(theme_full_path, "style.css")
if not os.path.exists(theme_css):
continue
theme_name = load_theme_name(theme_full_path)
if theme_name is None:
continue
theme_paths[theme_name] = prefix, potential_theme
return theme_paths
def load_theme_paths():
theme_paths = {}
theme_paths.update(theme_dirs_from_prefix(util.data_dir()))
return theme_paths
class TransactionWindow(QDialog):
def set_label(self):
label = unicode(self.label_edit.text())
self.parent.wallet.labels[self.tx_id] = label
super(TransactionWindow, self).accept()
def __init__(self, transaction_id, parent):
super(TransactionWindow, self).__init__()
self.tx_id = str(transaction_id)
self.parent = parent
self.setModal(True)
self.resize(200,100)
self.setWindowTitle(_("Transaction successfully sent"))
self.layout = QGridLayout(self)
history_label = "%s\n%s" % (_("Your transaction has been sent."), _("Please enter a label for this transaction for future reference."))
self.layout.addWidget(QLabel(history_label))
self.label_edit = QLineEdit()
self.label_edit.setPlaceholderText(_("Transaction label"))
self.label_edit.setObjectName("label_input")
self.label_edit.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.label_edit.setFocusPolicy(Qt.ClickFocus)
self.layout.addWidget(self.label_edit)
self.save_button = QPushButton(_("Save"))
self.layout.addWidget(self.save_button)
self.save_button.clicked.connect(self.set_label)
self.exec_()
class MiniWindow(QDialog):
def __init__(self, actuator, expand_callback, config):
super(MiniWindow, self).__init__()
self.actuator = actuator
self.config = config
self.btc_balance = None
self.use_exchanges = ["Blockchain", "CoinDesk"]
self.quote_currencies = ["BRL", "CNY", "EUR", "GBP", "RUB", "USD"]
self.actuator.set_configured_currency(self.set_quote_currency)
self.actuator.set_configured_exchange(self.set_exchange)
# Needed because price discovery is done in a different thread
# which needs to be sent back to this main one to update the GUI
self.connect(self, SIGNAL("refresh_balance()"), self.refresh_balance)
self.balance_label = BalanceLabel(self.change_quote_currency, self)
self.balance_label.setObjectName("balance_label")
# Bitcoin address code
self.address_input = QLineEdit()
self.address_input.setPlaceholderText(_("Enter a Dogecoin address or contact"))
self.address_input.setObjectName("address_input")
self.address_input.setFocusPolicy(Qt.ClickFocus)
self.address_input.textChanged.connect(self.address_field_changed)
resize_line_edit_width(self.address_input,
"1BtaFUr3qVvAmwrsuDuu5zk6e4s2rxd2Gy")
self.address_completions = QStringListModel()
address_completer = QCompleter(self.address_input)
address_completer.setCaseSensitivity(False)
address_completer.setModel(self.address_completions)
self.address_input.setCompleter(address_completer)
address_layout = QHBoxLayout()
address_layout.addWidget(self.address_input)
self.amount_input = QLineEdit()
self.amount_input.setPlaceholderText(_("... and amount") + " (%s)"%self.actuator.g.base_unit())
self.amount_input.setObjectName("amount_input")
self.amount_input.setFocusPolicy(Qt.ClickFocus)
# This is changed according to the user's displayed balance
self.amount_validator = QDoubleValidator(self.amount_input)
self.amount_validator.setNotation(QDoubleValidator.StandardNotation)
self.amount_validator.setDecimals(8)
self.amount_input.setValidator(self.amount_validator)
# This removes the very ugly OSX highlighting, please leave this in :D
self.address_input.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.amount_input.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.amount_input.textChanged.connect(self.amount_input_changed)
#if self.actuator.g.wallet.seed:
self.send_button = QPushButton(_("&Send"))
#else:
# self.send_button = QPushButton(_("&Create"))
self.send_button.setObjectName("send_button")
self.send_button.setDisabled(True);
self.send_button.clicked.connect(self.send)
# Creating the receive button
self.switch_button = QPushButton( QIcon(":icons/switchgui.png"),'' )
self.switch_button.setMaximumWidth(25)
self.switch_button.setFlat(True)
self.switch_button.clicked.connect(expand_callback)
main_layout = QGridLayout(self)
main_layout.addWidget(self.balance_label, 0, 0, 1, 3)
main_layout.addWidget(self.switch_button, 0, 3)
main_layout.addWidget(self.address_input, 1, 0, 1, 4)
main_layout.addWidget(self.amount_input, 2, 0, 1, 2)
main_layout.addWidget(self.send_button, 2, 2, 1, 2)
self.send_button.setMaximumWidth(125)
self.history_list = history_widget.HistoryWidget()
self.history_list.setObjectName("history")
self.history_list.hide()
self.history_list.setAlternatingRowColors(True)
main_layout.addWidget(self.history_list, 3, 0, 1, 4)
self.receiving = receiving_widget.ReceivingWidget(self)
self.receiving.setObjectName("receiving")
# Add to the right side
self.receiving_box = QGroupBox(_("Select a receiving address"))
extra_layout = QGridLayout()
# Checkbox to filter used addresses
hide_used = QCheckBox(_('Hide used addresses'))
hide_used.setChecked(True)
hide_used.stateChanged.connect(self.receiving.toggle_used)
# Events for receiving addresses
self.receiving.clicked.connect(self.receiving.copy_address)
self.receiving.itemDoubleClicked.connect(self.receiving.edit_label)
self.receiving.itemChanged.connect(self.receiving.update_label)
# Label
extra_layout.addWidget( QLabel(_('Selecting an address will copy it to the clipboard.') + '\n' + _('Double clicking the label will allow you to edit it.') ),0,0)
extra_layout.addWidget(self.receiving, 1,0)
extra_layout.addWidget(hide_used, 2,0)
extra_layout.setColumnMinimumWidth(0,200)
self.receiving_box.setLayout(extra_layout)
main_layout.addWidget(self.receiving_box,0,4,-1,3)
self.receiving_box.hide()
self.main_layout = main_layout
quit_shortcut = QShortcut(QKeySequence("Ctrl+Q"), self)
quit_shortcut.activated.connect(self.close)
close_shortcut = QShortcut(QKeySequence("Ctrl+W"), self)
close_shortcut.activated.connect(self.close)
g = self.config.get("winpos-lite",[4, 25, 351, 149])
self.setGeometry(g[0], g[1], g[2], g[3])
show_hist = self.config.get("gui_show_history",False)
self.show_history(show_hist)
show_hist = self.config.get("gui_show_receiving",False)
self.toggle_receiving_layout(show_hist)
self.setWindowIcon(QIcon(":icons/electrum-doge.png"))
self.setWindowTitle("Electrum-RADC")
self.setWindowFlags(Qt.Window|Qt.MSWindowsFixedSizeDialogHint)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self.setObjectName("main_window")
def context_menu(self):
view_menu = QMenu()
themes_menu = view_menu.addMenu(_("&Themes"))
selected_theme = self.actuator.selected_theme()
theme_group = QActionGroup(self)
for theme_name in self.actuator.theme_names():
theme_action = themes_menu.addAction(theme_name)
theme_action.setCheckable(True)
if selected_theme == theme_name:
theme_action.setChecked(True)
class SelectThemeFunctor:
def __init__(self, theme_name, toggle_theme):
self.theme_name = theme_name
self.toggle_theme = toggle_theme
def __call__(self, checked):
if checked:
self.toggle_theme(self.theme_name)
delegate = SelectThemeFunctor(theme_name, self.toggle_theme)
theme_action.toggled.connect(delegate)
theme_group.addAction(theme_action)
view_menu.addSeparator()
show_receiving = view_menu.addAction(_("Show Receiving addresses"))
show_receiving.setCheckable(True)
show_receiving.toggled.connect(self.toggle_receiving_layout)
show_receiving.setChecked(self.config.get("gui_show_receiving",False))
show_history = view_menu.addAction(_("Show History"))
show_history.setCheckable(True)
show_history.toggled.connect(self.show_history)
show_history.setChecked(self.config.get("gui_show_history",False))
return view_menu
def toggle_theme(self, theme_name):
self.actuator.change_theme(theme_name)
# Recompute style globally
qApp.style().unpolish(self)
qApp.style().polish(self)
def closeEvent(self, event):
g = self.geometry()
self.config.set_key("winpos-lite", [g.left(),g.top(),g.width(),g.height()],True)
self.actuator.g.closeEvent(event)
qApp.quit()
def pay_from_URI(self, URI):
try:
dest_address, amount, label, message, request_url = util.parse_URI(URI)
except:
return
self.address_input.setText(dest_address)
self.address_field_changed(dest_address)
self.amount_input.setText(str(amount))
def activate(self):
pass
def deactivate(self):
pass
def set_exchange(self, use_exchange):
if use_exchange not in self.use_exchanges:
return
self.use_exchanges.remove(use_exchange)
self.use_exchanges.insert(0, use_exchange)
self.refresh_balance()
def set_quote_currency(self, currency):
"""Set and display the fiat currency country."""
if currency not in self.quote_currencies:
return
self.quote_currencies.remove(currency)
self.quote_currencies.insert(0, currency)
self.refresh_balance()
def change_quote_currency(self, forward=True):
if forward:
self.quote_currencies = \
self.quote_currencies[1:] + self.quote_currencies[0:1]
else:
self.quote_currencies = \
self.quote_currencies[-1:] + self.quote_currencies[0:-1]
self.actuator.set_config_currency(self.quote_currencies[0])
self.refresh_balance()
def refresh_balance(self):
if self.btc_balance is None:
# Price has been discovered before wallet has been loaded
# and server connect... so bail.
return
self.set_balances(self.btc_balance)
self.amount_input_changed(self.amount_input.text())
def set_balances(self, btc_balance):
"""Set the dogecoin balance and update the amount label accordingly."""
self.btc_balance = btc_balance
quote_text = self.create_quote_text(btc_balance)
if quote_text:
quote_text = "(%s)" % quote_text
amount = self.actuator.g.format_amount(btc_balance)
unit = self.actuator.g.base_unit()
self.balance_label.set_balance_text(amount, unit, quote_text)
self.setWindowTitle("Electrum-RADC %s - %s %s" % (electrum_version, amount, unit))
def amount_input_changed(self, amount_text):
"""Update the number of dogecoins displayed."""
self.check_button_status()
try:
amount = D(str(amount_text)) * (10**self.actuator.g.decimal_point)
except decimal.InvalidOperation:
self.balance_label.show_balance()
else:
quote_text = self.create_quote_text(amount)
if quote_text:
self.balance_label.set_amount_text(quote_text)
self.balance_label.show_amount()
else:
self.balance_label.show_balance()
def create_quote_text(self, btc_balance):
"""Return a string copy of the amount fiat currency the
user has in dogecoins."""
from electrum_doge.plugins import run_hook
r = {}
run_hook('get_fiat_balance_text', btc_balance, r)
return r.get(0,'')
def send(self):
if self.actuator.send(self.address_input.text(),
self.amount_input.text(), self):
self.address_input.setText("")
self.amount_input.setText("")
def check_button_status(self):
"""Check that the dogecoin address is valid and that something
is entered in the amount before making the send button clickable."""
try:
value = D(str(self.amount_input.text())) * (10**self.actuator.g.decimal_point)
except decimal.InvalidOperation:
value = None
# self.address_input.property(...) returns a qVariant, not a bool.
# The == is needed to properly invoke a comparison.
if (self.address_input.property("isValid") == True and
value is not None and 0 < value <= self.btc_balance):
self.send_button.setDisabled(False)
else:
self.send_button.setDisabled(True)
def address_field_changed(self, address):
# label or alias, with address in brackets
match2 = re.match("(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>",
address)
if match2:
address = match2.group(2)
self.address_input.setText(address)
if is_valid(address):
self.check_button_status()
self.address_input.setProperty("isValid", True)
self.recompute_style(self.address_input)
else:
self.send_button.setDisabled(True)
self.address_input.setProperty("isValid", False)
self.recompute_style(self.address_input)
if len(address) == 0:
self.address_input.setProperty("isValid", None)
self.recompute_style(self.address_input)
def recompute_style(self, element):
self.style().unpolish(element)
self.style().polish(element)
def copy_address(self):
receive_popup = ReceivePopup(self.receive_button)
self.actuator.copy_address(receive_popup)
def update_completions(self, completions):
self.address_completions.setStringList(completions)
def update_history(self, tx_history):
self.history_list.empty()
for item in tx_history[-10:]:
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
label = self.actuator.g.wallet.get_label(tx_hash)[0]
v_str = self.actuator.g.format_amount(value, True)
self.history_list.append(label, v_str, age(timestamp))
def the_website(self):
webbrowser.open("http://electrum-doge.com")
def toggle_receiving_layout(self, toggle_state):
if toggle_state:
self.receiving_box.show()
else:
self.receiving_box.hide()
self.config.set_key("gui_show_receiving", toggle_state)
def show_history(self, toggle_state):
if toggle_state:
self.main_layout.setRowMinimumHeight(3,200)
self.history_list.show()
else:
self.main_layout.setRowMinimumHeight(3,0)
self.history_list.hide()
self.config.set_key("gui_show_history", toggle_state)
class BalanceLabel(QLabel):
SHOW_CONNECTING = 1
SHOW_BALANCE = 2
SHOW_AMOUNT = 3
def __init__(self, change_quote_currency, parent=None):
super(QLabel, self).__init__(_("Connecting..."), parent)
self.change_quote_currency = change_quote_currency
self.state = self.SHOW_CONNECTING
self.balance_text = ""
self.amount_text = ""
self.parent = parent
def mousePressEvent(self, event):
"""Change the fiat currency selection if window background is clicked."""
if self.state != self.SHOW_CONNECTING:
if event.button() == Qt.LeftButton:
self.change_quote_currency()
else:
position = event.globalPos()
menu = self.parent.context_menu()
menu.exec_(position)
def set_balance_text(self, amount, unit, quote_text):
"""Set the amount of dogecoins in the gui."""
if self.state == self.SHOW_CONNECTING:
self.state = self.SHOW_BALANCE
self.balance_text = "<span style='font-size: 18pt'>%s</span>"%amount\
+ " <span style='font-size: 10pt'>%s</span>" % unit \
+ " <span style='font-size: 10pt'>%s</span>" % quote_text
if self.state == self.SHOW_BALANCE:
self.setText(self.balance_text)
def set_amount_text(self, quote_text):
self.amount_text = "<span style='font-size: 10pt'>%s</span>" % quote_text
if self.state == self.SHOW_AMOUNT:
self.setText(self.amount_text)
def show_balance(self):
if self.state == self.SHOW_AMOUNT:
self.state = self.SHOW_BALANCE
self.setText(self.balance_text)
def show_amount(self):
if self.state == self.SHOW_BALANCE:
self.state = self.SHOW_AMOUNT
self.setText(self.amount_text)
def ok_cancel_buttons(dialog):
row_layout = QHBoxLayout()
row_layout.addStretch(1)
ok_button = QPushButton(_("OK"))
row_layout.addWidget(ok_button)
ok_button.clicked.connect(dialog.accept)
cancel_button = QPushButton(_("Cancel"))
row_layout.addWidget(cancel_button)
cancel_button.clicked.connect(dialog.reject)
return row_layout
class PasswordDialog(QDialog):
def __init__(self, parent):
super(QDialog, self).__init__(parent)
self.setModal(True)
self.password_input = QLineEdit()
self.password_input.setEchoMode(QLineEdit.Password)
main_layout = QVBoxLayout(self)
message = _('Please enter your password')
main_layout.addWidget(QLabel(message))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(self.password_input, 1, 1)
main_layout.addLayout(grid)
main_layout.addLayout(ok_cancel_buttons(self))
self.setLayout(main_layout)
def run(self):
if not self.exec_():
return
return unicode(self.password_input.text())
class ReceivePopup(QDialog):
def leaveEvent(self, event):
self.close()
def setup(self, address):
label = QLabel(_("Copied your Dogecoin address to the clipboard!"))
address_display = QLineEdit(address)
address_display.setReadOnly(True)
resize_line_edit_width(address_display, address)
main_layout = QVBoxLayout(self)
main_layout.addWidget(label)
main_layout.addWidget(address_display)
self.setMouseTracking(True)
self.setWindowTitle("Electrum-RADC - " + _("Receive Dogecoin payment"))
self.setWindowFlags(Qt.Window|Qt.FramelessWindowHint|
Qt.MSWindowsFixedSizeDialogHint)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
#self.setFrameStyle(QFrame.WinPanel|QFrame.Raised)
#self.setAlignment(Qt.AlignCenter)
def popup(self):
parent = self.parent()
top_left_pos = parent.mapToGlobal(parent.rect().bottomLeft())
self.move(top_left_pos)
center_mouse_pos = self.mapToGlobal(self.rect().center())
QCursor.setPos(center_mouse_pos)
self.show()
class MiniActuator:
"""Initialize the definitions relating to themes and
sending/receiving dogecoins."""
def __init__(self, main_window):
"""Retrieve the gui theme used in previous session."""
self.g = main_window
self.theme_name = self.g.config.get('litegui_theme','Doge')
self.theme_name = 'Doge'
self.themes = load_theme_paths()
self.load_theme()
def load_theme(self):
"""Load theme retrieved from wallet file."""
try:
theme_prefix, theme_path = self.themes[self.theme_name]
except KeyError:
util.print_error("Theme not found!", self.theme_name)
return
full_theme_path = "%s/%s/style.css" % (theme_prefix, theme_path)
with open(full_theme_path) as style_file:
qApp.setStyleSheet(style_file.read())
def theme_names(self):
"""Sort themes."""
return sorted(self.themes.keys())
def selected_theme(self):
"""Select theme."""
return self.theme_name
def change_theme(self, theme_name):
"""Change theme."""
self.theme_name = theme_name
self.g.config.set_key('litegui_theme',theme_name)
self.load_theme()
def set_configured_exchange(self, set_exchange):
use_exchange = self.g.config.get('use_exchange')
if use_exchange is not None:
set_exchange(use_exchange)
def set_configured_currency(self, set_quote_currency):
"""Set the inital fiat currency conversion country (USD/EUR/GBP) in
the GUI to what it was set to in the wallet."""
currency = self.g.config.get('currency')
# currency can be none when Electrum is used for the first
# time and no setting has been created yet.
if currency is not None:
set_quote_currency(currency)
def set_config_exchange(self, conversion_exchange):
self.g.config.set_key('exchange',conversion_exchange,True)
self.g.update_status()
def set_config_currency(self, conversion_currency):
"""Change the wallet fiat currency country."""
self.g.config.set_key('currency',conversion_currency,True)
self.g.update_status()
def copy_address(self, receive_popup):
"""Copy the wallet addresses into the client."""
addrs = [addr for addr in self.g.wallet.addresses(True)
if not self.g.wallet.is_change(addr)]
# Select most recent addresses from gap limit
addrs = addrs[-self.g.wallet.gap_limit:]
copied_address = random.choice(addrs)
qApp.clipboard().setText(copied_address)
receive_popup.setup(copied_address)
receive_popup.popup()
def waiting_dialog(self, f):
s = Timer()
s.start()
w = QDialog()
w.resize(200, 70)
w.setWindowTitle('Electrum-RADC')
l = QLabel(_('Sending transaction, please wait.'))
vbox = QVBoxLayout()
vbox.addWidget(l)
w.setLayout(vbox)
w.show()
def ff():
s = f()
if s: l.setText(s)
else: w.close()
w.connect(s, QtCore.SIGNAL('timersignal'), ff)
w.exec_()
w.destroy()
def send(self, address, amount, parent_window):
"""Send dogecoins to the target address."""
dest_address = self.fetch_destination(address)
if dest_address is None or not is_valid(dest_address):
QMessageBox.warning(parent_window, _('Error'),
_('Invalid Dogecoin Address') + ':\n' + address, _('OK'))
return False
        amount = D(unicode(amount)) * (10**self.g.decimal_point)
if self.g.wallet.use_encryption:
password_dialog = PasswordDialog(parent_window)
password = password_dialog.run()
if not password:
return
else:
password = None
fee = 0
# 0.1 BTC = 10000000
if amount < bitcoin(1) / 10:
# 0.001 BTC
fee = bitcoin(1) / 1000
try:
tx = self.g.wallet.mktx([(dest_address, amount)], password, fee)
except Exception as error:
QMessageBox.warning(parent_window, _('Error'), str(error), _('OK'))
return False
if tx.is_complete():
h = self.g.wallet.send_tx(tx)
self.waiting_dialog(lambda: False if self.g.wallet.tx_event.isSet() else _("Sending transaction, please wait..."))
status, message = self.g.wallet.receive_tx(h, tx)
if not status:
import tempfile
dumpf = tempfile.NamedTemporaryFile(delete=False)
dumpf.write(tx)
dumpf.close()
print "Dumped error tx to", dumpf.name
QMessageBox.warning(parent_window, _('Error'), message, _('OK'))
return False
TransactionWindow(message, self)
else:
filename = 'unsigned_tx_%s' % (time.mktime(time.gmtime()))
try:
fileName = QFileDialog.getSaveFileName(QWidget(), _("Select a transaction filename"), os.path.expanduser('~/%s' % (filename)))
with open(fileName,'w') as f:
f.write(json.dumps(tx.as_dict(),indent=4) + '\n')
QMessageBox.information(QWidget(), _('Unsigned transaction created'), _("Unsigned transaction was saved to file:") + " " +fileName, _('OK'))
except Exception as e:
QMessageBox.warning(QWidget(), _('Error'), _('Could not write transaction to file: %s' % e), _('OK'))
return True
def fetch_destination(self, address):
recipient = unicode(address).strip()
# alias
match1 = re.match("^(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+)$",
recipient)
# label or alias, with address in brackets
match2 = re.match("(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>",
recipient)
if match1:
dest_address = \
self.g.wallet.get_alias(recipient, True,
self.show_message, self.question)
return dest_address
elif match2:
return match2.group(2)
else:
return recipient
class MiniDriver(QObject):
INITIALIZING = 0
CONNECTING = 1
SYNCHRONIZING = 2
READY = 3
def __init__(self, main_window, mini_window):
super(QObject, self).__init__()
self.g = main_window
self.network = main_window.network
self.window = mini_window
if self.network:
self.network.register_callback('updated',self.update_callback)
self.network.register_callback('status', self.update_callback)
self.state = None
self.initializing()
self.connect(self, SIGNAL("updatesignal()"), self.update)
self.update_callback()
    # This is a hack to work around Qt disliking window property changes
    # made from another thread before the run loop has started.
def update_callback(self):
self.emit(SIGNAL("updatesignal()"))
def update(self):
if not self.network:
self.initializing()
#elif not self.network.interface:
# self.initializing()
elif not self.network.is_connected():
self.connecting()
if self.g.wallet is None:
self.ready()
elif not self.g.wallet.up_to_date:
self.synchronizing()
else:
self.ready()
self.update_balance()
self.update_completions()
self.update_history()
self.window.receiving.update_list()
def initializing(self):
if self.state == self.INITIALIZING:
return
self.state = self.INITIALIZING
self.window.deactivate()
def connecting(self):
if self.state == self.CONNECTING:
return
self.state = self.CONNECTING
self.window.deactivate()
def synchronizing(self):
if self.state == self.SYNCHRONIZING:
return
self.state = self.SYNCHRONIZING
self.window.deactivate()
def ready(self):
if self.state == self.READY:
return
self.state = self.READY
self.window.activate()
def update_balance(self):
conf_balance, unconf_balance = self.g.wallet.get_balance()
balance = D(conf_balance + unconf_balance)
self.window.set_balances(balance)
def update_completions(self):
completions = []
for addr, label in self.g.wallet.labels.items():
if addr in self.g.wallet.addressbook:
completions.append("%s <%s>" % (label, addr))
self.window.update_completions(completions)
def update_history(self):
tx_history = self.g.wallet.get_tx_history()
self.window.update_history(tx_history)
if __name__ == "__main__":
app = QApplication(sys.argv)
with open(rsrc("style.css")) as style_file:
app.setStyleSheet(style_file.read())
mini = MiniWindow()
sys.exit(app.exec_())
|
#!/usr/bin/env python
from typing import Tuple
from redbot.message import headers
from redbot.syntax import rfc7230, rfc7231
from redbot.type import AddNoteMethodType
class keep_alive(headers.HttpHeader):
canonical_name = "Keep-Alive"
description = """\
The `Keep-Alive` header is completely optional; it is defined primarily because the `keep-alive`
connection token implies that such a header exists, not because anyone actually uses it.
Some implementations (e.g., [Apache](http://httpd.apache.org/)) do generate a `Keep-Alive` header
to convey how many requests they're willing to serve on a single connection, what the connection
timeout is and other information. However, this isn't usually used by clients.
It's safe to remove this header if you wish to save a few bytes in the response."""
reference = "https://tools.ietf.org/html/rfc2068#section-19.7.1"
syntax = rfc7230.list_rule(rfc7231.parameter)
list_header = True
deprecated = True
valid_in_requests = True
valid_in_responses = True
def parse(self, field_value: str, add_note: AddNoteMethodType) -> Tuple[str, str]:
try:
attr, attr_val = field_value.split("=", 1)
attr_val = headers.unquote_string(attr_val)
except ValueError:
attr = field_value
attr_val = None
return (attr.lower(), attr_val)
class KeepAliveTest(headers.HeaderTest):
name = "Keep-Alive"
inputs = [b"timeout=30"]
expected_out = [("timeout", "30")]
expected_err = [headers.HEADER_DEPRECATED]
class EmptyKeepAliveTest(headers.HeaderTest):
name = "Keep-Alive"
inputs = [b""]
expected_out = [] # type: ignore
expected_err = [headers.HEADER_DEPRECATED]
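# Illustrative sketch (not part of redbot): because list_header is True, a raw
# field such as "Keep-Alive: timeout=5, max=100" is assumed to be split on
# commas by redbot's list-header machinery, so parse() sees one parameter at a
# time and returns lower-cased, unquoted pairs:
#
#   parse("timeout=5", add_note)  ->  ("timeout", "5")
#   parse("max=100", add_note)    ->  ("max", "100")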
|
"""
A simple test for LightGBM based on scikit-learn.
Tests are not shipped with the source distribution so we include a simple
functional test here that is adapted from:
https://github.com/Microsoft/LightGBM/blob/master/tests/python_package_test/test_sklearn.py
"""
import unittest
import lightgbm as lgb
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
class TestSklearn(unittest.TestCase):
def test_binary(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = log_loss(y_test, gbm.predict_proba(X_test))
self.assertLess(ret, 0.15)
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['binary_logloss'][gbm.best_iteration_ - 1], places=5)
def test_regression(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=50, silent=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 16)
self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
import mock
import pytest
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
@mock.patch("gql.transport.requests.RequestsHTTPTransport.execute")
def test_retries(execute_mock):
expected_retries = 3
execute_mock.side_effect = Exception("fail")
client = Client(
retries=expected_retries,
transport=RequestsHTTPTransport(url="http://swapi.graphene-python.org/graphql"),
)
query = gql(
"""
{
myFavoriteFilm: film(id:"RmlsbToz") {
id
title
episodeId
}
}
"""
)
with pytest.raises(Exception):
client.execute(query)
assert execute_mock.call_count == expected_retries
|
import heterocl as hcl
import numpy as np
def test_partition_before_streaming():
hcl.init()
A = hcl.placeholder((10, 10), "A", dtype=hcl.UInt(8))
def kernel(A):
B = hcl.compute(A.shape, lambda *args : A[args] + 1, "B", dtype=hcl.UInt(8))
return B
target = hcl.platform.zc706
s = hcl.create_schedule([A], kernel)
s.partition(A, hcl.Partition.Block, dim=1, factor=2)
s.to(A, target.xcel)
s.to(kernel.B, target.host)
target.config(compile="vivado_hls", mode="debug")
print(hcl.build(s, target))
def test_partition_after_streaming():
hcl.init()
A = hcl.placeholder((10, 10), "A", dtype=hcl.UInt(8))
def kernel(A):
B = hcl.compute(A.shape, lambda *args : A[args] + 1, "B", dtype=hcl.UInt(8))
return B
target = hcl.platform.zc706
s = hcl.create_schedule([A], kernel)
A_ = s.to(A, target.xcel)
s.partition(A_, hcl.Partition.Block, dim=1, factor=2) # memory optimization
s.to(kernel.B, target.host)
target.config(compile="vivado_hls", mode="debug")
print(hcl.build(s, target))
if __name__ == '__main__':
test_partition_before_streaming()
test_partition_after_streaming()
|
import numpy as np
import pandas as pd
import os
LOCAL_PATH = os.getcwd()
SIMULATIONS_PATH = '../../simulations'
LOG_PATH = '../../log'
FIG_PATH = '../../fig'
os.makedirs(SIMULATIONS_PATH, exist_ok=True)
os.makedirs(LOG_PATH, exist_ok=True)
os.makedirs(FIG_PATH, exist_ok=True)
parameters = {}
default_params = {
'dA': 0.5*(np.sqrt(2)-1),
'dB': 0.5*(np.sqrt(2)+1),
'c': 0.25,
'sigma': .2,
'Nsteps': int(1e5)
}
Dt = 1e-3/default_params['dA']
Ss = np.array([100, 101], dtype=int)
Ntaus = np.array([900, 950, 1000, 1050, 1100])
i = 0
for S in Ss:
for Ntau in Ntaus:
parameters[i] = default_params.copy()
parameters[i]['Dt'] = Dt
parameters[i]['S'] = S
parameters[i]['Ntau'] = Ntau
parameters[i]['tau'] = Ntau*Dt
parameters[i]['rB'] = np.sqrt(S*default_params['c'])*default_params['sigma']
i += 1
pd.DataFrame(parameters).T.to_csv('parameters.csv')
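# Illustrative sketch (not part of this script): a downstream simulation runner
# could reload the grid with pandas, e.g.
#
#   params = pd.read_csv('parameters.csv', index_col=0)
#   for _, row in params.iterrows():
#       run_simulation(row['S'], row['Ntau'], row['tau'])  # hypothetical helper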
|
"""
tests.unit.test_virtualname
~~~~~~~~~~~~~~~~~~~~
"""
import importlib.util
import logging
import os
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class FakeEntry:
def __init__(self, name, path, is_file=True):
self.name = name
self.path = path
self._is_file = is_file
def is_file(self):
return self._is_file
class VirtualNameTestCase(TestCase):
"""
Test that the virtualname is in the module name, to speed up lookup of
modules.
"""
maxDiff = None
@staticmethod
def _import_module(testpath):
spec = importlib.util.spec_from_file_location("tmpmodule", testpath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _check_modules(self, path):
"""
check modules in directory
"""
ret = []
for entry in os.listdir(path):
name, path = os.path.splitext(os.path.basename(entry))[0], entry
if name.startswith(".") or name.startswith("_"):
continue
if os.path.isfile(path) and not name.endswith(".py"):
continue
testpath = (
path if os.path.isfile(path) else os.path.join(path, "__init__.py")
)
module = self._import_module(testpath)
if hasattr(module, "__virtualname__"):
if module.__virtualname__ not in name:
ret.append(
'Virtual name "{}" is not in the module filename "{}": {}'.format(
module.__virtualname__, name, path
)
)
return ret
def test_check_virtualname(self):
"""
Test that the virtualname is in __name__ of the module
"""
errors = []
for entry in os.listdir(RUNTIME_VARS.SALT_CODE_DIR):
name, path = os.path.splitext(os.path.basename(entry))[0], entry
if name.startswith(".") or name.startswith("_") or not os.path.isdir(path):
continue
if name in ("cli", "defaults", "spm", "daemons", "ext", "templates"):
continue
if name == "cloud":
entry = os.path.join(RUNTIME_VARS.SALT_CODE_DIR, "cloud", "clouds")
errors.extend(self._check_modules(entry))
for error in errors:
log.critical(error)
assert not errors
|
from .action_v1 import (
check_parse_error,
check_str_enum,
convert_date,
convert_enum_str,
convert_str_enum,
)
from .helpers import STR2PY, PY2STR, INSP_STR, issub_safe
from datetime import date
from enum import Enum
from functools import partial
"""
As JSON requires string keys, unless dicts are only allowed to be Dict[str, T], we need to
be able to encode values as strings.
Recommendations:
* The string verbs are not intended for direct use.
* Use these verbs for any type that must be represented as a key in a JSON object.
* The standard rules will only handle types that are reliable keys and have obvious string
encodings.
See std.dicts for an example.
"""
def stringify_keys(verb, typ, ctx):
if verb not in (STR2PY, PY2STR, INSP_STR):
return
if typ in (str, int):
if verb == STR2PY:
return typ
elif verb == PY2STR:
return str
elif verb == INSP_STR:
return partial(check_parse_error, parser=typ, error=ValueError)
elif typ == date:
if verb == PY2STR:
return typ.isoformat
elif verb in (STR2PY, INSP_STR):
parse = convert_date
if verb == STR2PY:
return parse
else:
return partial(
check_parse_error, parser=parse, error=(TypeError, ValueError)
)
elif issub_safe(typ, Enum):
if verb == PY2STR:
return partial(convert_enum_str, typ=typ)
elif verb == STR2PY:
return partial(convert_str_enum, typ=typ)
elif verb == INSP_STR:
return partial(check_str_enum, typ=typ)
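# Illustrative sketch (assumptions: the surrounding framework calls this rule
# with (verb, typ, ctx), and convert_date parses ISO-format strings). For a
# date used as a dict key, the rule would hand back converters roughly like:
#
#   encode = stringify_keys(PY2STR, date, None)   # -> date.isoformat
#   encode(date(2020, 1, 2))                      # -> "2020-01-02"
#
#   decode = stringify_keys(STR2PY, date, None)   # -> convert_date
#   decode("2020-01-02")                          # -> date(2020, 1, 2)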
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
        #Append rpcauth to chancoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "chancoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "chancoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
"""
This module handles initial database propagation, which is only run the first
time the game starts. It will create some default channels, objects, and
other things.
Everything starts at handle_setup()
"""
import django
from django.conf import settings
from django.contrib.auth import get_user_model
from src.server.models import ServerConfig
from src.utils import create
from django.utils.translation import ugettext as _
def create_config_values():
"""
Creates the initial config values.
"""
ServerConfig.objects.conf("site_name", settings.SERVERNAME)
ServerConfig.objects.conf("idle_timeout", settings.IDLE_TIMEOUT)
def get_god_player():
"""
Creates the god user.
"""
PlayerDB = get_user_model()
try:
god_player = PlayerDB.objects.get(id=1)
except PlayerDB.DoesNotExist:
txt = "\n\nNo superuser exists yet. The superuser is the 'owner'"
txt += "\account on the Evennia server. Create a new superuser using"
txt += "\nthe command"
txt += "\n\n python manage.py createsuperuser"
txt += "\n\nFollow the prompts, then restart the server."
raise Exception(txt)
return god_player
def create_objects():
"""
Creates the #1 player and Limbo room.
"""
print " Creating objects (Player #1 and Limbo room) ..."
# Set the initial User's account object's username on the #1 object.
# This object is pure django and only holds name, email and password.
god_player = get_god_player()
# Create a Player 'user profile' object to hold eventual
# mud-specific settings for the PlayerDB object.
player_typeclass = settings.BASE_PLAYER_TYPECLASS
# run all creation hooks on god_player (we must do so manually
# since the manage.py command does not)
god_player.typeclass_path = player_typeclass
god_player.basetype_setup()
god_player.at_player_creation()
god_player.locks.add("examine:perm(Immortals);edit:false();delete:false();boot:false();msg:all()")
# this is necessary for quelling to work correctly.
god_player.permissions.add("Immortals")
# Limbo is the default "nowhere" starting room
# Create the in-game god-character for player #1 and set
# it to exist in Limbo.
character_typeclass = settings.BASE_CHARACTER_TYPECLASS
god_character = create.create_object(character_typeclass,
key=god_player.username, nohome=True)
god_character.id = 1
god_character.db.desc = _('This is User #1.')
god_character.locks.add("examine:perm(Immortals);edit:false();delete:false();boot:false();msg:all();puppet:false()")
god_character.permissions.add("Immortals")
god_character.save()
god_player.attributes.add("_first_login", True)
god_player.attributes.add("_last_puppet", god_character)
god_player.db._playable_characters.append(god_character)
room_typeclass = settings.BASE_ROOM_TYPECLASS
limbo_obj = create.create_object(room_typeclass, _('Limbo'), nohome=True)
limbo_obj.id = 2
string = " ".join([
"Welcome to your new {wEvennia{n-based game. From here you are ready",
"to begin development. Visit http://evennia.com if you should need",
"help or would like to participate in community discussions. If you",
"are logged in as User #1 you can create a demo/tutorial area with",
"'@batchcommand contrib.tutorial_world.build'. Log out and create",
"a new non-admin account at the login screen to play the tutorial",
"properly."])
string = _(string)
limbo_obj.db.desc = string
limbo_obj.save()
# Now that Limbo exists, try to set the user up in Limbo (unless
# the creation hooks already fixed this).
if not god_character.location:
god_character.location = limbo_obj
if not god_character.home:
god_character.home = limbo_obj
def create_channels():
"""
Creates some sensible default channels.
"""
print " Creating default channels ..."
# public channel
key1, aliases, desc, locks = settings.CHANNEL_PUBLIC
pchan = create.create_channel(key1, aliases, desc, locks=locks)
# mudinfo channel
key2, aliases, desc, locks = settings.CHANNEL_MUDINFO
ichan = create.create_channel(key2, aliases, desc, locks=locks)
# connectinfo channel
key3, aliases, desc, locks = settings.CHANNEL_CONNECTINFO
cchan = create.create_channel(key3, aliases, desc, locks=locks)
# TODO: postgresql-psycopg2 has a strange error when trying to
# connect the user to the default channels. It works fine from inside
# the game, but not from the initial startup. We are temporarily bypassing
# the problem with the following fix. See Evennia Issue 151.
if ((".".join(str(i) for i in django.VERSION) < "1.2"
and settings.DATABASE_ENGINE == "postgresql_psycopg2")
or (hasattr(settings, 'DATABASES')
and settings.DATABASES.get("default", {}).get('ENGINE', None)
== 'django.db.backends.postgresql_psycopg2')):
warning = """
        PostgreSQL-psycopg2 compatibility fix:
        The in-game channels %s, %s and %s were created,
        but the superuser was not yet connected to them. Please use
        in-game commands to connect Player #1 to those channels when first
logging in.
""" % (key1, key2, key3)
print warning
return
# connect the god user to all these channels by default.
goduser = get_god_player()
pchan.connect(goduser)
ichan.connect(goduser)
cchan.connect(goduser)
def create_system_scripts():
"""
Setup the system repeat scripts. They are automatically started
by the create_script function.
"""
from src.scripts import scripts
print " Creating and starting global scripts ..."
# check so that all sessions are alive.
script1 = create.create_script(scripts.CheckSessions)
# validate all scripts in script table.
script2 = create.create_script(scripts.ValidateScripts)
# update the channel handler to make sure it's in sync
script3 = create.create_script(scripts.ValidateChannelHandler)
# flush the idmapper cache
script4 = create.create_script(scripts.ValidateIdmapperCache)
if not script1 or not script2 or not script3 or not script4:
print " Error creating system scripts."
def start_game_time():
"""
This starts a persistent script that keeps track of the
in-game time (in whatever accelerated reference frame), but also
the total run time of the server as well as its current uptime
(the uptime can also be found directly from the server though).
"""
print " Starting in-game time ..."
from src.utils import gametime
gametime.init_gametime()
def create_admin_media_links():
"""
This traverses to src/web/media and tries to create a symbolic
link to the django media files from within the MEDIA_ROOT.
These are files we normally don't
want to mess with (use templates to customize the admin
look). Linking is needed since the Twisted webserver otherwise has no
notion of where the default files are - and we cannot hard-code it
since the django install may be at different locations depending
on system.
"""
import django
import os
    if django.VERSION < (1, 4):
dpath = os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
dpath = os.path.join(django.__path__[0], 'contrib', 'admin', 'static', 'admin')
apath = os.path.join(settings.ADMIN_MEDIA_ROOT)
if os.path.isdir(apath):
print " ADMIN_MEDIA_ROOT already exists. Ignored."
return
if os.name == 'nt':
print " Admin-media files copied to ADMIN_MEDIA_ROOT (Windows mode)."
os.mkdir(apath)
os.system('xcopy "%s" "%s" /e /q /c' % (dpath, apath))
if os.name == 'posix':
try:
os.symlink(dpath, apath)
print " Admin-media symlinked to ADMIN_MEDIA_ROOT."
except OSError, e:
print " There was an error symlinking Admin-media to ADMIN_MEDIA_ROOT:\n %s\n -> \n %s\n (%s)\n If you see issues, link manually." % (dpath, apath, e)
else:
print " Admin-media files should be copied manually to ADMIN_MEDIA_ROOT."
def at_initial_setup():
"""
Custom hook for users to overload some or all parts of the initial
setup. Called very last in the sequence. It tries to import and
    run a module settings.AT_INITIAL_SETUP_HOOK_MODULE and will fail
silently if this does not exist or fails to load.
"""
modname = settings.AT_INITIAL_SETUP_HOOK_MODULE
if not modname:
return
try:
mod = __import__(modname, fromlist=[None])
except (ImportError, ValueError):
return
print " Running at_initial_setup() hook."
if mod.__dict__.get("at_initial_setup", None):
mod.at_initial_setup()
def reset_server():
"""
We end the initialization by resetting the server. This
makes sure the first login is the same as all the following
ones, particularly it cleans all caches for the special objects.
It also checks so the warm-reset mechanism works as it should.
"""
from src.server.sessionhandler import SESSIONS
print " Initial setup complete. Restarting Server once."
SESSIONS.server.shutdown(mode='reset')
def handle_setup(last_step):
"""
Main logic for the module. It allows for restarting
the initialization at any point if one of the modules
should crash.
"""
if last_step < 0:
# this means we don't need to handle setup since
        # it already ran successfully once.
return
elif last_step is None:
# config doesn't exist yet. First start of server
last_step = 0
# setting up the list of functions to run
setup_queue = [
create_config_values,
create_objects,
create_channels,
create_system_scripts,
start_game_time,
create_admin_media_links,
at_initial_setup,
reset_server
]
#print " Initial setup: %s steps." % (len(setup_queue))
# step through queue, from last completed function
for num, setup_func in enumerate(setup_queue[last_step:]):
# run the setup function. Note that if there is a
# traceback we let it stop the system so the config
# step is not saved.
#print "%s..." % num
try:
setup_func()
except Exception:
if last_step + num == 2:
from src.players.models import PlayerDB
from src.objects.models import ObjectDB
for obj in ObjectDB.objects.all():
obj.delete()
for profile in PlayerDB.objects.all():
profile.delete()
elif last_step + num == 3:
from src.comms.models import ChannelDB, PlayerChannelConnection
ChannelDB.objects.all().delete()
PlayerChannelConnection.objects.all().delete()
raise
ServerConfig.objects.conf("last_initial_setup_step", last_step + num + 1)
# We got through the entire list. Set last_step to -1 so we don't
# have to run this again.
ServerConfig.objects.conf("last_initial_setup_step", -1)
|
#!/usr/bin/env python
import sys, os, re
try:
import chardet
except ImportError:
print "You need universal encoding detector for this script"
print " http://chardet.feedparser.org or apt-get install python-chardet"
sys.exit()
regexp_language = re.compile("\* +(.+) +translation", re.IGNORECASE)
js_template = """/* This file is automatically generated by create_language_js.py */
// some data used in the examples
Ext.namespace('Ext.exampledata');
// TODO: complete and sort the list
Ext.exampledata.languages = [
%s
];
"""
def lang_name(file):
language = os.path.basename(file)
m = regexp_language.search(open(file).read(512))
if m:
language = m.groups()[0]
return language
def print_locale(lang_code):
print lang_code,
sys.stdout.flush()
return True
def main():
base_dir = "../../src/locale"
base_file = lambda f: os.path.join(base_dir, f)
try:
locales = os.listdir(base_dir)
    except OSError:
print "Cannot find source locale directory: %s ... exiting" % base_dir
sys.exit()
valid_file = lambda e: e.endswith(".js") and e.startswith("ext-lang-")
char_set = lambda f: chardet.detect(open(f).read())['encoding']
lang_code = lambda f: f[9:f.rfind(".js")]
info_set = lambda f: (lang_name(base_file(f)), (lang_code(f), char_set(base_file(f))))
locales = dict(info_set(file) for file in locales if valid_file(file) and print_locale(lang_code(file)))
print "... done"
locale_strarray = ',\n'.join(["\t[%r, %r, %r]" % (code, name, charset) \
for name, (code, charset) in sorted(locales.items())])
# create languages.js
open("languages.js", "w").write(js_template % locale_strarray)
print "saved %d languages to languages.js" % len(locales)
if __name__=="__main__":
main()
|
from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.get_order(symbol="BTCUSDT", orderId=534333508)
# PrintBasic.print_obj(result)
|
#!/usr/bin/env python3
"""Base58 encoding
Implementation of Base58 and Base58Check, originally from
https://github.com/keis/base58, with the following modifications:
- type annotated python3
- using native python3 int.from_bytes() and i.to_bytes()
- added length check functionalities to decode and decode_check
"""
from hashlib import sha256
from typing import Union, Optional
from btclib.utils import double_sha256
# used digits
__digits = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__base = len(__digits)
def _str_to_bytes(v: Union[str, bytes]) -> bytes:
    """Encode string to bytes, stripping leading/trailing white spaces"""
if isinstance(v, str):
v = v.strip()
v = v.encode()
return v
def encode_from_int(i: int) -> bytes:
"""Encode an integer using Base58"""
if i == 0:
return __digits[0:1]
result = b""
while i:
i, idx = divmod(i, __base)
result = __digits[idx:idx+1] + result
return result
def encode(v: Union[str, bytes]) -> bytes:
"""Encode bytes using Base58"""
v = _str_to_bytes(v)
# preserve leading-0s
# leading-0s become base58 leading-1s
nPad = len(v)
v = v.lstrip(b'\0')
vlen = len(v)
nPad -= vlen
result = __digits[0:1] * nPad
if vlen:
i = int.from_bytes(v, 'big')
result += encode_from_int(i)
return result
def encode_check(v: Union[str, bytes]) -> bytes:
"""Encode bytes using Base58 with a 4 character checksum"""
v = _str_to_bytes(v)
digest = double_sha256(v)
return encode(v + digest[:4])
def decode_to_int(v: Union[str, bytes]) -> int:
"""Decode Base58 encoded bytes as integer"""
v = _str_to_bytes(v)
i = 0
for char in v:
i *= __base
i += __digits.index(char)
return i
def decode(v: Union[str, bytes],
output_size: Optional[int] = None) -> bytes:
"""Decode Base58 encoded bytes, with verified output length"""
v = _str_to_bytes(v)
# preserve leading-0s
# base58 leading-1s become leading-0s
nPad = len(v)
v = v.lstrip(__digits[0:1])
vlen = len(v)
nPad -= vlen
result = b'\0' * nPad
if vlen:
i = decode_to_int(v)
nbytes = (i.bit_length() + 7) // 8
result = result + i.to_bytes(nbytes, 'big')
if output_size is not None and len(result) != output_size:
m = "Invalid decoded size: "
m += f"{len(result)} bytes instead of {output_size}"
raise ValueError(m)
return result
def decode_check(v: Union[str, bytes],
output_size: Optional[int] = None) -> bytes:
"""Decode Base58 encoded bytes, with verified checksum and output length"""
if output_size is not None:
output_size += 4
result = decode(v, output_size)
result, checksum = result[:-4], result[-4:]
digest = double_sha256(result)
if checksum != digest[:4]:
m = f"Invalid checksum: '{checksum}' instead of '{digest[:4]}'"
raise ValueError(m)
return result
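
# Usage sketch (illustrative only): round-trip a payload through Base58Check.
# The payload below is a made-up example value, not something shipped with this module.
if __name__ == "__main__":
    payload = b'\x00' + bytes.fromhex('010966776006953d5567439e5e39f86a0d273bee')
    token = encode_check(payload)                           # Base58Check-encode (payload + 4-byte checksum)
    assert decode_check(token, output_size=21) == payload   # decode, verifying checksum and length
    print(token.decode())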
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from CsFile import CsFile
from CsProcess import CsProcess
import CsHelper
class CsApp:
def __init__(self, ip):
self.dev = ip.getDevice()
self.ip = ip.get_ip_address()
self.type = ip.get_type()
self.fw = ip.fw
self.config = ip.config
class CsApache(CsApp):
""" Set up Apache """
def remove(self):
file = "/etc/apache2/sites-enabled/vhost-%s.conf" % self.dev
if os.path.isfile(file):
os.remove(file)
CsHelper.service("apache2", "restart")
def setup(self):
CsHelper.copy_if_needed("/etc/apache2/vhost.template",
"/etc/apache2/sites-enabled/vhost-%s.conf" % self.ip)
file = CsFile("/etc/apache2/sites-enabled/vhost-%s.conf" % (self.ip))
file.search("<VirtualHost.*:80>", "\t<VirtualHost %s:80>" % (self.ip))
file.search("<VirtualHost.*:443>", "\t<VirtualHost %s:443>" % (self.ip))
file.search("Listen .*:80", "Listen %s:80" % (self.ip))
file.search("Listen .*:443", "Listen %s:443" % (self.ip))
file.search("ServerName.*", "\tServerName %s.%s" % (self.config.cl.get_type(), self.config.get_domain()))
if file.is_changed():
file.commit()
CsHelper.service("apache2", "restart")
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT" % (self.dev, self.ip)
])
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 443 -j ACCEPT" % (self.dev, self.ip)
])
class CsPasswdSvc():
"""
CloudStack VR password server
"""
def __init__(self, ip):
self.ip = ip
def start(self):
CsHelper.service("cloud-password-server@%s" % self.ip, "start")
def stop(self):
CsHelper.service("cloud-password-server@%s" % self.ip, "stop")
def restart(self):
CsHelper.service("cloud-password-server@%s" % self.ip, "restart")
class CsDnsmasq(CsApp):
""" Set up dnsmasq """
def add_firewall_rules(self):
""" Add the necessary firewall rules
"""
self.fw.append(["", "front",
"-A INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % self.dev
])
if self.config.has_dns():
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p udp -m udp --dport 53 -j ACCEPT" % (self.dev, self.ip)
])
self.fw.append([
"", "front",
"-A INPUT -i %s -d %s/32 -p tcp -m tcp --dport 53 -j ACCEPT" % (self.dev, self.ip)
])
|
#!/usr/bin/env python
# Copyright 2021 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
#!/usr/bin/env python
import os, sys
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')
sys.path.insert(0, BASE_DIR)
try:
from django.conf import settings
from django.core.management import call_command
from django.test.utils import get_runner
except ImportError:
import traceback
traceback.print_exc()
msg = "To fix this error, run: pip install -r requirements_test.txt"
raise ImportError(msg)
def setup_proj():
try:
from test_settings import settings as test_settings
import django
setup = django.setup
except AttributeError:
pass
else:
settings.configure(**test_settings)
setup()
def run_tests(*test_args):
setup_proj()
if not test_args:
test_args = ['tests']
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
# first migrate
os.system(os.path.join(os.path.dirname(os.path.abspath(__file__)), "migrate_test_project.py"))
# then run tests
run_tests(*sys.argv[1:])
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"QualityScoreBucketEnum",},
)
class QualityScoreBucketEnum(proto.Message):
r"""The relative performance compared to other advertisers.
"""
class QualityScoreBucket(proto.Enum):
r"""Enum listing the possible quality score buckets."""
UNSPECIFIED = 0
UNKNOWN = 1
BELOW_AVERAGE = 2
AVERAGE = 3
ABOVE_AVERAGE = 4
__all__ = tuple(sorted(__protobuf__.manifest))
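
# Illustrative usage sketch (not part of the generated code): the nested proto
# enum behaves like a standard Python IntEnum, so values can be compared to ints.
#
#   bucket = QualityScoreBucketEnum.QualityScoreBucket.ABOVE_AVERAGE
#   assert bucket == 4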
|
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datasets
import numpy as np
from model.utils.config import cfg
from datasets.factory import get_imdb
import PIL
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
if not (imdb.name.startswith('coco')):
if imdb.name.startswith('gta'):
sizes = [(1920, 1080)
for _ in range(imdb.num_images)]
elif imdb.name.startswith('kitti'):
sizes = [(1242, 375)
for _ in range(imdb.num_images)]
else:
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in range(imdb.num_images)]
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not (imdb.name.startswith('coco')):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
if isinstance(roidb[i]['gt_overlaps'], (np.ndarray, np.generic)):
gt_overlaps = roidb[i]['gt_overlaps']
else:
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def rank_roidb_ratio(roidb):
# rank roidb based on the ratio between width and height.
ratio_large = 2 # largest ratio to preserve.
ratio_small = 0.5 # smallest ratio to preserve.
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = width / float(height)
if ratio > ratio_large:
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif ratio < ratio_small:
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return ratio_list[ratio_index], ratio_index
def filter_roidb(roidb):
# filter the image without bounding box.
print('before filtering, there are %d images...' % (len(roidb)))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % (len(roidb)))
return roidb
def combined_roidb(imdb_names, training=True):
"""
Combine multiple roidbs
"""
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
# ratio_index = rank_roidb_ratio(imdb)
print('done')
return imdb.roidb
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
if training:
roidb = filter_roidb(roidb)
ratio_list, ratio_index = rank_roidb_ratio(roidb)
return imdb, roidb, ratio_list, ratio_index
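
# Illustrative usage sketch; 'voc_2007_trainval' is a placeholder dataset name and
# must match an imdb registered in datasets.factory:
#
#   imdb, roidb, ratio_list, ratio_index = combined_roidb('voc_2007_trainval')
#   print('{:d} roidb entries loaded for {:s}'.format(len(roidb), imdb.name))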
|
%matplotlib notebook
import time
import numpy as np
from matplotlib import pyplot as plt
from reachy import Reachy, parts
#Change this for USB port
reachy = Reachy(
#right_arm=parts.RightArm(io='/dev/ttyUSB*', hand='force_gripper'),
right_arm=parts.RightArm(io='ws', hand='force_gripper'),
)
for m in reachy.right_arm.motors:
m.compliant = False
#Wave Frame 1, bring arm up
reachy.goto({
'right_arm.shoulder_pitch': -20,
'right_arm.shoulder_roll': -10,
'right_arm.arm_yaw': -10,
'right_arm.elbow_pitch': -120,
'right_arm.hand.forearm_yaw': 0,
'right_arm.hand.wrist_pitch': 0,
'right_arm.hand.wrist_roll': 0,
'right_arm.hand.gripper': 0,
}, duration=2, wait=True)
#Wave Frame 2 (Hand Wave)
reachy.goto({
'right_arm.hand.wrist_roll': 40,
'right_arm.hand.gripper': 0,
}, duration=0.75, wait=True)
reachy.goto({
'right_arm.hand.wrist_roll': -40,
'right_arm.hand.gripper': 0,
}, duration=0.75, wait=True)
reachy.goto({
'right_arm.hand.wrist_roll': 40,
'right_arm.hand.gripper': 0,
}, duration=0.75, wait=True)
reachy.goto({
'right_arm.hand.wrist_roll': -40,
'right_arm.hand.gripper': 0,
}, duration=0.75, wait=True)
time.sleep(2)  # wait 2 seconds
#Wave Frame 3, return to home position
reachy.goto({
'right_arm.shoulder_pitch': 0,
'right_arm.shoulder_roll': 0,
'right_arm.arm_yaw': 0,
'right_arm.elbow_pitch': 0,
'right_arm.hand.forearm_yaw': 0,
'right_arm.hand.wrist_pitch': 0,
'right_arm.hand.wrist_roll': 0,
'right_arm.hand.gripper': 0,
}, duration=2, wait=True)
|
from rest_framework import serializers
from goods.models import SPUSpecification
class SpecModelSerializer(serializers.ModelSerializer):
spu = serializers.StringRelatedField()
spu_id = serializers.IntegerField()
class Meta:
model = SPUSpecification
fields = ['id', 'name', 'spu', 'spu_id']
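
# Illustrative usage sketch (assumes SPUSpecification rows already exist in the DB):
#
#   specs = SPUSpecification.objects.all()
#   data = SpecModelSerializer(instance=specs, many=True).data
#   # -> list of dicts with the keys 'id', 'name', 'spu' and 'spu_id'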
|
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
from test.lib.testing import eq_, AssertsExecutionResults, assert_raises
from test.lib import testing
from test.lib import fixtures
from sqlalchemy.orm.attributes import instance_state
from sqlalchemy.orm.exc import FlushError
from test.lib.schema import Table, Column
engine = testing.db
class FlushOnPendingTest(AssertsExecutionResults, fixtures.TestBase):
def setUp(self):
global Parent, Child, Base
Base= declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
name = Column(String(50), nullable=False)
children = relationship("Child", load_on_pending=True)
class Child(Base):
__tablename__ = 'child'
id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
Base.metadata.create_all(engine)
def tearDown(self):
Base.metadata.drop_all(engine)
def test_annoying_autoflush_one(self):
sess = Session(engine)
p1 = Parent()
sess.add(p1)
p1.children = []
def test_annoying_autoflush_two(self):
sess = Session(engine)
p1 = Parent()
sess.add(p1)
assert p1.children == []
def test_dont_load_if_no_keys(self):
sess = Session(engine)
p1 = Parent()
sess.add(p1)
def go():
assert p1.children == []
self.assert_sql_count(testing.db, go, 0)
class LoadOnFKsTest(AssertsExecutionResults, fixtures.TestBase):
def setUp(self):
global Parent, Child, Base
Base= declarative_base()
class Parent(Base):
__tablename__ = 'parent'
__table_args__ = {'mysql_engine':'InnoDB'}
id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
class Child(Base):
__tablename__ = 'child'
__table_args__ = {'mysql_engine':'InnoDB'}
id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
parent = relationship(Parent, backref=backref("children"))
Base.metadata.create_all(engine)
global sess, p1, p2, c1, c2
sess = Session(bind=engine)
p1 = Parent()
p2 = Parent()
c1, c2 = Child(), Child()
c1.parent = p1
sess.add_all([p1, p2])
assert c1 in sess
sess.commit()
def tearDown(self):
sess.rollback()
Base.metadata.drop_all(engine)
def test_load_on_pending_disallows_backref_event(self):
Child.parent.property.load_on_pending = True
sess.autoflush = False
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
c3.parent = p1
# a side effect of load-on-pending with no autoflush.
# a change to the backref event handler to check
# collection membership before assuming "old == new so return"
# would fix this - but this is wasteful and autoflush
# should be turned on.
assert c3 not in p1.children
def test_load_on_persistent_allows_backref_event(self):
Child.parent.property.load_on_pending = True
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
c3.parent = p1
assert c3 in p1.children
def test_no_load_on_pending_allows_backref_event(self):
# users who stick with the program and don't use
# 'load_on_pending' get expected behavior
sess.autoflush = False
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
c3.parent = p1
assert c3 in p1.children
def test_autoflush_on_pending(self):
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
# pendings don't autoflush
assert c3.parent is None
    def test_autoflush_on_pending_with_load_on_pending(self):
Child.parent.property.load_on_pending = True
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
# ...unless the flag is on
assert c3.parent is p1
def test_collection_load_from_pending_populated(self):
Parent.children.property.load_on_pending = True
p2 = Parent(id=p1.id)
sess.add(p2)
# load should emit since PK is populated
def go():
assert p2.children
self.assert_sql_count(testing.db, go, 1)
def test_collection_load_from_pending_no_sql(self):
Parent.children.property.load_on_pending = True
p2 = Parent(id=None)
sess.add(p2)
# load should not emit since "None" is the bound
# param list
def go():
assert not p2.children
self.assert_sql_count(testing.db, go, 0)
def test_load_on_pending_with_set(self):
Child.parent.property.load_on_pending = True
p1.children
c3 = Child()
sess.add(c3)
c3.parent_id = p1.id
def go():
c3.parent = p1
self.assert_sql_count(testing.db, go, 0)
def test_backref_doesnt_double(self):
Child.parent.property.load_on_pending = True
sess.autoflush = False
p1.children
c3 = Child()
sess.add(c3)
c3.parent = p1
c3.parent = p1
c3.parent = p1
c3.parent = p1
        assert len(p1.children) == 2
def test_m2o_lazy_loader_on_persistent(self):
"""Compare the behaviors from the lazyloader using
the "committed" state in all cases, vs. the lazyloader
using the "current" state in all cases except during flush.
"""
for loadfk in (True, False):
for loadrel in (True, False):
for autoflush in (True, False):
for manualflush in (True, False):
for fake_autoexpire in (True, False):
sess.autoflush = autoflush
if loadfk:
c1.parent_id
if loadrel:
c1.parent
c1.parent_id = p2.id
if manualflush:
sess.flush()
# fake_autoexpire refers to the eventual
# auto-expire of 'parent' when c1.parent_id
# is altered.
if fake_autoexpire:
sess.expire(c1, ['parent'])
# old 0.6 behavior
#if manualflush and (not loadrel or fake_autoexpire):
# # a flush occurs, we get p2
# assert c1.parent is p2
#elif not loadrel and not loadfk:
# # problematically - we get None since committed state
# # is empty when c1.parent_id was mutated, since we want
# # to save on selects. this is
# # why the patch goes in in 0.6 - this is mostly a bug.
# assert c1.parent is None
#else:
# # if things were loaded, autoflush doesn't even
# # happen.
# assert c1.parent is p1
# new behavior
if loadrel and not fake_autoexpire:
assert c1.parent is p1
else:
assert c1.parent is p2
sess.rollback()
def test_m2o_lazy_loader_on_pending(self):
for loadonpending in (False, True):
for autoflush in (False, True):
for manualflush in (False, True):
Child.parent.property.load_on_pending = loadonpending
sess.autoflush = autoflush
c2 = Child()
sess.add(c2)
c2.parent_id = p2.id
if manualflush:
sess.flush()
if loadonpending or manualflush:
assert c2.parent is p2
else:
assert c2.parent is None
sess.rollback()
def test_m2o_lazy_loader_on_transient(self):
for loadonpending in (False, True):
for attach in (False, True):
for autoflush in (False, True):
for manualflush in (False, True):
Child.parent.property.load_on_pending = loadonpending
sess.autoflush = autoflush
c2 = Child()
if attach:
sess._attach(instance_state(c2))
c2.parent_id = p2.id
if manualflush:
sess.flush()
if loadonpending and attach:
assert c2.parent is p2
else:
assert c2.parent is None
sess.rollback()
|
# 2-dimensional array (a list of lists)
a = [
['linux', 'open source'],
['windows', 'licence'],
['mac', 'licence']
]
# to access a 2D array, index by row first, then by column
# 'linux', 'windows' and 'mac' start the rows (first index)
# within a row, e.g. 'linux' and 'open source' are the columns (second index)
print(a[0][0])  # prints 'linux'
print(a[1][1])  # prints 'licence'
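# A small illustrative addition: the same data can be walked with nested loops,
# the outer loop over rows and the inner loop over the columns of each row.
for row in a:
    for value in row:
        print(value, end=' ')
    print()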
|
import numpy as np
from numpy.typing import ArrayLike
from numpy.random import choice
import numba
@numba.njit(fastmath=True)
def isin(val, arr):
for i in range(arr.shape[0]):
if arr[i] == val:
return True
return False
@numba.njit
def tournament(fitness_array:ArrayLike, n_selection:int) -> np.ndarray:
'''
Select the best individuals in a one vs one test.
Args:
fitness_array (np.typing.ArrayLike): Array with evaluated fitness from the generation
n_selection (int): Number of individuals that will be selected
Returns:
        selected (np.ndarray): Array with indexes of selected individuals
'''
fitness_array = np.asarray(fitness_array)
selected = np.zeros(n_selection, dtype=np.int32)-1
size = fitness_array.shape[0]
for i in range(n_selection):
best_index = -1
while best_index == -1 or (isin(best_index, selected)):
index1 = np.random.randint(size)
index2 = np.random.randint(size)
if fitness_array[index1] > fitness_array[index2]:
best_index = index1
else:
best_index = index2
selected[i] = best_index
return selected
@numba.njit
def roulette(fitness_array:ArrayLike, n_selection:int) -> np.ndarray:
'''
    Select individuals stochastically, with the probability of being chosen
    proportional to the individual's fitness.
Args:
fitness_array (np.typing.ArrayLike): array with evaluated fitness from the generation
n_selection (int): number of individuals that will be selected
Returns:
        selected (np.ndarray): array with indexes of selected individuals
'''
fitness = np.asarray(fitness_array)
if np.min(fitness) < 0:
fitness = fitness - np.min(fitness)
prob = fitness/np.sum(fitness)
size = fitness.shape[0]
indexs = np.arange(0, size, 1, dtype=np.int32)
    # selected = np.random.choice(indexs, n_selection, p=prob) DOESN'T WORK WITH NJIT
cumsum = np.cumsum(prob)
selected = np.empty(n_selection, np.int32)
for i in range(n_selection):
index = -1
while index == -1 or (isin(index, selected)):
index = indexs[np.searchsorted(cumsum, np.random.random(), side="right")]
selected[i] = index
return selected
@numba.njit
def rank(fitness_array:ArrayLike, n_selection:int) -> np.ndarray:
'''
    Select the n best individuals, assuming fitness_array is sorted in decreasing order.
Args:
fitness_array (np.typing.ArrayLike): array with evaluated fitness from the generation
n_selection (int): number of individuals that will be selected
Returns:
        selected (np.ndarray): array with indexes of the n_selection best individuals
'''
return np.arange(0, n_selection, 1, dtype=np.int32)
tournament.needs_sort = False
roulette.needs_sort = True
rank.needs_sort = True
def default_selection():
return tournament
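
# Illustrative usage sketch with a made-up fitness array; the first call to each
# njit-compiled function also triggers compilation, so it is slower than later calls.
if __name__ == "__main__":
    fitness = np.array([0.1, 0.9, 0.4, 0.7, 0.2], dtype=np.float64)
    print(tournament(fitness, 3))  # three distinct indices, biased towards fitter individuals
    print(roulette(fitness, 3))    # three distinct indices, fitness-proportional sampling
    print(rank(fitness, 3))        # first three indices (assumes a decreasingly sorted array)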
|
import re
import html
import logging
import pandas as pd
import os
import random
import torch
from pathlib import Path
import pickle
import shutil
import itertools
import more_itertools
from sklearn.model_selection import train_test_split
from torch.utils.data import (
TensorDataset,
DataLoader,
RandomSampler,
SequentialSampler,
Dataset,
)
from torch.utils.data.distributed import DistributedSampler
import spacy
from tqdm import tqdm, trange
from fastprogress.fastprogress import master_bar, progress_bar
from transformers import (
WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizer,
ElectraConfig,
ElectraForSequenceClassification,
ElectraTokenizer,
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
"distilbert": (
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
),
"camembert-base": (
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizer,
),
"electra": (ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),
}
# Create text corpus suitable for language model training
def create_corpus(text_list, target_path, logger=None):
# nlp = spacy.load("en_core_web_sm", disable=["tagger", "ner", "textcat"])
with open(target_path, "w") as f:
# Split sentences for each document
logger.info("Formatting corpus for {}".format(target_path))
for text in progress_bar(text_list):
text = fix_html(text)
text = replace_multi_newline(text)
text = spec_add_spaces(text)
text = rm_useless_spaces(text)
text = text.strip()
f.write(text + "\n")
# text_lines = [re.sub(r"\n(\s)*","",str(sent)) for i, sent in enumerate(nlp(str(text)).sents)]
# text_lines = [text_line for text_line in text_lines if re.search(r'[a-zA-Z]', text_line)]
# f.write('\n'.join(text_lines))
# f.write("\n \n")
def spec_add_spaces(t: str) -> str:
"Add spaces around / and # in `t`. \n"
return re.sub(r"([/#\n])", r" \1 ", t)
def rm_useless_spaces(t: str) -> str:
"Remove multiple spaces in `t`."
return re.sub(" {2,}", " ", t)
def replace_multi_newline(t: str) -> str:
return re.sub(r"(\n(\s)*){2,}", "\n", t)
def fix_html(x: str) -> str:
"List of replacements from html strings in `x`."
re1 = re.compile(r" +")
x = (
x.replace("#39;", "'")
.replace("amp;", "&")
.replace("#146;", "'")
.replace("nbsp;", " ")
.replace("#36;", "$")
.replace("\\n", "\n")
.replace("quot;", "'")
.replace("<br />", "\n")
.replace('\\"', '"')
.replace(" @.@ ", ".")
.replace(" @-@ ", "-")
.replace(" @,@ ", ",")
.replace("\\", " \\ ")
)
return re1.sub(" ", html.unescape(x))
class TextDataset(Dataset):
def __init__(self, tokenizer, file_path, cache_path, logger, block_size=512):
assert os.path.isfile(file_path)
if os.path.exists(cache_path):
logger.info("Loading features from cached file %s", cache_path)
with open(cache_path, "rb") as handle:
self.examples = pickle.load(handle)
else:
logger.info("Creating features from dataset file %s", file_path)
self.examples = []
text = (line.strip() for line in open(file_path, encoding="utf-8"))
text = progress_bar(list(text))
text = map(
lambda x: tokenizer.convert_tokens_to_ids(tokenizer.tokenize(x)), text
)
text = itertools.chain.from_iterable(text)
text = more_itertools.chunked(text, block_size)
self.examples = list(text)[:-1]
            # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
            # If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
logger.info("Saving features into cached file %s", cache_path)
with open(cache_path, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item])
# DataBunch object for language models
class BertLMDataBunch(object):
@staticmethod
def from_raw_corpus(
data_dir,
text_list,
tokenizer,
batch_size_per_gpu=32,
max_seq_length=512,
multi_gpu=True,
test_size=0.1,
model_type="bert",
logger=None,
clear_cache=False,
no_cache=False,
block_size=None
):
train_file = "lm_train.txt"
val_file = "lm_val.txt"
train_list, val_list = train_test_split(
text_list, test_size=test_size, shuffle=True
)
# Create train corpus
create_corpus(train_list, str(data_dir / train_file), logger=logger)
# Create val corpus
create_corpus(val_list, str(data_dir / val_file), logger=logger)
return BertLMDataBunch(
data_dir,
tokenizer,
train_file=train_file,
val_file=val_file,
batch_size_per_gpu=batch_size_per_gpu,
max_seq_length=max_seq_length,
multi_gpu=multi_gpu,
model_type=model_type,
logger=logger,
clear_cache=clear_cache,
no_cache=no_cache,
block_size=block_size
)
def __init__(
self,
data_dir,
tokenizer,
train_file="lm_train.txt",
val_file="lm_val.txt",
batch_size_per_gpu=32,
max_seq_length=512,
multi_gpu=True,
model_type="bert",
logger=None,
clear_cache=False,
no_cache=False,
block_size=None
):
# just in case someone passes string instead of Path
if isinstance(data_dir, str):
data_dir = Path(data_dir)
# Instantiate correct tokenizer if the tokenizer name is passed instead of object
if isinstance(tokenizer, str):
_, _, tokenizer_class = MODEL_CLASSES[model_type]
            # instantiate the new tokenizer object using the tokenizer name
tokenizer = tokenizer_class.from_pretrained(
tokenizer, do_lower_case=("uncased" in tokenizer)
)
# Bug workaround for RoBERTa
if model_type == "roberta":
tokenizer.max_len_single_sentence = tokenizer.max_len - 2
if block_size is None:
block_size = tokenizer.max_len_single_sentence
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.batch_size_per_gpu = batch_size_per_gpu
self.train_dl = None
self.val_dl = None
self.data_dir = data_dir
self.cache_dir = data_dir / "lm_cache"
self.no_cache = no_cache
self.model_type = model_type
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.n_gpu = 1
if multi_gpu:
self.n_gpu = torch.cuda.device_count()
if clear_cache:
shutil.rmtree(self.cache_dir, ignore_errors=True)
# Create folder if it doesn't exist
self.cache_dir.mkdir(exist_ok=True)
if train_file:
# Train DataLoader
# train_examples = None
cached_features_file = os.path.join(
self.cache_dir,
"cached_{}_{}_{}".format(
self.model_type, "train", str(self.max_seq_length)
),
)
train_filepath = str(self.data_dir / train_file)
train_dataset = TextDataset(
self.tokenizer,
train_filepath,
cached_features_file,
self.logger,
block_size=block_size,
)
self.train_batch_size = self.batch_size_per_gpu * max(1, self.n_gpu)
train_sampler = RandomSampler(train_dataset)
self.train_dl = DataLoader(
train_dataset, sampler=train_sampler, batch_size=self.train_batch_size
)
if val_file:
# Val DataLoader
# val_examples = None
cached_features_file = os.path.join(
self.cache_dir,
"cached_{}_{}_{}".format(
self.model_type, "dev", str(self.max_seq_length)
),
)
val_filepath = str(self.data_dir / val_file)
val_dataset = TextDataset(
self.tokenizer,
val_filepath,
cached_features_file,
self.logger,
block_size=block_size,
)
self.val_batch_size = self.batch_size_per_gpu * 2 * max(1, self.n_gpu)
val_sampler = RandomSampler(val_dataset)
self.val_dl = DataLoader(
val_dataset, sampler=val_sampler, batch_size=self.val_batch_size
)
# Mask tokens
def mask_tokens(self, inputs, mlm_probability=0.15):
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability mlm_probability defaults to 0.15 in Bert/RoBERTa)
masked_indices = torch.bernoulli(
torch.full(labels.shape, mlm_probability)
).bool()
# do not mask special tokens
masked_indices[:, 0] = False
masked_indices[:, -1] = False
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(
len(self.tokenizer), labels.shape, dtype=torch.long
)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def save(self, filename="databunch.pkl"):
tmp_path = self.data_dir / "tmp"
tmp_path.mkdir(exist_ok=True)
with open(str(tmp_path / filename), "wb") as f:
pickle.dump(self, f)
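
# Illustrative usage sketch; the data directory, the document list and the tokenizer
# name below are placeholders, not values shipped with this module:
#
#   databunch = BertLMDataBunch.from_raw_corpus(
#       data_dir=Path("data"),
#       text_list=raw_documents,          # list of raw text documents
#       tokenizer="bert-base-uncased",
#       batch_size_per_gpu=16,
#       model_type="bert",
#       logger=logging.getLogger(),
#   )
#   databunch.save()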
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
PYSCF Installation
==================
`PySCF <https://github.com/sunqm/pyscf>`__ is an open-source library for computational chemistry.
In order for Qiskit's chemistry module to interface with PySCF and execute it to
extract electronic structure information, PySCF must be installed.
According to the `PySCF installation instructions <http://sunqm.github.io/pyscf/install.html>`__,
the preferred installation method is via the pip package management system. Doing so,
while in the Python virtual environment where Qiskit's chemistry module is also installed, will
automatically make PySCF available to Qiskit at run time.
"""
from .pyscfdriver import PySCFDriver, InitialGuess
__all__ = ['PySCFDriver',
'InitialGuess']
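
# Illustrative usage sketch (not verified against a specific release); the molecule
# string and basis set are placeholder values and the exact constructor arguments
# should be checked against the PySCFDriver documentation:
#
#   driver = PySCFDriver(atom='H 0.0 0.0 0.0; H 0.0 0.0 0.735', basis='sto3g')
#   molecule = driver.run()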
|
from json import dumps
import smartpy as sp
FA2 = sp.io.import_script_from_url("https://smartpy.io/dev/templates/FA2.py")
class FundingCerti(FA2.FA2):
def __init__(self, config, metadata, admin):
if config.assume_consecutive_token_ids:
self.all_tokens.doc = """
This view is specified (but optional) in the standard.
This contract is built with assume_consecutive_token_ids =
True, so we return a list constructed from the number of tokens.
"""
else:
self.all_tokens.doc = """
This view is specified (but optional) in the standard.
This contract is built with assume_consecutive_token_ids =
False, so we convert the set of tokens from the storage to a list
to fit the expected type of TZIP-16.
"""
list_of_views = [
self.get_balance
, self.does_token_exist
, self.count_tokens
, self.all_tokens
, self.is_operator
]
if config.store_total_supply:
list_of_views = list_of_views + [self.total_supply]
if config.use_token_metadata_offchain_view:
self.set_token_metadata_view()
list_of_views = list_of_views + [self.token_metadata]
metadata_base = {
"version": config.name
, "description" : (
"This is a didactic reference implementation of FA2,"
+ " a.k.a. TZIP-012, using SmartPy.\n\n"
+ "This particular contract uses the configuration named: "
+ config.name + "."
)
, "interfaces": ["TZIP-012", "TZIP-016"]
, "authors": [
"Seb Mondet <https://seb.mondet.org>"
]
, "homepage": "https://gitlab.com/smondet/fa2-smartpy"
, "views": list_of_views
, "source": {
"tools": ["SmartPy"]
, "location": "https://gitlab.com/smondet/fa2-smartpy.git"
}
, "permissions": {
"operator":
"owner-or-operator-transfer" if config.support_operator else "owner-transfer"
, "receiver": "owner-no-hook"
, "sender": "owner-no-hook"
}
, "fa2-smartpy": {
"configuration" :
dict([(k, getattr(config, k)) for k in dir(config) if "__" not in k and k != 'my_map'])
}
}
self.init_metadata("metadata_base", metadata_base)
FA2.FA2_core.__init__(self, config, metadata, paused = False, administrator = admin)
def make_metadata(block, donor, donation):
"Helper function to build metadata JSON bytes values."
return (sp.map(l = {
"block_slug" : sp.utils.bytes_of_string(str(block.slug)),
"block_title" : sp.utils.bytes_of_string(str(block.title)),
"issuer_address": sp.utils.bytes_of_string(str(sp.source)),
"donor_name": sp.utils.bytes_of_string(str(donor.name)),
"donor_address": sp.utils.bytes_of_string(str(donor.address)),
"effective_donation": sp.utils.bytes_of_string(str(donation)),
"issuance_date": sp.utils.bytes_of_string(str(sp.now)),
}))
@sp.entry_point
def mint(self, params):
token_id = self.token_id_set.cardinal(self.data.all_tokens)
sp.verify(self.is_administrator(sp.sender), message = self.error_message.not_admin())
if self.config.single_asset:
sp.verify(token_id == 0, message = "single-asset: token-id <> 0")
if self.config.non_fungible:
sp.verify(params.amount == 1, message = "NFT-asset: amount <> 1")
sp.verify(
~ self.token_id_set.contains(self.data.all_tokens, token_id),
message = "NFT-asset: cannot mint twice same token"
)
user = self.ledger_key.make(params.address, token_id)
self.token_id_set.add(self.data.all_tokens, token_id)
sp.if self.data.ledger.contains(user):
self.data.ledger[user].balance += params.amount
sp.else:
self.data.ledger[user] = FA2.Ledger_value.make(params.amount)
sp.if self.data.token_metadata.contains(token_id):
if self.config.store_total_supply:
self.data.total_supply[token_id] = params.amount
sp.else:
self.data.token_metadata[token_id] = sp.record(
token_id = token_id,
token_info = params.metadata
)
if self.config.store_total_supply:
self.data.total_supply[token_id] = params.amount
@sp.add_test(name = "Funding Certificate Test")
def test():
scenario = sp.test_scenario()
admin = sp.test_account("Admin")
user1 = sp.test_account("User 1")
user2 = sp.test_account("User 2")
user3 = sp.test_account("User 3")
funding_certi = FundingCerti(
FA2.FA2_config(
single_asset=False,
non_fungible=True,
assume_consecutive_token_ids = True,
),
metadata = sp.big_map({
"": sp.utils.bytes_of_string("tezos-storage:content"),
"content": sp.utils.bytes_of_string(
dumps({
"name": "Funding-Blocks Certificate"
})
),}
),
admin = admin.address,
)
scenario += funding_certi
scenario.h1("Admin mint a certificate to User2")
scenario += funding_certi.mint(
address = user2.address,
amount = 1,
metadata = FundingCerti.make_metadata(
block = sp.record(
slug = "xyz",
title = "XYZ",
),
donor = sp.record(
name = "User 2",
address = user2.address,
),
donation = sp.tez(2000),
)
).run(sender = admin)
scenario.h1("Admin mint a certificate to User3")
scenario += funding_certi.mint(
address = user3.address,
amount = 1,
metadata = FundingCerti.make_metadata(
block = sp.record(
slug = "xyz",
title = "XYZ",
),
donor = sp.record(
name = "User 3",
address = user3.address,
),
donation = sp.tez(3000),
)
).run(sender = admin)
def arguments_for_balance_of(receiver, reqs):
return (sp.record(
callback = sp.contract(
FA2.Balance_of.response_type(),
receiver,
entry_point = "receive_balances").open_some(),
requests = reqs))
scenario.h1("Balance of all the users")
consumer = FA2.View_consumer(funding_certi)
scenario += consumer
scenario += funding_certi.balance_of(
arguments_for_balance_of(
consumer.address,
[
sp.record(
owner = user2.address,
token_id = 1
),
sp.record(
owner = user3.address,
token_id = 2
)
]
)
)
|
import random
import os
import tarfile
from time import time
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import autograd, gluon, image, nd
from mxnet.gluon import nn, data as gdata, loss as gloss, utils as gutils
import numpy as np
# Set default figure size.
set_matplotlib_formats('retina')
def set_figsize(figsize=(3.5, 2.5)):
"""Set matplotlib figure size."""
plt.rcParams['figure.figsize'] = figsize
set_figsize()
voc_rgb_mean = nd.array([0.485, 0.456, 0.406])
voc_rgb_std = nd.array([0.229, 0.224, 0.225])
def accuracy(y_hat, y):
"""Get accuracy."""
return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()
def bbox_to_rect(bbox, color):
"""Convert bounding box to matplotlib format."""
return plt.Rectangle(xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0],
height=bbox[3]-bbox[1], fill=False, edgecolor=color,
linewidth=2)
def data_iter(batch_size, features, labels):
"""Iterate through a data set."""
num_examples = len(features)
indices = list(range(num_examples))
random.shuffle(indices)
for i in range(0, num_examples, batch_size):
j = nd.array(indices[i: min(i + batch_size, num_examples)])
yield features.take(j), labels.take(j)
def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a consecutive order from sequential data."""
corpus_indices = nd.array(corpus_indices, ctx=ctx)
data_len = len(corpus_indices)
batch_len = data_len // batch_size
indices = corpus_indices[0 : batch_size * batch_len].reshape((
batch_size, batch_len))
epoch_size = (batch_len - 1) // num_steps
for i in range(epoch_size):
i = i * num_steps
X = indices[:, i : i + num_steps]
Y = indices[:, i + 1 : i + num_steps + 1]
yield X, Y
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a random order from sequential data."""
num_examples = (len(corpus_indices) - 1) // num_steps
epoch_size = num_examples // batch_size
example_indices = list(range(num_examples))
random.shuffle(example_indices)
def _data(pos):
return corpus_indices[pos : pos + num_steps]
for i in range(epoch_size):
i = i * batch_size
batch_indices = example_indices[i : i + batch_size]
X = nd.array(
[_data(j * num_steps) for j in batch_indices], ctx=ctx)
Y = nd.array(
[_data(j * num_steps + 1) for j in batch_indices], ctx=ctx)
yield X, Y
def _download_pikachu(data_dir):
root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/'
'gluon/dataset/pikachu/')
dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8',
'train.idx': 'dcf7318b2602c06428b9988470c731621716c393',
'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'}
for k, v in dataset.items():
gutils.download(root_url + k, os.path.join(data_dir, k), sha1_hash=v)
def _download_voc_pascal(data_dir='../data'):
voc_dir = os.path.join(data_dir, 'VOCdevkit/VOC2012')
url = ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012'
'/VOCtrainval_11-May-2012.tar')
sha1 = '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'
fname = gutils.download(url, data_dir, sha1_hash=sha1)
if not os.path.exists(os.path.join(voc_dir,
'ImageSets/Segmentation/train.txt')):
with tarfile.open(fname, 'r') as f:
f.extractall(data_dir)
return voc_dir
def evaluate_accuracy(data_iter, net, ctx=[mx.cpu()]):
"""Evaluate accuracy of a model on the given data set."""
if isinstance(ctx, mx.Context):
ctx = [ctx]
acc = nd.array([0])
n = 0
if isinstance(data_iter, mx.io.MXDataIter):
data_iter.reset()
for batch in data_iter:
features, labels, batch_size = _get_batch(batch, ctx)
for X, y in zip(features, labels):
y = y.astype('float32')
acc += (net(X).argmax(axis=1)==y).sum().copyto(mx.cpu())
n += y.size
acc.wait_to_read()
return acc.asscalar() / n
def _get_batch(batch, ctx):
"""Return features and labels on ctx."""
if isinstance(batch, mx.io.DataBatch):
features = batch.data[0]
labels = batch.label[0]
else:
features, labels = batch
if labels.dtype != features.dtype:
labels = labels.astype(features.dtype)
return (gutils.split_and_load(features, ctx),
gutils.split_and_load(labels, ctx),
features.shape[0])
def grad_clipping(params, theta, ctx):
"""Clip the gradient."""
if theta is not None:
norm = nd.array([0.0], ctx)
for param in params:
norm += (param.grad ** 2).sum()
norm = norm.sqrt().asscalar()
if norm > theta:
for param in params:
param.grad[:] *= theta / norm
def linreg(X, w, b):
"""Linear regression."""
return nd.dot(X, w) + b
def load_data_fashion_mnist(batch_size, resize=None,
                            root=os.path.join('~', '.mxnet', 'datasets',
                                              'fashion-mnist')):
    """Download the Fashion-MNIST dataset and load it into memory."""
root = os.path.expanduser(root)
transformer = []
if resize:
transformer += [gdata.vision.transforms.Resize(resize)]
transformer += [gdata.vision.transforms.ToTensor()]
transformer = gdata.vision.transforms.Compose(transformer)
mnist_train = gdata.vision.FashionMNIST(root=root, train=True)
mnist_test = gdata.vision.FashionMNIST(root=root, train=False)
train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
batch_size, shuffle=True, num_workers=4)
test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
batch_size, shuffle=False, num_workers=4)
return train_iter, test_iter
def load_data_pascal_voc(batch_size, output_shape):
    """Download the Pascal VOC dataset and load it into memory."""
voc_train = VOCSegDataset(True, output_shape)
voc_test = VOCSegDataset(False, output_shape)
train_iter = gdata.DataLoader(
voc_train, batch_size, shuffle=True,last_batch='discard',
num_workers=4)
test_iter = gdata.DataLoader(
voc_test, batch_size,last_batch='discard', num_workers=4)
return train_iter, test_iter
def load_data_pikachu(batch_size, edge_size=256):
    """Download the Pikachu dataset and load it into memory."""
data_dir = '../data/pikachu'
_download_pikachu(data_dir)
train_iter = image.ImageDetIter(
path_imgrec=os.path.join(data_dir, 'train.rec'),
path_imgidx=os.path.join(data_dir, 'train.idx'),
batch_size=batch_size,
data_shape=(3, edge_size, edge_size),
shuffle=True,
rand_crop=1,
min_object_covered=0.95,
max_attempts=200)
val_iter = image.ImageDetIter(
path_imgrec=os.path.join(data_dir, 'val.rec'),
batch_size=batch_size,
data_shape=(3, edge_size, edge_size),
shuffle=False)
return train_iter, val_iter
def _make_list(obj, default_values=None):
if obj is None:
obj = default_values
elif not isinstance(obj, (list, tuple)):
obj = [obj]
return obj
def normalize_voc_image(data):
"""Normalize VOC images."""
return (data.astype('float32') / 255 - voc_rgb_mean) / voc_rgb_std
def optimize(batch_size, trainer, num_epochs, decay_epoch, log_interval,
features, labels, net):
"""Optimize an objective function."""
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
loss = gloss.L2Loss()
ls = [loss(net(features), labels).mean().asnumpy()]
for epoch in range(1, num_epochs + 1):
# Decay the learning rate.
if decay_epoch and epoch > decay_epoch:
trainer.set_learning_rate(trainer.learning_rate * 0.1)
for batch_i, (X, y) in enumerate(data_iter):
with autograd.record():
l = loss(net(X), y)
l.backward()
trainer.step(batch_size)
if batch_i * batch_size % log_interval == 0:
ls.append(loss(net(features), labels).mean().asnumpy())
# To print more conveniently, use numpy.
print('w:', net[0].weight.data(), '\nb:', net[0].bias.data(), '\n')
es = np.linspace(0, num_epochs, len(ls), endpoint=True)
semilogy(es, ls, 'epoch', 'loss')
def predict_rnn(rnn, prefix, num_chars, params, num_hiddens, vocab_size, ctx,
idx_to_char, char_to_idx, get_inputs, is_lstm=False):
"""Predict the next chars given the prefix."""
prefix = prefix.lower()
state_h = nd.zeros(shape=(1, num_hiddens), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(1, num_hiddens), ctx=ctx)
output = [char_to_idx[prefix[0]]]
for i in range(num_chars + len(prefix)):
X = nd.array([output[-1]], ctx=ctx)
if is_lstm:
Y, state_h, state_c = rnn(get_inputs(X, vocab_size), state_h,
state_c, *params)
else:
Y, state_h = rnn(get_inputs(X, vocab_size), state_h, *params)
if i < len(prefix) - 1:
next_input = char_to_idx[prefix[i + 1]]
else:
next_input = int(Y[0].argmax(axis=1).asscalar())
output.append(next_input)
return ''.join([idx_to_char[i] for i in output])
def read_voc_images(root='../data/VOCdevkit/VOC2012', train=True):
"""Read VOC images."""
txt_fname = '%s/ImageSets/Segmentation/%s' % (
root, 'train.txt' if train else 'val.txt')
with open(txt_fname, 'r') as f:
images = f.read().split()
data, label = [None] * len(images), [None] * len(images)
for i, fname in enumerate(images):
data[i] = image.imread('%s/JPEGImages/%s.jpg' % (root, fname))
label[i] = image.imread('%s/SegmentationClass/%s.png' % (root, fname))
return data, label
class Residual(nn.HybridBlock):
"""The residual block."""
def __init__(self, channels, same_shape=True, **kwargs):
super(Residual, self).__init__(**kwargs)
self.same_shape = same_shape
strides = 1 if same_shape else 2
self.conv1 = nn.Conv2D(channels, kernel_size=3, padding=1,
strides=strides)
self.bn1 = nn.BatchNorm()
self.conv2 = nn.Conv2D(channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm()
if not same_shape:
self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=strides)
def hybrid_forward(self, F, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if not self.same_shape:
x = self.conv3(x)
return F.relu(out + x)
def resnet18(num_classes):
"""The ResNet-18 model."""
net = nn.HybridSequential()
net.add(nn.BatchNorm(),
nn.Conv2D(64, kernel_size=3, strides=1),
nn.MaxPool2D(pool_size=3, strides=2),
Residual(64),
Residual(64),
Residual(128, same_shape=False),
Residual(128),
Residual(256, same_shape=False),
Residual(256),
nn.GlobalAvgPool2D(),
nn.Dense(num_classes))
return net
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
legend=None, figsize=(3.5, 2.5)):
"""Plot x and log(y)."""
set_figsize()
set_matplotlib_formats('retina')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.semilogy(x_vals, y_vals)
if x2_vals and y2_vals:
plt.semilogy(x2_vals, y2_vals)
plt.legend(legend)
plt.show()
def sgd(params, lr, batch_size):
"""Mini-batch stochastic gradient descent."""
for param in params:
param[:] = param - lr * param.grad / batch_size
def show_bboxes(axes, bboxes, labels=None, colors=None):
"""Show bounding boxes."""
labels = _make_list(labels)
colors = _make_list(colors, ['b', 'g', 'r', 'm', 'k'])
for i, bbox in enumerate(bboxes):
color = colors[i % len(colors)]
rect = bbox_to_rect(bbox.asnumpy(), color)
axes.add_patch(rect)
if labels and len(labels) > i:
text_color = 'k' if color == 'w' else 'w'
axes.text(rect.xy[0], rect.xy[1], labels[i],
va='center', ha='center', fontsize=9, color=text_color,
bbox=dict(facecolor=color, lw=0))
def show_images(imgs, num_rows, num_cols, scale=2):
"""Plot a list of images."""
figsize = (num_cols * scale, num_rows * scale)
_, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
for i in range(num_rows):
for j in range(num_cols):
axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())
axes[i][j].axes.get_xaxis().set_visible(False)
axes[i][j].axes.get_yaxis().set_visible(False)
return axes
def squared_loss(y_hat, y):
"""Squared loss."""
return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
def to_onehot(X, size):
"""Represent inputs with one-hot encoding."""
return [nd.one_hot(x, size) for x in X.T]
def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs,
print_batches=None):
"""Train and evaluate a model."""
print('training on', ctx)
if isinstance(ctx, mx.Context):
ctx = [ctx]
for epoch in range(1, num_epochs + 1):
train_l_sum, train_acc_sum, n, m = 0.0, 0.0, 0.0, 0.0
if isinstance(train_iter, mx.io.MXDataIter):
train_iter.reset()
start = time()
for i, batch in enumerate(train_iter):
Xs, ys, batch_size = _get_batch(batch, ctx)
ls = []
with autograd.record():
y_hats = [net(X) for X in Xs]
ls = [loss(y_hat, y) for y_hat, y in zip(y_hats, ys)]
for l in ls:
l.backward()
train_acc_sum += sum([(y_hat.argmax(axis=1) == y).sum().asscalar()
for y_hat, y in zip(y_hats, ys)])
train_l_sum += sum([l.sum().asscalar() for l in ls])
trainer.step(batch_size)
n += batch_size
m += sum([y.size for y in ys])
if print_batches and (i + 1) % print_batches == 0:
print('batch %d, loss %f, train acc %f' % (
n, train_l_sum / n, train_acc_sum / m
))
test_acc = evaluate_accuracy(test_iter, net, ctx)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
'time %.1f sec'
% (epoch, train_l_sum / n, train_acc_sum / m, test_acc,
time() - start))
def train_and_predict_rnn(rnn, is_random_iter, num_epochs, num_steps,
num_hiddens, lr, clipping_theta, batch_size,
vocab_size, pred_period, pred_len, prefixes,
get_params, get_inputs, ctx, corpus_indices,
idx_to_char, char_to_idx, is_lstm=False):
"""Train an RNN model and predict the next item in the sequence."""
if is_random_iter:
data_iter = data_iter_random
else:
data_iter = data_iter_consecutive
params = get_params()
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(1, num_epochs + 1):
if not is_random_iter:
state_h = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
train_l_sum = nd.array([0], ctx=ctx)
train_l_cnt = 0
for X, Y in data_iter(corpus_indices, batch_size, num_steps, ctx):
if is_random_iter:
state_h = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, num_hiddens),
ctx=ctx)
else:
state_h = state_h.detach()
if is_lstm:
state_c = state_c.detach()
with autograd.record():
if is_lstm:
outputs, state_h, state_c = rnn(
get_inputs(X, vocab_size), state_h, state_c, *params)
else:
outputs, state_h = rnn(
get_inputs(X, vocab_size), state_h, *params)
y = Y.T.reshape((-1,))
outputs = nd.concat(*outputs, dim=0)
l = loss(outputs, y)
l.backward()
grad_clipping(params, clipping_theta, ctx)
sgd(params, lr, 1)
train_l_sum = train_l_sum + l.sum()
train_l_cnt += l.size
if epoch % pred_period == 0:
print('\nepoch %d, perplexity %f'
% (epoch, (train_l_sum / train_l_cnt).exp().asscalar()))
for prefix in prefixes:
print(' - ', predict_rnn(
rnn, prefix, pred_len, params, num_hiddens, vocab_size,
ctx, idx_to_char, char_to_idx, get_inputs, is_lstm))
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
params=None, lr=None, trainer=None):
"""Train and evaluate a model on CPU."""
for epoch in range(1, num_epochs + 1):
train_l_sum = 0
train_acc_sum = 0
for X, y in train_iter:
with autograd.record():
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
if trainer is None:
sgd(params, lr, batch_size)
else:
trainer.step(batch_size)
train_l_sum += l.mean().asscalar()
train_acc_sum += accuracy(y_hat, y)
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
% (epoch, train_l_sum / len(train_iter),
train_acc_sum / len(train_iter), test_acc))
def try_all_gpus():
"""Return all available GPUs, or [mx.cpu()] if there is no GPU."""
ctxes = []
try:
for i in range(16):
ctx = mx.gpu(i)
_ = nd.array([0], ctx=ctx)
ctxes.append(ctx)
    except mx.base.MXNetError:
pass
if not ctxes:
ctxes = [mx.cpu()]
return ctxes
def try_gpu():
"""If GPU is available, return mx.gpu(0); else return mx.cpu()."""
try:
ctx = mx.gpu()
_ = nd.array([0], ctx=ctx)
    except mx.base.MXNetError:
ctx = mx.cpu()
return ctx
class VOCSegDataset(gluon.data.Dataset):
"""The Pascal VOC2012 Dataset."""
def __init__(self, train, crop_size):
self.train = train
self.crop_size = crop_size
self.rgb_mean = nd.array([0.485, 0.456, 0.406])
self.rgb_std = nd.array([0.229, 0.224, 0.225])
self.voc_colormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0],
[128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128],
[128, 128, 128], [64, 0, 0], [192, 0, 0],
[64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0],
[128, 192, 0], [0, 64, 128]]
self.voc_classes = ['background', 'aeroplane', 'bicycle', 'bird',
'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'potted plant', 'sheep', 'sofa',
'train', 'tv/monitor']
self.colormap2label = None
self.load_images()
def voc_label_indices(self, img):
if self.colormap2label is None:
self.colormap2label = nd.zeros(256 ** 3)
for i, cm in enumerate(self.voc_colormap):
self.colormap2label[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i
data = img.astype('int32')
idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]
return self.colormap2label[idx]
def rand_crop(self, data, label, height, width):
data, rect = image.random_crop(data, (width, height))
label = image.fixed_crop(label, *rect)
return data, label
def load_images(self):
voc_dir = _download_voc_pascal()
data, label = read_voc_images(root=voc_dir, train=self.train)
self.data = [self.normalize_image(im) for im in self.filter(data)]
self.label = self.filter(label)
print('read '+ str(len(self.data)) + ' examples')
def normalize_image(self, data):
return (data.astype('float32') / 255 - self.rgb_mean) / self.rgb_std
def filter(self, images):
return [im for im in images if (
im.shape[0] >= self.crop_size[0] and
im.shape[1] >= self.crop_size[1])]
def __getitem__(self, idx):
data, label = self.rand_crop(self.data[idx], self.label[idx],
*self.crop_size)
return data.transpose((2, 0, 1)), self.voc_label_indices(label)
def __len__(self):
return len(self.data)
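# A minimal usage sketch (illustrative only; assumes the VOC data has already been
# fetched by _download_voc_pascal and uses arbitrary example values):
#
#   crop_size = (320, 480)
#   voc_train = VOCSegDataset(train=True, crop_size=crop_size)
#   train_iter = gluon.data.DataLoader(voc_train, batch_size=64, shuffle=True,
#                                      last_batch='discard')
#   for X, Y in train_iter:
#       print(X.shape, Y.shape)   # e.g. (64, 3, 320, 480), (64, 320, 480)
#       break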
|
$NetBSD: patch-setup.py,v 1.1 2019/06/17 15:01:45 adam Exp $
Enable 'test' command.
--- setup.py.orig 2019-06-17 08:28:08.000000000 +0000
+++ setup.py
@@ -65,6 +65,7 @@ SETUPTOOLS_COMMANDS = {
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
+ 'test'
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
|
# Generated by Django 4.0 on 2022-03-13 19:26
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('paginas', '0010_remove_publicacao_descricao_remove_publicacao_hora_and_more'),
]
operations = [
migrations.AddField(
model_name='publicacao',
name='descricao',
field=models.TextField(default='', max_length=200, verbose_name='Descrição'),
preserve_default=False,
),
migrations.AddField(
model_name='publicacao',
name='hora',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='publicacao',
name='tag',
field=models.ForeignKey(default=5, on_delete=django.db.models.deletion.PROTECT, to='paginas.tag'),
preserve_default=False,
),
migrations.AddField(
model_name='publicacao',
name='titulo',
field=models.CharField(default=8, max_length=50, verbose_name='Título'),
preserve_default=False,
),
migrations.AddField(
model_name='publicacao',
name='upload',
field=models.FileField(default=9, upload_to='arquivos'),
preserve_default=False,
),
]
|
import torch
import torch.nn as nn
import torch.optim as optim
from a2c_ppo_acktr.algo.kfac import KFACOptimizer
class A2C_ACKTR():
def __init__(self,
actor_critic,
value_loss_coef,
entropy_coef,
lr=None,
eps=None,
alpha=None,
max_grad_norm=None,
acktr=False):
self.actor_critic = actor_critic
self.acktr = acktr
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
if acktr:
self.optimizer = KFACOptimizer(actor_critic)
else:
self.optimizer = optim.RMSprop(
actor_critic.parameters(), lr, eps=eps, alpha=alpha)
def update(self, rollouts):
obs_shape = rollouts.obs.size()[2:]
action_shape = rollouts.actions.size()[-1]
num_steps, num_processes, _ = rollouts.rewards.size()
values, action_log_probs, dist_entropy, _, _ = self.actor_critic.evaluate_actions(
rollouts.obs[:-1].view(-1, *obs_shape),
rollouts.recurrent_hidden_states[0].view(
-1, self.actor_critic.recurrent_hidden_state_size),
rollouts.masks[:-1].view(-1, 1),
rollouts.actions.view(-1, action_shape))
values = values.view(num_steps, num_processes, 1)
action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
advantages = rollouts.returns[:-1] - values
value_loss = advantages.pow(2).mean()
action_loss = -(advantages.detach() * action_log_probs).mean()
if self.acktr and self.optimizer.steps % self.optimizer.Ts == 0:
# Compute fisher, see Martens 2014
self.actor_critic.zero_grad()
pg_fisher_loss = -action_log_probs.mean()
value_noise = torch.randn(values.size())
if values.is_cuda:
value_noise = value_noise.cuda()
sample_values = values + value_noise
vf_fisher_loss = -(values - sample_values.detach()).pow(2).mean()
fisher_loss = pg_fisher_loss + vf_fisher_loss
self.optimizer.acc_stats = True
fisher_loss.backward(retain_graph=True)
self.optimizer.acc_stats = False
self.optimizer.zero_grad()
(value_loss * self.value_loss_coef + action_loss -
dist_entropy * self.entropy_coef).backward()
        if not self.acktr:
nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
return value_loss.item(), action_loss.item(), dist_entropy.item()
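# A minimal usage sketch (illustrative only; `actor_critic` and `rollouts` are the
# policy network and rollout-storage objects from the surrounding a2c_ppo_acktr
# code base and are assumed here, not defined in this file):
#
#   agent = A2C_ACKTR(actor_critic, value_loss_coef=0.5, entropy_coef=0.01,
#                     lr=7e-4, eps=1e-5, alpha=0.99, max_grad_norm=0.5)
#   value_loss, action_loss, dist_entropy = agent.update(rollouts)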
|
import abc
import logging
from collections import defaultdict
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import networkx
import openff.fragmenter
from openff.fragmenter.chemi import (
assign_elf10_am1_bond_orders,
extract_fragment,
find_ring_systems,
find_stereocenters,
)
from openff.fragmenter.states import _enumerate_stereoisomers
from openff.fragmenter.utils import (
default_functional_groups,
get_atom_index,
get_map_index,
global_toolkit_registry,
)
from openff.toolkit.topology import Atom, Molecule
from openff.toolkit.utils import (
GLOBAL_TOOLKIT_REGISTRY,
ToolkitRegistry,
ToolkitWrapper,
)
from pydantic import BaseModel, Field
from typing_extensions import Literal
logger = logging.getLogger(__name__)
BondTuple = Tuple[int, int]
AtomAndBondSet = Tuple[Set[int], Set[BondTuple]]
Stereochemistries = Dict[Union[int, BondTuple], str]
RingSystems = Dict[int, AtomAndBondSet]
FunctionalGroups = Dict[str, AtomAndBondSet]
Heuristic = Literal["path_length", "wbo"]
class Fragment(BaseModel):
"""An object which stores minimal information about a molecules fragment."""
smiles: str = Field(
...,
description="A mapped SMILES pattern describing the fragment. The map indices "
"assigned to each atom in the pattern will correspond to the map index of the "
"corresponding parent atom. If an atom does not have a map index it is likely "
"that the atom was added (either H, or C) to ensure every atom in the fragment "
"has a sensible valence.",
)
bond_indices: Tuple[int, int] = Field(
...,
description="The map indices of the atoms involved in the bond that the "
"fragment was built around.",
)
@property
def molecule(self) -> Molecule:
"""The fragment represented as an OpenFF molecule object."""
return Molecule.from_smiles(self.smiles, allow_undefined_stereo=True)
class FragmentationResult(BaseModel):
"""An object which stores the results of fragmenting a molecule."""
parent_smiles: str = Field(
...,
description="A mapped SMILES pattern describing the parent molecule that was "
"fragmented.",
)
fragments: List[Fragment] = Field(..., description="The generated fragments.")
provenance: Dict[str, Any] = Field(
...,
description="A dictionary storing provenance information about how the "
"fragments were generated.",
)
@property
def parent_molecule(self) -> Molecule:
"""The parent molecule represented as an OpenFF molecule object."""
return Molecule.from_smiles(self.parent_smiles)
@property
def fragment_molecules(self) -> Dict[BondTuple, Molecule]:
"""A dictionary of the fragment molecules represented as OpenFF molecule
objects, indexed by the map indices of the bond that each fragment was built
around."""
return {fragment.bond_indices: fragment.molecule for fragment in self.fragments}
@property
def fragments_by_bond(self) -> Dict[BondTuple, Fragment]:
"""Returns a dictionary of fragments indexed by the bond (defined in terms of
the map indices of the atoms that form it) that the fragment was built around.
"""
return {fragment.bond_indices: fragment for fragment in self.fragments}
class Fragmenter(BaseModel, abc.ABC):
"""The base class that all fragmentation engines should inherit from."""
functional_groups: Dict[str, str] = Field(
default_factory=default_functional_groups,
description="A dictionary of SMARTS of functional groups that should not be "
"fragmented, indexed by an informative name, e.g. 'alcohol': '[#6]-[#8X2H1]'.",
)
@classmethod
def _find_stereo(cls, molecule: Molecule) -> Stereochemistries:
"""Find chiral atoms and bonds, store the chirality.
Notes
-----
* This is needed to check if a fragment has flipped chirality. Currently
this can happen and it is a bug.
Parameters
----------
molecule
The molecule to search for stereochemistry.
Returns
-------
The stereochemistry associated with atom and bond stereocenters
"""
atom_stereo = {
get_map_index(molecule, atom.molecule_atom_index): atom.stereochemistry
for atom in molecule.atoms
if atom.stereochemistry is not None
}
bond_stereo = {
(
get_map_index(molecule, bond.atom1_index),
get_map_index(molecule, bond.atom2_index),
): bond.stereochemistry
for bond in molecule.bonds
if bond.stereochemistry is not None
}
return {**atom_stereo, **bond_stereo}
@classmethod
def _check_stereo(
cls, fragment: Molecule, parent_stereo: Stereochemistries
) -> bool:
"""Checks if the stereochemistry of a fragment is different to the
stereochemistry of the parent.
Parameters
----------
fragment
The fragment whose stereo should be compared to the parent.
parent_stereo
The stereochemistry of the parent molecule.
Returns
-------
Whether the fragment has the same stereochemistry as the parent.
"""
atom_stereocenters, bond_stereocenters = find_stereocenters(fragment)
# Check for new / flipped chiral centers.
for atom_index in atom_stereocenters:
map_index = get_map_index(fragment, atom_index)
if map_index not in parent_stereo:
logger.warning(f"A new stereocenter formed at atom {map_index}")
return False
fragment_stereo = fragment.atoms[atom_index].stereochemistry
if fragment_stereo != parent_stereo[map_index]:
logger.warning(
f"Stereochemistry for atom {map_index} flipped from "
f"{parent_stereo[map_index]} to {fragment_stereo}"
)
return False
for index_tuple in bond_stereocenters:
map_tuple = tuple(get_map_index(fragment, i) for i in index_tuple)
map_tuple = (
map_tuple if map_tuple in parent_stereo else tuple(reversed(map_tuple))
)
if map_tuple not in parent_stereo:
logger.warning(f"A new chiral bond formed at bond {map_tuple}")
return False
fragment_stereo = fragment.get_bond_between(*index_tuple).stereochemistry
if fragment_stereo != parent_stereo[map_tuple]:
logger.warning(
f"Stereochemistry for bond {map_tuple} flipped from "
f"{parent_stereo[map_tuple]} to {fragment_stereo}"
)
return False
return True
@classmethod
def _fix_stereo(
cls, fragment: Molecule, parent_stereo: Stereochemistries
) -> Optional[Molecule]:
"""Flip all stereocenters and find the stereoisomer that matches the parent
Parameters
----------
fragment
The fragment whose stereochemistry should be flipped to match the parent.
parent_stereo
The stereochemistry of the parent molecule.
Returns
-------
A fragment with the same stereochemistry as parent molecule if possible else ``None``.
"""
for stereoisomer in _enumerate_stereoisomers(fragment, 200, True):
if not cls._check_stereo(stereoisomer, parent_stereo):
continue
return stereoisomer
return None
@classmethod
def _find_functional_groups(
cls, molecule: Molecule, functional_groups: Dict[str, str]
) -> FunctionalGroups:
"""Find the atoms and bonds involved in the functional groups specified by
``functional_groups``.
Parameters
----------
molecule
            The molecule to search for functional groups.
functional_groups
A dictionary of SMARTS of functional groups that should not be fragmented
indexed by a friendly string label, e.g. 'alcohol: [#6:1]-[#8H1X2:2]'
Returns
-------
        The atoms and bonds in the found functional groups stored in a dictionary
indexed by a unique key associated with each functional group.
"""
found_groups = {}
for functional_group, smarts in functional_groups.items():
unique_matches = {
tuple(sorted(match))
for match in molecule.chemical_environment_matches(smarts)
}
for i, match in enumerate(unique_matches):
atoms = set(get_map_index(molecule, index) for index in match)
bonds = set(
(
get_map_index(molecule, bond.atom1_index),
get_map_index(molecule, bond.atom2_index),
)
for bond in molecule.bonds
if bond.atom1_index in match and bond.atom2_index in match
)
found_groups[f"{functional_group}_{i}"] = (atoms, bonds)
return found_groups
@classmethod
def find_rotatable_bonds(
cls, molecule: Molecule, target_bond_smarts: Optional[List[str]]
) -> List[BondTuple]:
"""Finds the rotatable bonds in a molecule *including* rotatable double
bonds.
Parameters
----------
molecule
The molecule to search for rotatable bonds.
target_bond_smarts
An optional list of SMARTS patterns that should be used to identify the bonds
within the parent molecule to grow fragments around. Each SMARTS pattern
should include **two** indexed atoms that correspond to the two atoms
involved in the central bond.
If no pattern is provided fragments will be constructed around all 'rotatable
bonds'. A 'rotatable bond' here means any bond matched by a
`[!$(*#*)&!D1:1]-,=;!@[!$(*#*)&!D1:2]` SMARTS pattern with the added
constraint that the **heavy** degree (i.e. the degree excluding hydrogen) of
both atoms in the bond must be >= 2.
Returns
-------
A list of the **map** indices of the atoms that form the rotatable
bonds, ``[(m1, m2),...]``.
"""
if target_bond_smarts is None:
matches = molecule.chemical_environment_matches(
"[!$(*#*)&!D1:1]-,=;!@[!$(*#*)&!D1:2]"
)
else:
matches = [
match
for smarts in target_bond_smarts
for match in molecule.chemical_environment_matches(smarts)
]
if not all(len(match) == 2 for match in matches):
raise ValueError(
f"The `target_bond_smarts` pattern ({target_bond_smarts}) "
f"must define exactly two indexed atoms to match."
)
unique_matches = {tuple(sorted(match)) for match in matches}
if target_bond_smarts is None:
# Drop bonds without a heavy degree of at least 2 on each end to avoid
# finding terminal bonds
def heavy_degree(atom_index: int) -> int:
atom = molecule.atoms[atom_index]
return sum(1 for atom in atom.bonded_atoms if atom.atomic_number != 1)
unique_matches = {
match
for match in unique_matches
if all(heavy_degree(i) > 1 for i in match)
}
return [
(
get_map_index(molecule, match[0]),
get_map_index(molecule, match[1]),
)
for match in unique_matches
]
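    # Illustrative example (hypothetical SMARTS, not part of this module): to grow
    # fragments only around amide C-N bonds one could pass
    #     target_bond_smarts=["[#6:1](=O)-[#7:2]"]
    # where the two indexed atoms mark the central bond, as required above.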
@classmethod
def _atom_bond_set_to_mol(
cls,
parent: Molecule,
parent_stereo: Stereochemistries,
atoms: Set[int],
bonds: Set[BondTuple],
) -> Tuple[Molecule, bool]:
"""Extracts a subset of a molecule based on a set of atom and bond indices.
Parameters
----------
parent
The parent molecule to slice.
parent_stereo
The stereochemistry of the parent. This will be used to ensure the returned
subset molecule retains the correct stereochemistry.
atoms
set of map indices
bonds
set of bond tuples (m1, m2)
Returns
-------
The subset molecule and a flag to indicate whether any new stereocenters are present in the fragment.
"""
fragment = extract_fragment(parent, atoms, bonds)
if not cls._check_stereo(fragment, parent_stereo):
fixed_fragment = cls._fix_stereo(fragment, parent_stereo)
if fixed_fragment is None:
return fragment, True
else:
return fixed_fragment, False
return fragment, False
@classmethod
def _get_torsion_quartet(
cls, molecule: Molecule, bond: BondTuple
) -> AtomAndBondSet:
"""Get all atoms bonded to the torsion quartet around rotatable bond
Parameters
----------
molecule
The molecule containing the rotatable bond.
bond
map indices of atoms in bond
Returns
-------
The map indices of atoms in quartet and the bonds in quartet.
"""
atom_map_indices = {*bond}
bond_map_indices = {bond}
atoms = [
molecule.atoms[i]
for i, j in molecule.properties["atom_map"].items()
if j in bond
]
for atom in atoms:
map_index = get_map_index(molecule, atom.molecule_atom_index)
for neighbor in atom.bonded_atoms:
neighbour_map_index = get_map_index(
molecule, neighbor.molecule_atom_index
)
atom_map_indices.add(neighbour_map_index)
bond_map_indices.add((map_index, neighbour_map_index))
for next_neighbour in neighbor.bonded_atoms:
next_neighbour_map_index = get_map_index(
molecule, next_neighbour.molecule_atom_index
)
atom_map_indices.add(next_neighbour_map_index)
bond_map_indices.add(
(neighbour_map_index, next_neighbour_map_index)
)
return atom_map_indices, bond_map_indices
@classmethod
def _find_ring_systems(
cls,
molecule: Molecule,
functional_groups: FunctionalGroups,
keep_non_rotor_ring_substituents: bool = False,
) -> RingSystems:
"""This function finds all ring systems in a molecule.
Parameters
----------
molecule
The molecule to search for ring systems.
functional_groups
A dictionary of the functional groups on the molecule which should not
be fragmented.
keep_non_rotor_ring_substituents
If True, keep all non rotatable ring substituents. According to the
benchmark, it is not necessary.
Returns
-------
Any found ring systems.
"""
atom_to_ring_indices = find_ring_systems(molecule)
# Find the map indices of the atoms involved in each ring system.
ring_system_atoms = {
ring_index: {
get_map_index(molecule, i)
for i in atom_to_ring_indices
if atom_to_ring_indices[i] == ring_index
}
for ring_index in {*atom_to_ring_indices.values()}
}
# Find the map indices of the bonds involved in each ring system.
ring_system_bonds = defaultdict(set)
for bond in molecule.bonds:
ring_index_1 = atom_to_ring_indices.get(bond.atom1_index, -1)
ring_index_2 = atom_to_ring_indices.get(bond.atom2_index, -2)
if ring_index_1 != ring_index_2:
continue
ring_system_bonds[ring_index_1].add(
(
get_map_index(molecule, bond.atom1_index),
get_map_index(molecule, bond.atom2_index),
)
)
# Scan the neighbours of the ring system atoms for any functional groups
# / non-rotor substituents which should be included in the ring systems.
for ring_index in ring_system_atoms:
# If any atoms are part of a functional group, include the other atoms in the
# group in the ring system lists
ring_functional_groups = {
functional_group
for map_index in ring_system_atoms[ring_index]
for functional_group in functional_groups
if map_index in functional_groups[functional_group][0]
}
ring_system_atoms[ring_index].update(
map_index
for functional_group in ring_functional_groups
for map_index in functional_groups[functional_group][0]
)
ring_system_bonds[ring_index].update(
map_tuple
for functional_group in ring_functional_groups
for map_tuple in functional_groups[functional_group][1]
)
if not keep_non_rotor_ring_substituents:
continue
non_rotor_atoms, non_rotor_bonds = cls._find_non_rotor_ring_substituents(
molecule, ring_system_atoms[ring_index]
)
ring_system_atoms[ring_index].update(non_rotor_atoms)
ring_system_bonds[ring_index].update(non_rotor_bonds)
ring_systems = {
ring_index: (
ring_system_atoms[ring_index],
ring_system_bonds[ring_index],
)
for ring_index in ring_system_atoms
}
return ring_systems
@classmethod
def _find_non_rotor_ring_substituents(
cls, molecule: Molecule, ring_system_atoms: Set[int]
) -> AtomAndBondSet:
"""Find the non-rotor substituents attached to a particular ring system.
Parameters
----------
molecule
The molecule to search for non-rotor ring substituents.
ring_system_atoms
The map indices of the atoms in the ring system of interest.
Returns
-------
The map indices of the atoms and bonds involved in any found
functional groups.
"""
rotatable_bonds = molecule.find_rotatable_bonds()
def heavy_degree(atom: Atom) -> int:
return sum(1 for atom in atom.bonded_atoms if atom.atomic_number != 1)
rotor_bonds = [
bond
for bond in rotatable_bonds
if heavy_degree(bond.atom1) >= 2 and heavy_degree(bond.atom2) >= 2
]
non_rotor_atoms = set()
non_rotor_bonds = set()
for bond in molecule.bonds:
# Check if the bond is a rotor.
if bond in rotor_bonds:
continue
if bond.atom1.atomic_number == 1 or bond.atom2.atomic_number == 1:
continue
map_index_1 = get_map_index(molecule, bond.atom1_index)
map_index_2 = get_map_index(molecule, bond.atom2_index)
in_system_1 = map_index_1 in ring_system_atoms
in_system_2 = map_index_2 in ring_system_atoms
if (in_system_1 and in_system_2) or (not in_system_1 and not in_system_2):
continue
non_rotor_atoms.update((map_index_1, map_index_2))
non_rotor_bonds.add((map_index_1, map_index_2))
return non_rotor_atoms, non_rotor_bonds
@classmethod
def _get_ring_and_fgroups(
cls,
parent: Molecule,
parent_groups: FunctionalGroups,
parent_rings: RingSystems,
atoms: Set[int],
bonds: Set[BondTuple],
) -> AtomAndBondSet:
"""Adds the atom and bond indices of groups ortho to those already in
a fragment, such that they are retained during fragmentation.
Parameters
----------
parent
The molecule being fragmented.
parent_groups
A dictionary of the functional groups on the molecule which should not
be fragmented.
parent_rings
A dictionary of the ring systems in the molecule which should not
be fragmented.
atoms
The map indices of the atoms in the fragment.
bonds
The map indices of the bonds in the fragment.
Returns
-------
The updated set of atom and bond map indices to retain.
"""
# Find the sets of atoms which are located ortho to one of the bonds being
# fragmented.
ortho_atoms, ortho_bonds = cls._find_ortho_substituents(parent, bonds)
atoms.update(ortho_atoms)
bonds.update(ortho_bonds)
# Include the rings systems and functional groups connected to the current
# atom sets.
new_atoms = set()
new_bonds = set()
fragment_groups = {
group
for group in parent_groups
if any(atom in parent_groups[group][0] for atom in atoms)
}
for functional_group in fragment_groups:
new_atoms.update(parent_groups[functional_group][0])
new_bonds.update(parent_groups[functional_group][1])
fragment_rings = {
ring_index
for ring_index in parent_rings
if any(atom in parent_rings[ring_index][0] for atom in atoms)
}
for ring_system in fragment_rings:
new_atoms.update(parent_rings[ring_system][0])
new_bonds.update(parent_rings[ring_system][1])
atoms.update(new_atoms)
bonds.update(new_bonds)
        # Ensure the matched bonds don't include duplicates.
bonds = {tuple(sorted(bond)) for bond in bonds}
return atoms, bonds
@classmethod
def _find_ortho_substituents(
cls, parent: Molecule, bonds: Set[BondTuple]
) -> AtomAndBondSet:
"""Find ring substituents that are ortho to one of the rotatable bonds specified
in a list of bonds.
Parameters
----------
parent
The parent molecule being fragmented.
bonds
The map indices of the rotatable bonds.
Returns
-------
The set of map indices of atoms in ortho group and of bond tuples in ortho
group.
"""
matched_atoms = set()
matched_bonds = set()
for match in parent.chemical_environment_matches(
"[!#1:1]~&!@[*:2]@[*:3]~&!@[!#1*:4]"
):
map_tuple = tuple(get_map_index(parent, i) for i in match)
if map_tuple[:2] not in bonds and map_tuple[:2][::-1] not in bonds:
continue
matched_atoms.update(map_tuple[::3])
matched_bonds.update((map_tuple[i], map_tuple[i + 1]) for i in [0, 2])
        # Ensure the matched bonds don't include duplicates.
matched_bonds = {tuple(sorted(bond)) for bond in matched_bonds}
return matched_atoms, matched_bonds
@classmethod
def _cap_open_valence(
cls,
parent: Molecule,
parent_groups: FunctionalGroups,
atoms: Set[int],
bonds: Set[BondTuple],
) -> AtomAndBondSet:
"""Cap with methyl for fragments that ends with N, O or S. Otherwise cap with H
Parameters
----------
parent
The molecule being fragmented.
parent_groups
A dictionary of the functional groups on the molecule which should not
be fragmented.
atoms
The map indices of the atoms in the fragment being constructed.
bonds
The map indices of the bonds in the fragment being constructed.
"""
map_index_to_functional_group = {
map_index: functional_group
for functional_group in parent_groups
for map_index in parent_groups[functional_group][0]
}
atoms_to_add = set()
bonds_to_add = set()
for map_index in atoms:
atom_index = get_atom_index(parent, map_index)
atom = parent.atoms[atom_index]
if (
atom.atomic_number not in (7, 8, 16)
and map_index not in map_index_to_functional_group
):
continue
# If atom is N, O or S, it needs to be capped
should_cap = False
for neighbour in atom.bonded_atoms:
neighbour_map_index = get_map_index(
parent, neighbour.molecule_atom_index
)
if neighbour.atomic_number == 1 or neighbour_map_index in atoms:
continue
should_cap = True
break
if not should_cap:
continue
for neighbour in atom.bonded_atoms:
if neighbour.atomic_number != 6:
continue
neighbour_map_index = get_map_index(
parent, neighbour.molecule_atom_index
)
atoms_to_add.add(neighbour_map_index)
bonds_to_add.add((map_index, neighbour_map_index))
atoms.update(atoms_to_add)
bonds.update(bonds_to_add)
return atoms, bonds
@classmethod
def _prepare_molecule(
cls,
molecule: Molecule,
functional_groups: Dict[str, str],
keep_non_rotor_ring_substituents: bool,
) -> Tuple[Molecule, Stereochemistries, FunctionalGroups, RingSystems]:
"""Prepare a molecule for fragmentation.
This involves canonically ordering the molecule, determining the stereochemistry
of any stereocenters, detecting any functional groups which should be preserved,
and finding any ring systems which should be preserved.
Parameters
----------
molecule
The parent molecule that should be fragmented.
functional_groups:
A dictionary of SMARTS of functional groups that should not be fragmented.
keep_non_rotor_ring_substituents:
If True, will always keep all non rotor substituents on ring.
Returns
-------
The prepared molecule to fragment, its stereochemistry, and the functional
groups and ring systems it contains that should not be fragmented.
"""
# Canonically order the molecule to try and make the fragmentation more
# deterministic.
molecule: Molecule = molecule.canonical_order_atoms()
molecule.properties["atom_map"] = {i: i + 1 for i in range(molecule.n_atoms)}
# Keep track of stereo to make sure it does not flip
stereo = cls._find_stereo(molecule)
# Find the functional groups and ring systems which should not be fragmented.
found_functional_groups = cls._find_functional_groups(
molecule, functional_groups
)
found_ring_systems = cls._find_ring_systems(
molecule, found_functional_groups, keep_non_rotor_ring_substituents
)
return molecule, stereo, found_functional_groups, found_ring_systems
@abc.abstractmethod
def _fragment(
self,
molecule: Molecule,
target_bond_smarts: Optional[List[str]],
) -> FragmentationResult:
"""The internal implementation of ``fragment``.
Parameters
----------
molecule
The molecule to fragment.
target_bond_smarts
An optional SMARTS pattern that should be used to identify the bonds within
the parent molecule to grow fragments around.
Returns
-------
The results of the fragmentation including the fragments and provenance
about the fragmentation.
"""
raise NotImplementedError()
def fragment(
self,
molecule: Molecule,
target_bond_smarts: Optional[List[str]] = None,
toolkit_registry: Optional[Union[ToolkitRegistry, ToolkitWrapper]] = None,
) -> FragmentationResult:
"""Fragments a molecule according to this class' settings.
Notes
-----
* This method is currently *not* guaranteed to be thread safe as it uses and
modifies the OpenFF toolkits' ``GLOBAL_TOOLKIT_REGISTRY``.
Parameters
----------
molecule
The molecule to fragment.
target_bond_smarts
An optional list of SMARTS patterns that should be used to identify the bonds
within the parent molecule to grow fragments around. Each SMARTS pattern
should include **two** indexed atoms that correspond to the two atoms
involved in the central bond.
If no pattern is provided fragments will be constructed around all 'rotatable
bonds'. A 'rotatable bond' here means any bond matched by a
`[!$(*#*)&!D1:1]-,=;!@[!$(*#*)&!D1:2]` SMARTS pattern with the added
constraint that the **heavy** degree (i.e. the degree excluding hydrogen) of
both atoms in the bond must be >= 2. Note this will not find terminal
            rotatable bonds such as -OH, -NH2, -CH3.
toolkit_registry
The underlying cheminformatics toolkits to use for things like conformer
generation, WBO computation etc. If no value is provided, the current
``GLOBAL_TOOLKIT_REGISTRY`` will be used. See the OpenFF toolkit
documentation for more information.
Returns
-------
The results of the fragmentation including the fragments and provenance
about the fragmentation.
"""
if toolkit_registry is None:
toolkit_registry = GLOBAL_TOOLKIT_REGISTRY
with global_toolkit_registry(toolkit_registry):
result = self._fragment(molecule, target_bond_smarts)
result.provenance["toolkits"] = [
(toolkit.__class__.__name__, toolkit.toolkit_version)
for toolkit in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits
]
if "options" not in result.provenance:
result.provenance["options"] = {}
if target_bond_smarts is not None:
result.provenance["options"]["target_bond_smarts"] = target_bond_smarts
return result
def _default_provenance(self) -> Dict[str, Any]:
"""Returns a dictionary containing default provenance information."""
provenance = {
"creator": openff.fragmenter.__package__,
"version": openff.fragmenter.__version__,
"options": self.dict(),
}
return provenance
class WBOOptions(BaseModel):
"""A set of options for controlling how Wiberg Bond Orders are computed."""
method: Literal["am1-wiberg-elf10"] = Field(
"am1-wiberg-elf10", description="The method to use when computing the WBOs."
)
max_conformers: int = Field(
800, description="The maximum number of conformers to average the WBOs over."
)
rms_threshold: float = Field(
1.0,
description="The minimum RMS value [Angstrom] at which two conformers are "
"considered redundant and one is deleted.",
)
class WBOFragmenter(Fragmenter):
"""Fragment engine for fragmenting molecules using Wiberg Bond Order."""
scheme: Literal["WBO"] = "WBO"
wbo_options: WBOOptions = Field(
WBOOptions(), description="The options to use when computing the WBOs."
)
threshold: float = Field(
0.03,
description="The threshold for the central bond WBO. If the fragment WBO is "
"below this threshold, fragmenter will grow out the fragment one bond at a "
"time via the path specified by the heuristic option",
)
heuristic: Heuristic = Field(
"path_length",
description="The path fragmenter should take when fragment needs to be grown "
"out. The options are ``['wbo', 'path_length']``.",
)
keep_non_rotor_ring_substituents: bool = Field(
False,
description="Whether to always keep all non rotor substituents on rings. If "
"``False``, rotor substituents on rings will only be retained if they are "
"ortho to the central bond or if it's needed for WBO to be within the "
"threshold.",
)
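    # Illustrative configuration sketch (the values are arbitrary examples):
    #     fragmenter = WBOFragmenter(threshold=0.01, heuristic="wbo")
    # A smaller threshold keeps the fragment's central-bond WBO closer to the
    # parent's, at the cost of growing larger fragments.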
def _fragment(
self, molecule: Molecule, target_bond_smarts: Optional[List[str]]
) -> FragmentationResult:
"""Fragments a molecule in such a way that the WBO of the bond that a fragment
is being built around does not change beyond the specified threshold.
"""
(
molecule,
stereochemistry,
functional_groups,
ring_systems,
) = self._prepare_molecule(
molecule, self.functional_groups, self.keep_non_rotor_ring_substituents
)
# Calculate WBO for molecule
if self.wbo_options.method != "am1-wiberg-elf10":
raise NotImplementedError(
"WBOs can currently only be computed using 'am1-wiberg-elf10'."
)
molecule = assign_elf10_am1_bond_orders(
molecule, self.wbo_options.max_conformers, self.wbo_options.rms_threshold
)
rotatable_bonds = self.find_rotatable_bonds(molecule, target_bond_smarts)
wbo_rotor_bonds = self._get_rotor_wbo(molecule, rotatable_bonds)
fragments = {
bond: self._build_fragment(
molecule,
stereochemistry,
functional_groups,
ring_systems,
bond,
wbo_rotor_bonds[bond],
threshold=self.threshold,
heuristic=self.heuristic,
)
for bond in wbo_rotor_bonds
}
return FragmentationResult(
parent_smiles=molecule.to_smiles(mapped=True),
fragments=[
Fragment(smiles=fragment.to_smiles(mapped=True), bond_indices=bond)
for bond, fragment in fragments.items()
],
provenance=self._default_provenance(),
)
@classmethod
def _get_rotor_wbo(
cls, molecule: Molecule, rotor_bonds: List[BondTuple]
) -> Dict[BondTuple, float]:
"""Cache the WBO of each bond in a specific set of rotor bonds..
Parameters
----------
molecule
The molecule containing the rotors.
rotor_bonds
The map indices of the rotor bonds to return the WBOs of.
Returns
-------
The WBO of each rotor bond.
"""
if any(bond.fractional_bond_order is None for bond in molecule.bonds):
raise RuntimeError(
"WBO was not calculated for this molecule. Calculating WBO..."
)
rotors_wbo = {}
for bond_indices in rotor_bonds:
bond = molecule.get_bond_between(
get_atom_index(molecule, bond_indices[0]),
get_atom_index(molecule, bond_indices[1]),
)
rotors_wbo[bond_indices] = bond.fractional_bond_order
return rotors_wbo
@classmethod
def _compare_wbo(
cls, fragment: Molecule, bond_tuple: BondTuple, parent_wbo: float, **kwargs
) -> float:
"""Compare Wiberg Bond order of rotatable bond in a fragment to the parent.
Parameters
----------
fragment
The fragment containing the rotatable bond.
bond_tuple
The map indices of the rotatable bond.
parent_wbo
The WBO of the parent bond with map indices matching ``bond_tuple``.
Returns
-------
The absolute difference between the fragment and parent WBOs.
"""
# Create new fragment object because sometimes the molecule created from atom
# bond set is wonky and then the WBOs are not reproducible
fragment = Molecule.from_smiles(
fragment.to_smiles(mapped=True), allow_undefined_stereo=True
)
fragment_map = fragment.properties.pop("atom_map", None)
try:
fragment = assign_elf10_am1_bond_orders(fragment, **kwargs)
except RuntimeError:
# Most of the time it fails because it is either missing parameters or a
# functional group that should not be fragmented was fragmented
logger.warning(
f"Cannot calculate WBO for fragment {fragment.to_smiles()}. Continue "
f"growing fragment"
)
# TODO: handle different kinds of failures instead of just continuing to
# grow until the failure goes away. Some fail because there are
# functional groups that should not be fragmented.
return 1.0
if fragment_map is not None:
fragment.properties["atom_map"] = fragment_map
bond = fragment.get_bond_between(
get_atom_index(fragment, bond_tuple[0]),
get_atom_index(fragment, bond_tuple[1]),
)
fragment_wbo = bond.fractional_bond_order
return abs(parent_wbo - fragment_wbo)
@classmethod
def _build_fragment(
cls,
parent: Molecule,
parent_stereo: Stereochemistries,
parent_groups: FunctionalGroups,
parent_rings: RingSystems,
bond_tuple: BondTuple,
parent_wbo: float,
threshold: float,
heuristic: Heuristic = "path_length",
cap: bool = True,
**kwargs,
) -> Molecule:
"""Build a fragment around a specified bond.
Parameters
----------
parent
The original molecule being fragmented.
parent_stereo
The stereochemistry of the parent molecule.
parent_groups
A dictionary of the functional groups on the molecule which should not
be fragmented.
parent_rings
A dictionary of the ring systems in the molecule which should not
be fragmented.
bond_tuple
The map indices specifying which bond to build the fragment around.
parent_wbo
The WBO of the parent bond with map indices matching ``bond_tuple``.
threshold
            The threshold for the central bond WBO. If the difference between the
            fragment and parent WBO of the central bond is greater than this
            threshold, fragmenter will grow out the fragment one bond at a time via
            the path specified by the heuristic option.
heuristic
The heuristic to use when building the fragment.
cap
Whether to cap open valences.
Returns
-------
The built fragment molecule.
"""
atoms, bonds = cls._get_torsion_quartet(parent, bond_tuple)
atoms, bonds = cls._get_ring_and_fgroups(
parent, parent_groups, parent_rings, atoms, bonds
)
# Cap open valence
if cap:
atoms, bonds = cls._cap_open_valence(parent, parent_groups, atoms, bonds)
fragment, has_new_stereocenter = cls._atom_bond_set_to_mol(
parent, parent_stereo, atoms, bonds
)
if has_new_stereocenter:
wbo_difference = threshold + 1.0
else:
wbo_difference = cls._compare_wbo(
fragment, bond_tuple, parent_wbo, **kwargs
)
while fragment is not None and wbo_difference > threshold:
fragment, has_new_stereocenter = cls._add_next_substituent(
parent,
parent_stereo,
parent_groups,
parent_rings,
atoms,
bonds,
target_bond=bond_tuple,
heuristic=heuristic,
)
if fragment is None:
break
if has_new_stereocenter:
# For now keep growing the fragment as it is not yet clear how to handle such cases robustly.
wbo_difference = threshold + 1.0
else:
wbo_difference = cls._compare_wbo(
fragment, bond_tuple, parent_wbo, **kwargs
)
# A work around for a known bug where if stereochemistry changes or gets removed,
# the WBOs can change more than the threshold (this will sometimes happen if a
# very small threshold is chosen) and even the parent will have a WBO difference
# greater than the threshold. In this case, return the molecule
if fragment is None:
fragment, _ = cls._atom_bond_set_to_mol(parent, parent_stereo, atoms, bonds)
return fragment
@classmethod
def _select_neighbour_by_path_length(
cls, molecule: Molecule, atoms: Set[int], target_bond: BondTuple
) -> Optional[Tuple[int, BondTuple]]:
atom_indices = {get_atom_index(molecule, atom) for atom in atoms}
atoms_to_add = [
(atom_index, neighbour.molecule_atom_index)
for atom_index in atom_indices
for neighbour in molecule.atoms[atom_index].bonded_atoms
if neighbour.atomic_number != 1
and neighbour.molecule_atom_index not in atom_indices
]
map_atoms_to_add = [
(
get_map_index(molecule, j),
(get_map_index(molecule, i), get_map_index(molecule, j)),
)
for i, j in atoms_to_add
]
# Compute the distance from each neighbouring atom to each of the atoms in the
# target bond.
nx_molecule = molecule.to_networkx()
target_indices = [get_atom_index(molecule, atom) for atom in target_bond]
path_lengths_1, path_lengths_2 = zip(
*(
(
networkx.shortest_path_length(
nx_molecule, target_index, neighbour_index
)
for target_index in target_indices
)
for atom_index, neighbour_index in atoms_to_add
)
)
if len(path_lengths_1) == 0 and len(path_lengths_2) == 0:
return None
reverse = False
min_path_length_1 = min(path_lengths_1)
min_path_length_2 = min(path_lengths_2)
if min_path_length_1 < min_path_length_2:
sort_by = path_lengths_1
elif min_path_length_2 < min_path_length_1:
sort_by = path_lengths_2
else:
# If there are multiple neighbouring atoms the same path length away
# from the target bond fall back to sorting by the WBO.
map_atoms_to_add = [
map_tuple
for map_tuple, *path_length_tuple in zip(
map_atoms_to_add, path_lengths_1, path_lengths_2
)
if min_path_length_1 in path_length_tuple
]
sort_by = [
molecule.get_bond_between(
get_atom_index(molecule, neighbour_bond[0]),
get_atom_index(molecule, neighbour_bond[1]),
).fractional_bond_order
for _, neighbour_bond in map_atoms_to_add
]
reverse = True
sorted_atoms = [
a for _, a in sorted(zip(sort_by, map_atoms_to_add), reverse=reverse)
]
return None if len(sorted_atoms) == 0 else sorted_atoms[0]
@classmethod
def _select_neighbour_by_wbo(
cls, molecule: Molecule, atoms: Set[int]
) -> Optional[Tuple[int, BondTuple]]:
"""A function which return those atoms which neighbour those in the ``atoms``
list sorted by the WBO of the bond between the input atom and the neighbouring
atom from largest to smallest.
Parameters
----------
molecule
The original molecule being fragmented.
atoms
The map indices of the atoms currently in the fragment.
Returns
-------
        The map index of the selected neighbour atom and the bond (as a tuple of
        map indices) connecting it to the fragment, or ``None`` if no neighbour
        remains to be added.
"""
map_bond_orders = {
(
get_map_index(molecule, bond.atom1_index),
get_map_index(molecule, bond.atom2_index),
): bond.fractional_bond_order
for bond in molecule.bonds
if bond.atom1.atomic_number != 1 and bond.atom2.atomic_number != 1
}
neighbour_bond_orders = {
(bond_order, (map_tuple[1 - i], map_tuple))
for i in range(2)
for map_tuple, bond_order in map_bond_orders.items()
if map_tuple[i] in atoms and map_tuple[1 - i] not in atoms
}
sorted_atoms = [
atom_to_add
for _, atom_to_add in sorted(
neighbour_bond_orders, key=lambda x: x[0], reverse=True
)
]
return None if len(sorted_atoms) == 0 else sorted_atoms[0]
@classmethod
def _add_next_substituent(
cls,
parent: Molecule,
parent_stereo: Stereochemistries,
parent_groups: FunctionalGroups,
parent_rings: RingSystems,
atoms: Set[int],
bonds: Set[BondTuple],
target_bond: BondTuple,
heuristic: Heuristic = "path_length",
) -> Tuple[Optional[Molecule], bool]:
"""Expand the fragment to include the next set of substituents / ring systems.
Parameters
----------
parent
The original molecule being fragmented.
parent_stereo
The stereochemistry of the parent molecule.
parent_groups
A dictionary of the functional groups on the molecule which should not
be fragmented.
parent_rings
A dictionary of the ring systems in the molecule which should not
be fragmented.
atoms
The map indices of the atoms currently in the fragment.
bonds
The map indices of the bonds currently in the fragment.
target_bond
The bond the fragment is being built around.
heuristic
How to add substituents. The choices are `'path_length'` or `'wbo'`
Returns
-------
The expanded fragment, or None if the fragment already includes the
entire parent.
"""
# Select the next atom neighbour (and the groups / rings that it is part of)
# that should be added to the fragment.
if heuristic == "wbo":
neighbour_atom_and_bond = cls._select_neighbour_by_wbo(parent, atoms)
elif heuristic == "path_length":
neighbour_atom_and_bond = cls._select_neighbour_by_path_length(
parent, atoms, target_bond
)
else:
raise NotImplementedError(
"Only `'wbo'` and `'path_length'` are supported heuristics."
)
if neighbour_atom_and_bond is None:
return None, False
neighbour_atom, neighbour_bond = neighbour_atom_and_bond
# If the neighbour to include is part of a functional group / ring system
# the entire group should be included in the fragment.
for group, group_atoms in parent_groups.items():
if neighbour_atom not in group_atoms[0]:
continue
atoms.update(parent_groups[group][0])
bonds.update(parent_groups[group][1])
for ring_index, ring_atoms in parent_rings.items():
if neighbour_atom not in ring_atoms[0]:
continue
atoms.update(parent_rings[ring_index][0])
bonds.update(parent_rings[ring_index][1])
atoms.add(neighbour_atom)
bonds.add(neighbour_bond)
# Check new WBO
return cls._atom_bond_set_to_mol(parent, parent_stereo, atoms, bonds)
class PfizerFragmenter(Fragmenter):
"""Fragment engine for fragmenting molecules using Pfizer's protocol
(doi: 10.1021/acs.jcim.9b00373)
"""
scheme: Literal["Pfizer"] = "Pfizer"
def _fragment(
self, molecule: Molecule, target_bond_smarts: Optional[List[str]]
) -> FragmentationResult:
"""Fragments a molecule according to Pfizer protocol."""
(
parent,
parent_stereo,
parent_groups,
parent_rings,
) = self._prepare_molecule(molecule, self.functional_groups, False)
rotatable_bonds = self.find_rotatable_bonds(parent, target_bond_smarts)
fragments = {}
for bond in rotatable_bonds:
atoms, bonds = self._get_torsion_quartet(parent, bond)
atoms, bonds = self._get_ring_and_fgroups(
parent, parent_groups, parent_rings, atoms, bonds
)
atoms, bonds = self._cap_open_valence(parent, parent_groups, atoms, bonds)
fragments[bond], _ = self._atom_bond_set_to_mol(
parent, parent_stereo, atoms, bonds
)
return FragmentationResult(
parent_smiles=parent.to_smiles(mapped=True),
fragments=[
Fragment(smiles=fragment.to_smiles(mapped=True), bond_indices=bond)
for bond, fragment in fragments.items()
],
provenance=self._default_provenance(),
)
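# A minimal end-to-end sketch (illustrative only; the SMILES string is an arbitrary
# example, and computing WBOs typically requires a toolkit capable of AM1 calculations):
#
#   parent = Molecule.from_smiles("CCCc1ccc(cc1)C(=O)N(C)C")
#   result = WBOFragmenter().fragment(parent)      # or PfizerFragmenter().fragment(parent)
#   for fragment in result.fragments:
#       print(fragment.bond_indices, fragment.smiles)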
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CandidateSamplerOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RangeSamplerOpsTest(test.TestCase):
BATCH_SIZE = 3
NUM_TRUE = 2
RANGE = 5
NUM_SAMPLED = RANGE
TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]
def testTrueCandidates(self):
with self.cached_session() as sess:
indices = constant_op.constant([0, 0, 1, 1, 2, 2])
true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
true_candidates_matrix = array_ops.reshape(
true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
indices_val, true_candidates_val = sess.run(
[indices, true_candidates_matrix])
self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])
self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)
def testSampledCandidates(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = self.evaluate(sampled_candidates)
expected_ids = [0, 1, 2, 3, 4]
self.assertAllEqual(result, expected_ids)
self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])
def testTrueLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
true_log_expected_count = math_ops.log(true_expected_count)
result = self.evaluate(true_log_expected_count)
self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
self.assertEqual(true_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
self.assertEqual(true_log_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
def testSampledLogExpectedCount(self):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler( # pylint: disable=line-too-long
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = self.evaluate(sampled_log_expected_count)
self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])
def testAccidentalHits(self):
with self.cached_session() as sess:
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
accidental_hits = candidate_sampling_ops.compute_accidental_hits(
true_classes, sampled_candidates, self.NUM_TRUE)
indices, ids, weights = self.evaluate(accidental_hits)
self.assertEqual(1, accidental_hits[0].get_shape().ndims)
self.assertEqual(1, accidental_hits[1].get_shape().ndims)
self.assertEqual(1, accidental_hits[2].get_shape().ndims)
for index, id_, weight in zip(indices, ids, weights):
self.assertTrue(id_ in self.TRUE_LABELS[index])
self.assertLess(weight, -1.0e37)
def testSeed(self):
def draw(seed):
with self.cached_session():
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return self.evaluate(sampled)
# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:
self.assertAllEqual(draw(seed), draw(seed))
    # seed=None means a random seed is used, so repeated draws should differ.
num_same = 0
for _ in range(10):
if np.allclose(draw(None), draw(None)):
num_same += 1
# Accounts for the fact that the same random seed may be picked
# twice very rarely.
self.assertLessEqual(num_same, 2)
if __name__ == "__main__":
test.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from parserator import data_prep_utils
from lxml import etree
import unittest
class Mock(object):
pass
class TestList2XML(unittest.TestCase):
def setUp(self):
mock_module = Mock()
mock_module.GROUP_LABEL = 'Collection'
mock_module.PARENT_LABEL = 'TokenSequence'
self.training_data = data_prep_utils.TrainingData(None, mock_module)
def test_xml(self):
self.XMLequals( [('#', 'foo'), ('1', 'foo'), ('Pinto', 'foo')], '<foo>#</foo> <foo>1</foo> <foo>Pinto</foo>')
self.XMLequals( [('&', 'foo'), ('1', 'foo'), ('Pinto', 'foo')], '<foo>&</foo> <foo>1</foo> <foo>Pinto</foo>')
def test_none_tag(self):
self.XMLequals( [('Box', 'foo'), ('#', 'Null'), ('1', 'foo'), ('Pinto', 'foo')], '<foo>Box</foo> <Null>#</Null> <foo>1</foo> <foo>Pinto</foo>')
self.XMLequals( [('#', 'Null'), ('1', 'foo'), ('Pinto', 'foo')], '<Null>#</Null> <foo>1</foo> <foo>Pinto</foo>')
def test_ampersand(self):
assert self.training_data._xml_to_sequence(self.training_data._sequence_to_xml([('&', 'foo')])) == (('&', 'foo'),)
def XMLequals(self, labeled_sequence, xml):
correct_xml = '<TokenSequence>' + xml + '</TokenSequence>'
generated_xml = etree.tostring(self.training_data._sequence_to_xml(labeled_sequence)).decode()
print('Correct: %s' %correct_xml)
print('Generated: %s' %generated_xml)
assert correct_xml == generated_xml
if __name__ == '__main__' :
unittest.main()
|
import json
from copy import deepcopy
import pytest
from json_graph_lite.jgf import Graph
from json_graph_lite.jgf import Graphs
GRAPH1 = {
'directed': True,
'edges': [{
'directed': True,
'metadata': {'w': 3},
'relation': 'edge AB',
'source': 'a',
'target': 'b'}],
'label': 'G',
'metadata': {'tags': ['g']},
'nodes': {
'a': {'label': 'node A', 'metadata': {'w': 1.3}},
'b': {'label': 'node B', 'metadata': {'w': 0.3}}
},
'type': 'graph type'
}
GRAPH2 = {
'directed': True,
'edges': [{
'directed': True,
'metadata': {'w': 1},
'relation': 'edge AB',
'source': 'a',
'target': 'b'}],
'label': 'G2',
'metadata': {'tags': ['g2']},
'nodes': {
'a': {'label': 'node A', 'metadata': {'w': 0.3}},
'b': {'label': 'node B', 'metadata': {'w': 5.3}}
},
'type': 'graph type'
}
GRAPH_FIXTURE = {
'graph': GRAPH1
}
GRAPH2_FIXTURE = {
    'graph': GRAPH2
}
GRAPHS_FIXTURE = {
'graphs': [
GRAPH1,
GRAPH2
]
}
def test_graph_round_trip():
g1 = Graph.from_dict(deepcopy(GRAPH_FIXTURE))
assert g1.to_dict() == GRAPH_FIXTURE
def test_graphs_round_trip():
g1 = Graphs.from_dict(deepcopy(GRAPHS_FIXTURE))
assert g1.to_dict() == GRAPHS_FIXTURE
# Examples below were borrowed from https://github.com/jsongraph/json-graph-specification page
EMPTY_GRAPH_EXAMPLE_FROM_JGF_PAGE = """{
"graph": {}
}"""
NODES_ONLY_GRAPH_EXAMPLE_FROM_JGF_PAGE = """{
"graph": {
"nodes": {
"A": {},
"B": {}
}
}
}"""
SIMPLE_GRAPH_EXAMPLE_FROM_JGF_PAGE = """{
"graph": {
"nodes": {
"A": {},
"B": {}
},
"edges": [
{
"source": "A",
"target": "B"
}
]
}
}"""
GRAPH_EXAMPLE_FROM_JGF_PAGE = """{
"graph": {
"directed": false,
"type": "graph type",
"label": "graph label",
"metadata": {
"user-defined": "values"
},
"nodes": {
"0": {
"type": "node type",
"label": "node label(0)",
"metadata": {
"user-defined": "values"
}
},
"1": {
"type": "node type",
"label": "node label(1)",
"metadata": {
"user-defined": "values"
}
}
},
"edges": [
{
"source": "0",
"relation": "edge relationship",
"target": "1",
"directed": false,
"label": "edge label",
"metadata": {
"user-defined": "values"
}
}
]
}
}"""
@pytest.mark.parametrize("json_graph", [
EMPTY_GRAPH_EXAMPLE_FROM_JGF_PAGE,
NODES_ONLY_GRAPH_EXAMPLE_FROM_JGF_PAGE,
SIMPLE_GRAPH_EXAMPLE_FROM_JGF_PAGE,
GRAPH_EXAMPLE_FROM_JGF_PAGE,
])
def test_graph_from_jgf_spec_example(json_graph):
    g1 = Graph.from_json(json_graph)
    assert json.loads(str(g1)) == json.loads(json_graph)
EMPTY_GRAPHS_EXAMPLE_FROM_JGF_PAGE = """{
"graphs": []
}"""
GRAPHS_EXAMPLE_FROM_JGF_PAGE = """{
"graphs": [
{
"directed": true,
"type": "graph type",
"label": "graph label",
"metadata": {
"user-defined": "values"
},
"nodes": {
"0": {
"type": "node type",
"label": "node label(0)",
"metadata": {
"user-defined": "values"
}
},
"1": {
"type": "node type",
"label": "node label(1)",
"metadata": {
"user-defined": "values"
}
}
},
"edges": [
{
"source": "0",
"relation": "edge relationship",
"target": "1",
"directed": true,
"label": "edge label",
"metadata": {
"user-defined": "values"
}
}
]
},
{
"directed": true,
"type": "graph type",
"label": "graph label",
"metadata": {
"user-defined": "values"
},
"nodes": {
"0": {
"type": "node type",
"label": "node label(0)",
"metadata": {
"user-defined": "values"
}
},
"1": {
"type": "node type",
"label": "node label(1)",
"metadata": {
"user-defined": "values"
}
}
},
"edges": [
{
"source": "1",
"relation": "edge relationship",
"target": "0",
"directed": true,
"label": "edge label",
"metadata": {
"user-defined": "values"
}
}
]
}
]
}"""
@pytest.mark.parametrize("json_graphs", [
EMPTY_GRAPHS_EXAMPLE_FROM_JGF_PAGE,
GRAPHS_EXAMPLE_FROM_JGF_PAGE,
])
def test_graphs_from_jgf_spec_example(json_graphs):
g1 = Graphs.from_json(json_graphs)
assert json.loads(str(g1)) == json.loads(json_graphs)
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Classes to connect a plugin to the shell widgets in the IPython console.
"""
|
import pickle
# import dill
from .decorators import cache
@cache
def unpickle(fpath):
"""Unpickle path (but only if it's not done yet)."""
with open(fpath, 'rb') as f:
return pickle.load(f)
# @cache
# def undill(fpath):
# """Undill path (but only if it's not done yet)."""
# with open(fpath, 'rb') as f:
# return dill.load(f)
from pathlib import Path
class Jar(object):
def __init__(self):
self.labels = []
@property
def label(self):
return '.'.join(['Jar'] + self.labels + ['dill'])
def clear(self):
del self.labels[:]
def seal(self, obj, path=None):
if path is None:
path = Path.cwd()
with open(str(path / self.label), 'wb') as f:
pickle.dump(obj, f)
def unseal(self, path=None):
if path is None:
path = Path.cwd()
return unpickle(str(path / self.label))
def __str__(self):
return self.label
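# A brief usage sketch (illustrative only; the label and payload below are assumptions):
def _jar_demo():
    jar = Jar()
    jar.labels.append('example')   # resulting file name: "Jar.example.dill"
    jar.seal({'answer': 42})       # pickle the object into the current directory
    return jar.unseal()            # -> {'answer': 42}; unpickling is cached per path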
|
import asyncio
from aiogram import Bot, Dispatcher, executor
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from config import BOT_TOKEN
loop = asyncio.get_event_loop()
bot = Bot(BOT_TOKEN, parse_mode="HTML")
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage, loop=loop)
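# Illustrative addition: a minimal handler registered on the dispatcher above, showing
# how `dp` is typically used. The project's real handlers live in the separate
# `handlers` module imported below; the command name here is an assumption.
from aiogram import types
@dp.message_handler(commands=['ping'])
async def cmd_ping_example(message: types.Message):
    # Reply to /ping so the bot wiring above can be smoke-tested.
    await message.answer('pong')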
if __name__ == '__main__':
from handlers import *
executor.start_polling(dp)
|
import numpy as np
from share import *
def xor_data(num_points):
X_xor = np.random.randn(num_points, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
return X_xor, y_xor
def circle_data(num_points, radius):
X = np.random.randn(num_points, 2)
y = X[:, 0]**2 + X[:, 1]**2 - radius**2
y = np.where(y > 0, 1, -1)
return X, y
def random_data(num_points):
X = np.random.randn(num_points, 2)
y = np.random.randn(num_points)
y = np.where(y > 0, 1, -1)
return X, y
# data
np.random.seed(0)
num_points = 200
# svm
from sklearn.svm import SVC
for option in ['xor', 'circle', 'random']:
if option == 'xor':
X, y = xor_data(num_points)
elif option == 'circle':
X, y = circle_data(num_points, 1)
else:
X, y = random_data(num_points)
svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X, y)
plot_decision_regions(X, y, classifier=svm, title=option)
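# `plot_decision_regions` comes from the local `share` module and is not shown here.
# The function below is only a sketch of what such a helper might look like (an
# assumption, not the actual implementation): it colours a mesh grid with the
# classifier's predictions and overlays the labelled points.
def _plot_decision_regions_sketch(X, y, classifier, title='', resolution=0.02):
    import matplotlib.pyplot as plt
    from matplotlib.colors import ListedColormap
    cmap = ListedColormap(('red', 'blue'))
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(X[y == cl, 0], X[y == cl, 1], label=str(cl))
    plt.title(title)
    plt.legend(loc='upper left')
    plt.show()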
|
# Generated by Django 2.2.13 on 2020-11-10 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('majora2', '0133_auto_20201105_1507'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='revoked_reason',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
|
from tests.fixtures import *
import torch.nn as nn
from numpy.testing import assert_array_equal
# class TestAtomisticModel:
# def test_model_types(self, atomistic_model):
# assert type(atomistic_model.output_modules) == nn.ModuleList
#
# def test_forward_pass(self, atomistic_model, dataloader, result_shapes):
# for batch in dataloader:
# results = atomistic_model(batch)
# for prop, result in results.items():
# assert_array_equal(result.shape, result_shapes[prop])
|
'''
Purpose: Generate train and test data files for the VAR model.
'''
import pandas as pd
import numpy as np
# Open full dataset with shape (510, 448, 304, 10) corresponding to (months, height, width, # of predictors).
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/preprocessing/whole_data.npy", "rb") as f:
whole_data = np.load(f)
# Convert NaNs to 0.0 and average over the spatial area.
whole_data = np.nan_to_num(whole_data)
whole_data = np.mean(whole_data, axis=(1,2))
# Load ice extent data
extents = np.load("/umbc/xfs1/cybertrn/reu2021/team1/research/plotting/real_ice_extents.npy")
extents = np.expand_dims(extents, axis=1)
# Train-test split. This code trains on 1979-2012 and tests on 2013-2020.
var_train = whole_data[:408, :]
var_test = whole_data[408:504]
# Add extents to train and test data.
var_train = np.concatenate((var_train, extents[:408]), axis=1)
var_test = np.concatenate((var_test, extents[408:504]), axis=1)
var_train = pd.DataFrame(var_train)
var_test = pd.DataFrame(var_test)
# Save train and test data to csvs.
var_train.to_csv("var_final_train.csv")
var_test.to_csv("var_final_test.csv")
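# Follow-up sketch (not part of the original preprocessing script): the saved frames
# could be fed to statsmodels' VAR implementation roughly as below. The lag limit and
# information criterion are illustrative assumptions.
from statsmodels.tsa.api import VAR
var_model = VAR(var_train)
var_results = var_model.fit(maxlags=12, ic='aic')
lag_order = var_results.k_ar
# Forecast as many steps ahead as there are rows in the held-out test set.
var_forecast = var_results.forecast(var_train.values[-lag_order:], steps=len(var_test))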
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
PON_ALARM_CODE_LOS = 0
PON_ALARM_CODE_LOSI = 1
PON_ALARM_CODE_DOWI = 2
PON_ALARM_CODE_LOFI = 3
PON_ALARM_CODE_RDII = 4
PON_ALARM_CODE_LOAMI = 5
PON_ALARM_CODE_LCDGI = 6
PON_ALARM_CODE_LOAI = 7
PON_ALARM_CODE_SDI = 8
PON_ALARM_CODE_SFI = 9
PON_ALARM_CODE_PEE = 10
PON_ALARM_CODE_DGI = 11
PON_ALARM_CODE_LOKI = 12
PON_ALARM_CODE_TIWI = 13
PON_ALARM_CODE_TIA = 14
PON_ALARM_CODE_LAST_ALARM = 15
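# Illustrative addition (not part of the original constants): a reverse lookup from
# numeric alarm code to constant name, handy when logging raised alarms.
PON_ALARM_CODE_NAMES = {
    value: name for name, value in list(globals().items())
    if name.startswith('PON_ALARM_CODE_') and isinstance(value, int)
}
# Example: PON_ALARM_CODE_NAMES[PON_ALARM_CODE_RDII] == 'PON_ALARM_CODE_RDII'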
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`."""
import warnings
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`",
DeprecationWarning, stacklevel=2
)
class GKEClusterHook(GKEHook):
"""
This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`.",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
|
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import ast
import logging
import optparse
import re
import sys
from collections import Counter
from collections.abc import Container, Iterable, Iterator, Sequence
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass
from functools import partial
from itertools import chain
from keyword import iskeyword
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple
from flake8 import checker # type: ignore[import]
from flake8.options.manager import OptionManager # type: ignore[import]
from flake8.plugins.manager import Plugin # type: ignore[import]
from flake8.plugins.pyflakes import FlakesChecker # type: ignore[import]
from pyflakes.checker import (
PY2,
ClassDefinition,
ClassScope,
FunctionScope,
ModuleScope,
)
if sys.version_info >= (3, 9):
from ast import unparse
else:
from ast_decompiler import decompile as unparse
if TYPE_CHECKING:
from typing import Literal, TypeGuard
__version__ = "22.5.1"
LOG = logging.getLogger("flake8.pyi")
class Error(NamedTuple):
lineno: int
col: int
message: str
type: type
class TypeVarInfo(NamedTuple):
cls_name: str
name: str
_MAPPING_SLICE = "KeyType, ValueType"
_TYPE_SLICE = "MyClass"
_COUNTER_SLICE = "KeyType"
_COROUTINE_SLICE = "YieldType, SendType, ReturnType"
_ASYNCGEN_SLICE = "YieldType, SendType"
# ChainMap and AsyncContextManager do not exist in typing or typing_extensions in Python 2,
# so we can disallow importing them from anywhere except collections and contextlib respectively.
# A Python 2-compatible check
_BAD_Y022_IMPORTS = {
# typing aliases for collections
"typing.Counter": ("collections.Counter", _COUNTER_SLICE),
"typing.Deque": ("collections.deque", "T"),
"typing.DefaultDict": ("collections.defaultdict", _MAPPING_SLICE),
"typing.ChainMap": ("collections.ChainMap", _MAPPING_SLICE),
# typing aliases for builtins
"typing.Dict": ("dict", _MAPPING_SLICE),
"typing.FrozenSet": ("frozenset", "T"),
"typing.List": ("list", "T"),
"typing.Set": ("set", "T"),
"typing.Tuple": ("tuple", "Foo, Bar"),
"typing.Type": ("type", _TYPE_SLICE),
# One typing alias for contextlib
"typing.AsyncContextManager": ("contextlib.AbstractAsyncContextManager", "T"),
# typing_extensions aliases for collections
"typing_extensions.Counter": ("collections.Counter", _COUNTER_SLICE),
"typing_extensions.Deque": ("collections.deque", "T"),
"typing_extensions.DefaultDict": ("collections.defaultdict", _MAPPING_SLICE),
"typing_extensions.ChainMap": ("collections.ChainMap", _MAPPING_SLICE),
# One typing_extensions alias for a builtin
"typing_extensions.Type": ("type", _TYPE_SLICE),
# one typing_extensions alias for contextlib
"typing_extensions.AsyncContextManager": (
"contextlib.AbstractAsyncContextManager",
"T",
),
}
# Objects you should import from collections.abc/typing instead of typing_extensions
# A Python 2-compatible check
_BAD_COLLECTIONSABC_Y023_IMPORTS = {
"Awaitable": "T",
"Coroutine": _COROUTINE_SLICE,
"AsyncIterable": "T",
"AsyncIterator": "T",
"AsyncGenerator": _ASYNCGEN_SLICE,
}
_BAD_TYPING_Y023_IMPORTS = frozenset(
{
"Protocol",
"runtime_checkable",
"NewType",
"overload",
"Text",
"NoReturn",
# ClassVar deliberately omitted, as it's the only one in this group that should be parameterised
        # It is special-cased elsewhere
}
)
# Objects you should import from collections.abc instead of typing(_extensions)
# A Python 2-incompatible check
# typing.AbstractSet is deliberately omitted (special-cased)
# We use `None` to signify that the object shouldn't be parameterised.
_BAD_Y027_IMPORTS = {
"ByteString": None,
"Collection": "T",
"ItemsView": _MAPPING_SLICE,
"KeysView": "KeyType",
"Mapping": _MAPPING_SLICE,
"MappingView": None,
"MutableMapping": _MAPPING_SLICE,
"MutableSequence": "T",
"MutableSet": "T",
"Sequence": "T",
"ValuesView": "ValueType",
"Iterable": "T",
"Iterator": "T",
"Generator": "YieldType, SendType, ReturnType",
"Hashable": None,
"Reversible": "T",
"Sized": None,
"Coroutine": _COROUTINE_SLICE,
"AsyncGenerator": _ASYNCGEN_SLICE,
"AsyncIterator": "T",
"AsyncIterable": "T",
"Awaitable": "T",
"Callable": None,
"Container": "T",
}
class PyiAwareFlakesChecker(FlakesChecker):
def deferHandleNode(self, node: ast.AST | None, parent) -> None:
self.deferFunction(lambda: self.handleNode(node, parent))
def ASSIGN(self, node: ast.Assign) -> None:
"""This is a custom implementation of ASSIGN derived from
handleChildren() in pyflakes 1.3.0.
        The point here is that on module level, there are type aliases that we
want to bind eagerly, but defer computation of the values of the
assignments (the type aliases might have forward references).
"""
if not isinstance(self.scope, ModuleScope):
super().ASSIGN(node)
return
for target in node.targets:
self.handleNode(target, node)
self.deferHandleNode(node.value, node)
def ANNASSIGN(self, node: ast.AnnAssign) -> None:
"""
Annotated assignments don't have annotations evaluated on function
scope, hence the custom implementation. Compared to the pyflakes
version, we defer evaluation of the annotations (and values on
module level).
"""
if node.value:
# Only bind the *target* if the assignment has value.
# Otherwise it's not really ast.Store and shouldn't silence
# UndefinedLocal warnings.
self.handleNode(node.target, node)
if not isinstance(self.scope, FunctionScope):
self.deferHandleNode(node.annotation, node)
if node.value:
# If the assignment has value, handle the *value*...
if isinstance(self.scope, ModuleScope):
# ...later (if module scope).
self.deferHandleNode(node.value, node)
else:
# ...now.
self.handleNode(node.value, node)
def LAMBDA(self, node: ast.Lambda) -> None:
"""This is likely very brittle, currently works for pyflakes 1.3.0.
Deferring annotation handling depends on the fact that during calls
to LAMBDA visiting the function's body is already deferred and the
only eager calls to `handleNode` are for annotations.
"""
self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode # type: ignore[assignment]
super().LAMBDA(node)
self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode # type: ignore[assignment]
def CLASSDEF(self, node: ast.ClassDef) -> None:
if not isinstance(self.scope, ModuleScope):
# This shouldn't be necessary because .pyi files don't nest
# scopes much, but better safe than sorry.
super().CLASSDEF(node)
return
# What follows is copied from pyflakes 1.3.0. The only changes are the
# deferHandleNode calls.
for decorator in node.decorator_list:
self.handleNode(decorator, node)
for baseNode in node.bases:
self.deferHandleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.deferHandleNode(keywordNode, node)
self.pushScope(ClassScope)
# doctest does not process doctest within a doctest
# classes within classes are processed.
if (
self.withDoctest
and not self._in_doctest()
and not isinstance(self.scope, FunctionScope)
):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
def handleNodeDelete(self, node: ast.AST) -> None:
"""Null implementation.
Lets users use `del` in stubs to denote private names.
"""
return
class PyiAwareFileChecker(checker.FileChecker):
def run_check(self, plugin: Plugin, **kwargs: Any) -> Any:
if self.filename == "-":
filename = self.options.stdin_display_name
else:
filename = self.filename
if filename.endswith(".pyi") and plugin["plugin"] == FlakesChecker:
LOG.info(
"Replacing FlakesChecker with PyiAwareFlakesChecker while "
"checking %r",
filename,
)
plugin = dict(plugin)
plugin["plugin"] = PyiAwareFlakesChecker
return super().run_check(plugin, **kwargs)
class LegacyNormalizer(ast.NodeTransformer):
"""Transform AST to be consistent across Python versions."""
if sys.version_info < (3, 9):
def visit_Index(self, node: ast.Index) -> ast.expr:
"""Index nodes no longer exist in Python 3.9.
For example, consider the AST representing Union[str, int].
Before 3.9: Subscript(value=Name(id='Union'), slice=Index(value=Tuple(...)))
3.9 and newer: Subscript(value=Name(id='Union'), slice=Tuple(...))
"""
return node.value
def _ast_node_for(string: str) -> ast.AST:
"""Helper function for doctests"""
expr = ast.parse(string).body[0]
assert isinstance(expr, ast.Expr)
return expr.value
def _is_name(node: ast.expr | None, name: str) -> bool:
"""Return True if `node` is an `ast.Name` node with id `name`
>>> import ast
>>> node = ast.Name(id="Any")
>>> _is_name(node, "Any")
True
"""
return isinstance(node, ast.Name) and node.id == name
_is_BaseException = partial(_is_name, name="BaseException")
_TYPING_MODULES = frozenset({"typing", "typing_extensions"})
def _is_object(node: ast.expr | None, name: str, *, from_: Container[str]) -> bool:
"""Determine whether `node` is an ast representation of `name`.
Return True if `node` is either:
1). Of shape `ast.Name(id=<name>)`, or;
2). Of shape `ast.Attribute(value=ast.Name(id=<parent>), attr=<name>)`,
where <parent> is a string that can be found within the `from_` collection of
strings.
>>> from functools import partial
>>> _is_Literal = partial(_is_object, name="Literal", from_=_TYPING_MODULES)
>>> _is_Literal(_ast_node_for("Literal"))
True
>>> _is_Literal(_ast_node_for("typing.Literal"))
True
>>> _is_Literal(_ast_node_for("typing_extensions.Literal"))
True
"""
return _is_name(node, name) or (
isinstance(node, ast.Attribute)
and node.attr == name
and isinstance(node.value, ast.Name)
and node.value.id in from_
)
_is_TypeAlias = partial(_is_object, name="TypeAlias", from_=_TYPING_MODULES)
_is_NamedTuple = partial(_is_object, name="NamedTuple", from_={"typing"})
_is_TypedDict = partial(_is_object, name="TypedDict", from_=_TYPING_MODULES)
_is_Literal = partial(_is_object, name="Literal", from_=_TYPING_MODULES)
_is_Union = partial(_is_object, name="Union", from_={"typing"})
_is_abstractmethod = partial(_is_object, name="abstractmethod", from_={"abc"})
_is_Any = partial(_is_object, name="Any", from_={"typing"})
_is_overload = partial(_is_object, name="overload", from_={"typing"})
_is_final = partial(_is_object, name="final", from_=_TYPING_MODULES)
_is_Final = partial(_is_object, name="Final", from_=_TYPING_MODULES)
_is_Self = partial(_is_object, name="Self", from_=({"_typeshed"} | _TYPING_MODULES))
_is_TracebackType = partial(_is_object, name="TracebackType", from_={"types"})
_is_builtins_object = partial(_is_object, name="object", from_={"builtins"})
def _get_name_of_class_if_from_modules(
classnode: ast.expr, *, modules: Container[str]
) -> str | None:
"""
If `classnode` is an `ast.Name`, return `classnode.id`.
If it's an `ast.Attribute`, check that the part before the dot is a module in `modules`.
If it is, return the part after the dot; if it isn't, return `None`.
If `classnode` is anything else, return `None`.
>>> _get_name_of_class_if_from_modules(_ast_node_for('int'), modules={'builtins'})
'int'
>>> _get_name_of_class_if_from_modules(_ast_node_for('builtins.int'), modules={'builtins'})
'int'
>>> _get_name_of_class_if_from_modules(_ast_node_for('builtins.int'), modules={'typing'}) is None
True
"""
if isinstance(classnode, ast.Name):
return classnode.id
if (
isinstance(classnode, ast.Attribute)
and isinstance(classnode.value, ast.Name)
and classnode.value.id in modules
):
return classnode.attr
return None
def _is_type_or_Type(node: ast.expr) -> bool:
"""
>>> _is_type_or_Type(_ast_node_for('type'))
True
>>> _is_type_or_Type(_ast_node_for('Type'))
True
>>> _is_type_or_Type(_ast_node_for('builtins.type'))
True
>>> _is_type_or_Type(_ast_node_for('typing_extensions.Type'))
True
>>> _is_type_or_Type(_ast_node_for('typing.Type'))
True
"""
if isinstance(node, ast.Name):
return node.id in {"type", "Type"}
if isinstance(node, ast.Attribute):
node_value = node.value
if not isinstance(node_value, ast.Name):
return False
node_value_id = node_value.id
attr = node.attr
return (node_value_id == "builtins" and attr == "type") or (
node_value_id in _TYPING_MODULES and attr == "Type"
)
return False
def _is_PEP_604_union(node: ast.expr | None) -> TypeGuard[ast.BinOp]:
return isinstance(node, ast.BinOp) and isinstance(node.op, ast.BitOr)
def _is_None(node: ast.expr) -> bool:
# <=3.7: `BaseException | None` parses as BinOp(left=Name(id='BaseException'), op=BitOr(), right=NameConstant(value=None))`
# >=3.8: `BaseException | None` parses as BinOp(left=Name(id='BaseException'), op=BitOr(), right=Constant(value=None))`
# ast.NameConstant is deprecated in 3.8+, but doesn't raise a DeprecationWarning (and the isinstance() check still works)
return isinstance(node, ast.NameConstant) and node.value is None
class ExitArgAnalysis(NamedTuple):
is_union_with_None: bool
non_None_part: ast.expr | None
def __repr__(self) -> str:
if self.non_None_part is None:
non_None_part_repr = "None"
else:
non_None_part_repr = ast.dump(self.non_None_part)
return (
f"ExitArgAnalysis("
f"is_union_with_None={self.is_union_with_None}, "
f"non_None_part={non_None_part_repr}"
f")"
)
def _analyse_exit_method_arg(node: ast.BinOp) -> ExitArgAnalysis:
"""Return a two-item tuple providing analysis of the annotation of an exit-method arg.
The `node` represents a union type written as `X | Y`.
>>> _analyse_exit_method_arg(_ast_node_for('int | str'))
ExitArgAnalysis(is_union_with_None=False, non_None_part=None)
>>> _analyse_exit_method_arg(_ast_node_for('int | None'))
ExitArgAnalysis(is_union_with_None=True, non_None_part=Name(id='int', ctx=Load()))
>>> _analyse_exit_method_arg(_ast_node_for('None | str'))
ExitArgAnalysis(is_union_with_None=True, non_None_part=Name(id='str', ctx=Load()))
"""
assert isinstance(node.op, ast.BitOr)
if _is_None(node.left):
return ExitArgAnalysis(is_union_with_None=True, non_None_part=node.right)
if _is_None(node.right):
return ExitArgAnalysis(is_union_with_None=True, non_None_part=node.left)
return ExitArgAnalysis(is_union_with_None=False, non_None_part=None)
def _is_decorated_with_final(
node: ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef,
) -> bool:
return any(_is_final(decorator) for decorator in node.decorator_list)
def _get_collections_abc_obj_id(node: ast.expr | None) -> str | None:
"""
If the node represents a subscripted object from collections.abc or typing,
return the name of the object.
Else, return None.
>>> _get_collections_abc_obj_id(_ast_node_for('AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('typing.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('typing_extensions.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('collections.abc.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('collections.OrderedDict[str, int]')) is None
True
"""
if not isinstance(node, ast.Subscript):
return None
subscripted_obj = node.value
if isinstance(subscripted_obj, ast.Name):
return subscripted_obj.id
if not isinstance(subscripted_obj, ast.Attribute):
return None
obj_value, obj_attr = subscripted_obj.value, subscripted_obj.attr
if isinstance(obj_value, ast.Name) and obj_value.id in _TYPING_MODULES:
return obj_attr
if (
isinstance(obj_value, ast.Attribute)
and _is_name(obj_value.value, "collections")
and obj_value.attr == "abc"
):
return obj_attr
return None
_ITER_METHODS = frozenset({("Iterator", "__iter__"), ("AsyncIterator", "__aiter__")})
_INPLACE_BINOP_METHODS = frozenset(
{
"__iadd__",
"__isub__",
"__imul__",
"__imatmul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ipow__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
}
)
def _has_bad_hardcoded_returns(
method: ast.FunctionDef | ast.AsyncFunctionDef, *, classdef: ast.ClassDef
) -> bool:
"""Return `True` if `function` should be rewritten using `_typeshed.Self`."""
# Much too complex for our purposes to worry about overloaded functions or abstractmethods
if any(
_is_overload(deco) or _is_abstractmethod(deco) for deco in method.decorator_list
):
return False
if not _non_kw_only_args_of(method.args): # weird, but theoretically possible
return False
method_name, returns = method.name, method.returns
if isinstance(method, ast.AsyncFunctionDef):
return (
method_name == "__aenter__"
and _is_name(returns, classdef.name)
and not _is_decorated_with_final(classdef)
)
if method_name in _INPLACE_BINOP_METHODS:
return returns is not None and not _is_Self(returns)
if _is_name(returns, classdef.name):
return method_name in {"__enter__", "__new__"} and not _is_decorated_with_final(
classdef
)
return_obj_name = _get_collections_abc_obj_id(returns)
return (return_obj_name, method_name) in _ITER_METHODS and any(
_get_collections_abc_obj_id(base_node) == return_obj_name
for base_node in classdef.bases
)
def _unparse_assign_node(node: ast.Assign | ast.AnnAssign) -> str:
"""Unparse an Assign node, and remove any newlines in it"""
return unparse(node).replace("\n", "")
def _unparse_func_node(node: ast.FunctionDef | ast.AsyncFunctionDef) -> str:
"""Unparse a function node, and reformat it to fit on one line."""
return re.sub(r"\s+", " ", unparse(node)).strip()
def _is_list_of_str_nodes(seq: list[ast.expr | None]) -> TypeGuard[list[ast.Str]]:
return all(isinstance(item, ast.Str) for item in seq)
def _is_bad_TypedDict(node: ast.Call) -> bool:
"""Evaluate whether an assignment-based TypedDict should be rewritten using class syntax.
Return `False` if the TypedDict appears as though it may be invalidly defined;
type-checkers will raise an error in that eventuality.
"""
args = node.args
if len(args) != 2:
return False
typed_dict_annotations = args[1]
# The runtime supports many ways of creating a TypedDict,
# e.g. `T = TypeDict('T', [['foo', int], ['bar', str]])`,
# but PEP 589 states that type-checkers are only expected
# to accept a dictionary literal for the second argument.
if not isinstance(typed_dict_annotations, ast.Dict):
return False
typed_dict_fields = typed_dict_annotations.keys
if not _is_list_of_str_nodes(typed_dict_fields):
return False
fieldnames = [field.s for field in typed_dict_fields]
return all(
fieldname.isidentifier() and not iskeyword(fieldname)
for fieldname in fieldnames
)
def _non_kw_only_args_of(args: ast.arguments) -> list[ast.arg]:
"""Return a list containing the pos-only args and pos-or-kwd args of `args`"""
# pos-only args don't exist on 3.7
pos_only_args: list[ast.arg] = getattr(args, "posonlyargs", [])
return pos_only_args + args.args
def _is_assignment_which_must_have_a_value(
target_name: str | None, *, in_class: bool
) -> bool:
return (target_name == "__match_args__" and in_class) or (
target_name == "__all__" and not in_class
)
@dataclass
class NestingCounter:
"""Class to help the PyiVisitor keep track of internal state"""
nesting: int = 0
@contextmanager
def enabled(self) -> Iterator[None]:
self.nesting += 1
try:
yield
finally:
self.nesting -= 1
@property
def active(self) -> bool:
"""Determine whether the level of nesting is currently non-zero"""
return bool(self.nesting)
class PyiVisitor(ast.NodeVisitor):
def __init__(self, filename: Path | None = None) -> None:
self.filename = Path("(none)") if filename is None else filename
self.errors: list[Error] = []
# Mapping of all private TypeVars/ParamSpecs/TypeVarTuples to the nodes where they're defined
self.typevarlike_defs: dict[TypeVarInfo, ast.Assign] = {}
# Mapping of each name in the file to the no. of occurrences
self.all_name_occurrences: Counter[str] = Counter()
self.string_literals_allowed = NestingCounter()
self.in_function = NestingCounter()
self.in_class = NestingCounter()
self.visiting_TypeAlias = NestingCounter()
# This is only relevant for visiting classes
self.current_class_node: ast.ClassDef | None = None
def __repr__(self) -> str:
return f"{self.__class__.__name__}(filename={self.filename!r})"
@staticmethod
def _get_Y023_error_message(object_name: str) -> str | None:
"""
Return the appropriate error message for a bad import/attribute-access from typing_extensions.
Return `None` if it's an OK import/attribute-access.
"""
if object_name in _BAD_COLLECTIONSABC_Y023_IMPORTS:
slice_contents = _BAD_COLLECTIONSABC_Y023_IMPORTS[object_name]
suggestion = (
f'"collections.abc.{object_name}[{slice_contents}]" '
f'(or "typing.{object_name}[{slice_contents}]" '
f"in Python 2-compatible code)"
)
bad_syntax = f'"typing_extensions.{object_name}[{slice_contents}]"'
elif object_name in _BAD_TYPING_Y023_IMPORTS:
suggestion = f'"typing.{object_name}"'
bad_syntax = f'"typing_extensions.{object_name}"'
elif object_name == "ClassVar":
suggestion = '"typing.ClassVar[T]"'
bad_syntax = '"typing_extensions.ClassVar[T]"'
elif object_name == "ContextManager":
suggestion = (
'"contextlib.AbstractContextManager[T]" '
'(or "typing.ContextManager[T]" '
"in Python 2-compatible code)"
)
bad_syntax = '"typing_extensions.ContextManager[T]"'
else:
return None
return Y023.format(good_syntax=suggestion, bad_syntax=bad_syntax)
def _check_import_or_attribute(
self, node: ast.Attribute | ast.ImportFrom, module_name: str, object_name: str
) -> None:
fullname = f"{module_name}.{object_name}"
# Y022 errors
if fullname in _BAD_Y022_IMPORTS:
good_cls_name, params = _BAD_Y022_IMPORTS[fullname]
error_message = Y022.format(
good_syntax=f'"{good_cls_name}[{params}]"',
bad_syntax=f'"{fullname}[{params}]"',
)
# Y027 errors
elif module_name == "typing" and object_name in _BAD_Y027_IMPORTS:
slice_contents = _BAD_Y027_IMPORTS[object_name]
params = "" if slice_contents is None else f"[{slice_contents}]"
error_message = Y027.format(
good_syntax=f'"collections.abc.{object_name}{params}"',
bad_syntax=f'"typing.{object_name}{params}"',
)
elif module_name in _TYPING_MODULES and object_name == "OrderedDict":
error_message = Y027.format(
good_syntax=f'"collections.OrderedDict[{_MAPPING_SLICE}]"',
bad_syntax=f'"{fullname}[{_MAPPING_SLICE}]"',
)
elif fullname == "typing.ContextManager":
error_message = Y027.format(
good_syntax='"contextlib.AbstractContextManager[T]"',
bad_syntax='"typing.ContextManager[T]"',
)
# Y023 errors
elif module_name == "typing_extensions":
analysis = self._get_Y023_error_message(object_name)
if analysis is None:
return
else:
error_message = analysis
# Y024 errors
elif fullname == "collections.namedtuple":
error_message = Y024
# Y037 errors
elif fullname == "typing.Optional":
error_message = Y037.format(
old_syntax=fullname, example='"int | None" instead of "Optional[int]"'
)
elif fullname == "typing.Union":
error_message = Y037.format(
old_syntax=fullname, example='"int | str" instead of "Union[int, str]"'
)
# Y039 errors
elif fullname == "typing.Text":
error_message = Y039
else:
return
self.error(node, error_message)
def visit_Attribute(self, node: ast.Attribute) -> None:
self.generic_visit(node)
thing = node.value
if not isinstance(thing, ast.Name):
return
self._check_import_or_attribute(
node=node, module_name=thing.id, object_name=node.attr
)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
module_name, imported_objects = node.module, node.names
if module_name is None:
return
if module_name == "collections.abc" and any(
obj.name == "Set" and obj.asname != "AbstractSet"
for obj in imported_objects
):
return self.error(node, Y025)
for obj in imported_objects:
self._check_import_or_attribute(
node=node, module_name=module_name, object_name=obj.name
)
if module_name == "typing" and any(
obj.name == "AbstractSet" for obj in imported_objects
):
self.error(node, Y038)
def _check_assignment_to_function(
self, node: ast.Assign, function: ast.expr, object_name: str
) -> None:
"""Attempt to find assignments to TypeVar-like objects.
TypeVars should usually be private.
If they are private, they should be used at least once in the file in which they are defined.
"""
cls_name = _get_name_of_class_if_from_modules(function, modules=_TYPING_MODULES)
if cls_name is None:
return
if cls_name in {"TypeVar", "ParamSpec", "TypeVarTuple"}:
if object_name.startswith("_"):
target_info = TypeVarInfo(cls_name=cls_name, name=object_name)
self.typevarlike_defs[target_info] = node
else:
self.error(node, Y001.format(cls_name))
def visit_Assign(self, node: ast.Assign) -> None:
if self.in_function.active:
# We error for unexpected things within functions separately.
self.generic_visit(node)
return
if len(node.targets) == 1:
target = node.targets[0]
if isinstance(target, ast.Name):
target_name = target.id
else:
self.error(node, Y017)
target_name = None
else:
self.error(node, Y017)
target_name = None
is_special_assignment = _is_assignment_which_must_have_a_value(
target_name, in_class=self.in_class.active
)
if is_special_assignment:
with self.string_literals_allowed.enabled():
self.generic_visit(node)
else:
self.generic_visit(node)
if target_name is None:
return
assignment = node.value
if isinstance(assignment, ast.Call):
self._check_assignment_to_function(
node=node, function=assignment.func, object_name=target_name
)
elif isinstance(assignment, (ast.Num, ast.Str, ast.Bytes)):
return self._Y015_error(node)
if (
isinstance(assignment, (ast.Constant, ast.NameConstant))
and not isinstance(assignment, ast.Ellipsis)
and assignment.value is not None
):
return self._Y015_error(node)
if not is_special_assignment:
self._check_for_type_aliases(node, target_name, assignment)
def visit_AugAssign(self, node: ast.AugAssign) -> None:
"""Allow `__all__ += ['foo', 'bar']` in a stub file"""
target, value = node.target, node.value
self.visit(target)
if _is_name(target, "__all__") and isinstance(node.op, ast.Add):
with self.string_literals_allowed.enabled():
self.visit(value)
else:
self.visit(value)
def _check_for_type_aliases(
self, node: ast.Assign, target_name: str, assignment: ast.expr
) -> None:
"""
Check for assignments that look like they could be type aliases,
but aren't annotated with `typing(_extensions).TypeAlias`.
We avoid triggering Y026 for calls and = ... because there are various
unusual cases where assignment to the result of a call is legitimate
in stubs (`T = TypeVar("T")`, `List = _Alias()`, etc.).
We also avoid triggering Y026 for aliases like `X = Any` or `Y = str`.
It's ultimately nearly impossible to reliably detect
whether these are type aliases or variable aliases,
unless you're a type checker (and we're not).
"""
if isinstance(assignment, ast.BinOp):
return self.error(node, Y026)
if not isinstance(assignment, ast.Subscript):
return
subscripted_object = assignment.value
if _is_Union(subscripted_object) or _is_Literal(subscripted_object):
self.error(node, Y026)
def visit_Name(self, node: ast.Name) -> None:
self.all_name_occurrences[node.id] += 1
def visit_Call(self, node: ast.Call) -> None:
function = node.func
self.visit(function)
if _is_NamedTuple(function):
return self.error(node, Y028)
elif _is_TypedDict(function):
if _is_bad_TypedDict(node):
self.error(node, Y031)
return
# String literals can appear in positional arguments for
# TypeVar definitions.
with self.string_literals_allowed.enabled():
for arg in node.args:
self.visit(arg)
# But in keyword arguments they're most likely TypeVar bounds,
# which should not be quoted.
for kw in node.keywords:
self.visit(kw)
# 3.8+
def visit_Constant(self, node: ast.Constant) -> None:
if not self.string_literals_allowed.active and isinstance(node.value, str):
self.error(node, Y020)
# 3.7 and lower
def visit_Str(self, node: ast.Str) -> None:
if not self.string_literals_allowed.active:
self.error(node, Y020)
def visit_Expr(self, node: ast.Expr) -> None:
if isinstance(node.value, ast.Str):
self.error(node, Y021)
else:
self.generic_visit(node)
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
node_annotation = node.annotation
if _is_Final(node_annotation):
with self.string_literals_allowed.enabled():
self.generic_visit(node)
return
node_target, node_value = node.target, node.value
if isinstance(node_target, ast.Name):
target_name = node_target.id
if _is_assignment_which_must_have_a_value(
target_name, in_class=self.in_class.active
):
with self.string_literals_allowed.enabled():
self.generic_visit(node)
if node_value is None:
self.error(node, Y035.format(var=target_name))
return
if _is_TypeAlias(node_annotation):
with self.visiting_TypeAlias.enabled():
self.generic_visit(node)
return
self.generic_visit(node)
if node_value and not isinstance(node_value, ast.Ellipsis):
self._Y015_error(node)
def _check_union_members(self, members: Sequence[ast.expr]) -> None:
members_by_dump: dict[str, list[ast.expr]] = {}
for member in members:
members_by_dump.setdefault(ast.dump(member), []).append(member)
dupes_in_union = False
for member_list in members_by_dump.values():
if len(member_list) >= 2:
self.error(member_list[1], Y016.format(unparse(member_list[1])))
dupes_in_union = True
if not dupes_in_union:
self._check_for_multiple_literals(members)
if not self.visiting_TypeAlias.active:
self._check_for_redundant_numeric_unions(members)
def _Y041_error(
self, members: Sequence[ast.expr], subtype: str, supertype: str
) -> None:
self.error(
members[0],
Y041.format(implicit_subtype=subtype, implicit_supertype=supertype),
)
def _check_for_redundant_numeric_unions(self, members: Sequence[ast.expr]) -> None:
complex_in_union, float_in_union, int_in_union = False, False, False
for member in members:
name = _get_name_of_class_if_from_modules(member, modules={"builtins"})
if name is None:
continue
if name == "complex":
complex_in_union = True
elif name == "float":
float_in_union = True
elif name == "int":
int_in_union = True
if complex_in_union:
if float_in_union:
self._Y041_error(members, subtype="float", supertype="complex")
if int_in_union:
self._Y041_error(members, subtype="int", supertype="complex")
elif float_in_union and int_in_union:
self._Y041_error(members, subtype="int", supertype="float")
def _check_for_multiple_literals(self, members: Sequence[ast.expr]) -> None:
literals_in_union, non_literals_in_union = [], []
for member in members:
if isinstance(member, ast.Subscript) and _is_Literal(member.value):
literals_in_union.append(member.slice)
else:
non_literals_in_union.append(member)
if len(literals_in_union) < 2:
return
new_literal_members: list[ast.expr] = []
for literal in literals_in_union:
if isinstance(literal, ast.Tuple):
new_literal_members.extend(literal.elts)
else:
new_literal_members.append(literal)
new_literal_slice = unparse(ast.Tuple(new_literal_members)).strip("()")
if non_literals_in_union:
suggestion = f'Combine them into one, e.g. "Literal[{new_literal_slice}]".'
else:
suggestion = f'Use a single Literal, e.g. "Literal[{new_literal_slice}]".'
self.error(members[0], Y030.format(suggestion=suggestion))
def visit_BinOp(self, node: ast.BinOp) -> None:
if not isinstance(node.op, ast.BitOr):
self.generic_visit(node)
return
# str|int|None parses as BinOp(BinOp(str, |, int), |, None)
current: ast.expr = node
members = []
while isinstance(current, ast.BinOp) and isinstance(current.op, ast.BitOr):
members.append(current.right)
current = current.left
members.append(current)
members.reverse()
# Do not call generic_visit(node), that would call this method again unnecessarily
for member in members:
self.visit(member)
self._check_union_members(members)
def visit_Subscript(self, node: ast.Subscript) -> None:
subscripted_object = node.value
if isinstance(subscripted_object, ast.Name):
subscripted_object_name = subscripted_object.id
elif (
isinstance(subscripted_object, ast.Attribute)
and isinstance(subscripted_object.value, ast.Name)
and subscripted_object.value.id in _TYPING_MODULES
):
subscripted_object_name = subscripted_object.attr
else:
subscripted_object_name = None
self.visit(subscripted_object)
if subscripted_object_name == "Literal":
with self.string_literals_allowed.enabled():
self.visit(node.slice)
return
if isinstance(node.slice, ast.Tuple):
self._visit_slice_tuple(node.slice, subscripted_object_name)
else:
self.visit(node.slice)
def _visit_slice_tuple(self, node: ast.Tuple, parent: str | None) -> None:
if parent == "Union":
self._check_union_members(node.elts)
self.visit(node)
elif parent == "Annotated":
# Allow literals, except in the first argument
if len(node.elts) > 1:
self.visit(node.elts[0])
with self.string_literals_allowed.enabled():
for elt in node.elts[1:]:
self.visit(elt)
else:
self.visit(node)
else:
self.visit(node)
def visit_If(self, node: ast.If) -> None:
test = node.test
# No types can appear in if conditions, so avoid confusing additional errors.
with self.string_literals_allowed.enabled():
self.visit(test)
if isinstance(test, ast.BoolOp):
for expression in test.values:
self._check_if_expression(expression)
else:
self._check_if_expression(test)
for line in chain(node.body, node.orelse):
self.visit(line)
def _check_if_expression(self, node: ast.expr) -> None:
if not isinstance(node, ast.Compare):
self.error(node, Y002)
return
if len(node.comparators) != 1:
# mypy doesn't support chained comparisons
self.error(node, Y002)
return
if isinstance(node.left, ast.Subscript):
self._check_subscript_version_check(node)
elif isinstance(node.left, ast.Attribute):
if _is_name(node.left.value, "sys"):
if node.left.attr == "platform":
self._check_platform_check(node)
elif node.left.attr == "version_info":
self._check_version_check(node)
else:
self.error(node, Y002)
else:
self.error(node, Y002)
else:
self.error(node, Y002)
def _check_subscript_version_check(self, node: ast.Compare) -> None:
# unless this is on, comparisons against a single integer aren't allowed
must_be_single = False
# if strict equality is allowed, it must be against a tuple of this length
can_have_strict_equals: int | None = None
version_info = node.left
if isinstance(version_info, ast.Subscript):
slc = version_info.slice
# TODO: ast.Num works, but is deprecated
if isinstance(slc, ast.Num):
# anything other than the integer 0 doesn't make much sense
if isinstance(slc.n, int) and slc.n == 0:
must_be_single = True
else:
self.error(node, Y003)
return
elif isinstance(slc, ast.Slice):
if slc.lower is not None or slc.step is not None:
self.error(node, Y003)
return
elif (
# allow only [:1] and [:2]
isinstance(slc.upper, ast.Num)
and isinstance(slc.upper.n, int)
and slc.upper.n in (1, 2)
):
can_have_strict_equals = slc.upper.n
else:
self.error(node, Y003)
return
else:
# extended slicing
self.error(node, Y003)
return
self._check_version_check(
node,
must_be_single=must_be_single,
can_have_strict_equals=can_have_strict_equals,
)
def _check_version_check(
self,
node: ast.Compare,
*,
must_be_single: bool = False,
can_have_strict_equals: int | None = None,
) -> None:
comparator = node.comparators[0]
if must_be_single:
if not isinstance(comparator, ast.Num) or not isinstance(comparator.n, int):
self.error(node, Y003)
elif not isinstance(comparator, ast.Tuple):
self.error(node, Y003)
else:
if not all(isinstance(elt, ast.Num) for elt in comparator.elts):
self.error(node, Y003)
elif len(comparator.elts) > 2:
# mypy only supports major and minor version checks
self.error(node, Y004)
cmpop = node.ops[0]
if isinstance(cmpop, (ast.Lt, ast.GtE)):
pass
elif isinstance(cmpop, (ast.Eq, ast.NotEq)):
if can_have_strict_equals is not None:
if len(comparator.elts) != can_have_strict_equals:
self.error(node, Y005.format(n=can_have_strict_equals))
else:
self.error(node, Y006)
else:
self.error(node, Y006)
def _check_platform_check(self, node: ast.Compare) -> None:
cmpop = node.ops[0]
# "in" might also make sense but we don't currently have one
if not isinstance(cmpop, (ast.Eq, ast.NotEq)):
self.error(node, Y007)
return
comparator = node.comparators[0]
if isinstance(comparator, ast.Str):
# other values are possible but we don't need them right now
# this protects against typos
if comparator.s not in ("linux", "win32", "cygwin", "darwin"):
self.error(node, Y008.format(platform=comparator.s))
else:
self.error(node, Y007)
def visit_ClassDef(self, node: ast.ClassDef) -> None:
old_class_node = self.current_class_node
self.current_class_node = node
with self.in_class.enabled():
self.generic_visit(node)
self.current_class_node = old_class_node
if any(_is_builtins_object(base_node) for base_node in node.bases):
self.error(node, Y040)
# empty class body should contain "..." not "pass"
if len(node.body) == 1:
statement = node.body[0]
if isinstance(statement, ast.Expr) and isinstance(
statement.value, ast.Ellipsis
):
return
elif isinstance(statement, ast.Pass):
self.error(statement, Y009)
return
for statement in node.body:
# "pass" should not used in class body
if isinstance(statement, ast.Pass):
self.error(statement, Y012)
# "..." should not be used in non-empty class body
elif isinstance(statement, ast.Expr) and isinstance(
statement.value, ast.Ellipsis
):
self.error(statement, Y013)
def _check_exit_method( # noqa: C901
self, node: ast.FunctionDef | ast.AsyncFunctionDef, method_name: str
) -> None:
all_args = node.args
non_kw_only_args = _non_kw_only_args_of(all_args)
num_args = len(non_kw_only_args)
varargs = all_args.vararg
def error_for_bad_exit_method(details: str) -> None:
self.error(node, Y036.format(method_name=method_name, details=details))
if num_args < 4:
if varargs:
varargs_annotation = varargs.annotation
if not (
varargs_annotation is None
or _is_builtins_object(varargs_annotation)
):
error_for_bad_exit_method(
f'Star-args in an {method_name} method should be annotated with "object", '
f'not "{unparse(varargs_annotation)}"'
)
else:
error_for_bad_exit_method(
f"If there are no star-args, "
f"there should be at least 3 non-keyword-only args "
f'in an {method_name} method (excluding "self")'
)
if len(all_args.defaults) < (num_args - 4):
error_for_bad_exit_method(
f"All arguments after the first 4 in an {method_name} method "
f"must have a default value"
)
if None in all_args.kw_defaults:
error_for_bad_exit_method(
f"All keyword-only arguments in an {method_name} method "
f"must have a default value"
)
def error_for_bad_annotation(
annotation_node: ast.expr, *, arg_number: Literal[1, 2, 3]
) -> None:
exit_arg_descriptions = [
("first", "type[BaseException] | None"),
("second", "BaseException | None"),
("third", "types.TracebackType | None"),
]
arg_name, correct_annotation = exit_arg_descriptions[arg_number - 1]
error_msg_details = (
f"The {arg_name} arg in an {method_name} method "
f'should be annotated with "{correct_annotation}" or "object", '
f'not "{unparse(annotation_node)}"'
)
error_for_bad_exit_method(details=error_msg_details)
if num_args >= 2:
arg1_annotation = non_kw_only_args[1].annotation
if arg1_annotation is None or _is_builtins_object(arg1_annotation):
pass
elif _is_PEP_604_union(arg1_annotation):
is_union_with_None, non_None_part = _analyse_exit_method_arg(
arg1_annotation
)
if not (
is_union_with_None
and isinstance(non_None_part, ast.Subscript)
and _is_type_or_Type(non_None_part.value)
and _is_BaseException(non_None_part.slice)
):
error_for_bad_annotation(arg1_annotation, arg_number=1)
else:
error_for_bad_annotation(arg1_annotation, arg_number=1)
if num_args >= 3:
arg2_annotation = non_kw_only_args[2].annotation
if arg2_annotation is None or _is_builtins_object(arg2_annotation):
pass
elif _is_PEP_604_union(arg2_annotation):
is_union_with_None, non_None_part = _analyse_exit_method_arg(
arg2_annotation
)
if not (is_union_with_None and _is_BaseException(non_None_part)):
error_for_bad_annotation(arg2_annotation, arg_number=2)
else:
error_for_bad_annotation(arg2_annotation, arg_number=2)
if num_args >= 4:
arg3_annotation = non_kw_only_args[3].annotation
if arg3_annotation is None or _is_builtins_object(arg3_annotation):
pass
elif _is_PEP_604_union(arg3_annotation):
is_union_with_None, non_None_part = _analyse_exit_method_arg(
arg3_annotation
)
if not (is_union_with_None and _is_TracebackType(non_None_part)):
error_for_bad_annotation(arg3_annotation, arg_number=3)
else:
error_for_bad_annotation(arg3_annotation, arg_number=3)
def _Y034_error(
self, node: ast.FunctionDef | ast.AsyncFunctionDef, cls_name: str
) -> None:
method_name = node.name
copied_node = deepcopy(node)
copied_node.decorator_list.clear()
copied_node.returns = ast.Name(id="Self")
first_arg = _non_kw_only_args_of(copied_node.args)[0]
if method_name == "__new__":
first_arg.annotation = ast.Subscript(
value=ast.Name(id="type"), slice=ast.Name(id="Self")
)
referrer = '"__new__" methods'
else:
first_arg.annotation = ast.Name(id="Self")
referrer = f'"{method_name}" methods in classes like "{cls_name}"'
error_message = Y034.format(
methods=referrer,
method_name=f"{cls_name}.{method_name}",
suggested_syntax=_unparse_func_node(copied_node),
)
self.error(node, error_message)
def _visit_synchronous_method(self, node: ast.FunctionDef) -> None:
method_name = node.name
all_args = node.args
classdef = self.current_class_node
assert classdef is not None
if _has_bad_hardcoded_returns(node, classdef=classdef):
return self._Y034_error(node=node, cls_name=classdef.name)
if method_name in {"__exit__", "__aexit__"}:
return self._check_exit_method(node=node, method_name=method_name)
if all_args.kwonlyargs:
return
non_kw_only_args = _non_kw_only_args_of(all_args)
# Raise an error for defining __str__ or __repr__ on a class, but only if:
# 1). The method is not decorated with @abstractmethod
# 2). The method has the exact same signature as object.__str__/object.__repr__
if method_name in {"__repr__", "__str__"}:
if (
len(non_kw_only_args) == 1
and _is_object(node.returns, "str", from_={"builtins"})
and not any(_is_abstractmethod(deco) for deco in node.decorator_list)
):
self.error(node, Y029)
elif method_name in {"__eq__", "__ne__"}:
if len(non_kw_only_args) == 2 and _is_Any(non_kw_only_args[1].annotation):
self.error(node, Y032.format(method_name=method_name))
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
if self.in_class.active:
self._visit_synchronous_method(node)
self._visit_function(node)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
if self.in_class.active:
classdef = self.current_class_node
assert classdef is not None
method_name = node.name
if _has_bad_hardcoded_returns(node, classdef=classdef):
self._Y034_error(node=node, cls_name=classdef.name)
elif method_name == "__aexit__":
self._check_exit_method(node=node, method_name=method_name)
self._visit_function(node)
def _Y019_error(
self, node: ast.FunctionDef | ast.AsyncFunctionDef, typevar_name: str
) -> None:
cleaned_method = deepcopy(node)
cleaned_method.decorator_list.clear()
new_syntax = _unparse_func_node(cleaned_method)
new_syntax = re.sub(rf"\b{typevar_name}\b", "Self", new_syntax)
self.error(
# pass the node for the first argument to `self.error`,
# rather than the function node,
# as linenos differ in Python 3.7 and 3.8+ for decorated functions
node.args.args[0],
Y019.format(typevar_name=typevar_name, new_syntax=new_syntax),
)
def _check_instance_method_for_bad_typevars(
self,
*,
method: ast.FunctionDef | ast.AsyncFunctionDef,
first_arg_annotation: ast.Name | ast.Subscript,
return_annotation: ast.Name,
) -> None:
if not isinstance(first_arg_annotation, ast.Name):
return
if first_arg_annotation.id != return_annotation.id:
return
arg1_annotation_name = first_arg_annotation.id
if arg1_annotation_name.startswith("_"):
self._Y019_error(method, arg1_annotation_name)
def _check_class_method_for_bad_typevars(
self,
*,
method: ast.FunctionDef | ast.AsyncFunctionDef,
first_arg_annotation: ast.Name | ast.Subscript,
return_annotation: ast.Name,
) -> None:
if not isinstance(first_arg_annotation, ast.Subscript):
return
cls_typevar: str
if isinstance(first_arg_annotation.slice, ast.Name):
cls_typevar = first_arg_annotation.slice.id
else:
return
# Don't error if the first argument is annotated with `builtins.type[T]` or `typing.Type[T]`
# These are edge cases, and it's hard to give good error messages for them.
if not _is_name(first_arg_annotation.value, "type"):
return
if cls_typevar == return_annotation.id and cls_typevar.startswith("_"):
self._Y019_error(method, cls_typevar)
def check_self_typevars(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
pos_or_keyword_args = node.args.args
if not pos_or_keyword_args:
return
return_annotation = node.returns
if not isinstance(return_annotation, ast.Name):
return
first_arg_annotation = pos_or_keyword_args[0].annotation
if not isinstance(first_arg_annotation, (ast.Name, ast.Subscript)):
return
decorator_names = {
decorator.id
for decorator in node.decorator_list
if isinstance(decorator, ast.Name)
}
if "classmethod" in decorator_names or node.name == "__new__":
self._check_class_method_for_bad_typevars(
method=node,
first_arg_annotation=first_arg_annotation,
return_annotation=return_annotation,
)
elif "staticmethod" in decorator_names:
return
else:
self._check_instance_method_for_bad_typevars(
method=node,
first_arg_annotation=first_arg_annotation,
return_annotation=return_annotation,
)
def _visit_function(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
with self.in_function.enabled():
self.generic_visit(node)
for i, statement in enumerate(node.body):
if i == 0:
# normally, should just be "..."
if isinstance(statement, ast.Pass):
self.error(statement, Y009)
continue
# Ellipsis is fine. Str (docstrings) is not but we produce
# tailored error message for it elsewhere.
elif isinstance(statement, ast.Expr) and isinstance(
statement.value, (ast.Ellipsis, ast.Str)
):
continue
self.error(statement, Y010)
if self.in_class.active:
self.check_self_typevars(node)
def visit_arguments(self, node: ast.arguments) -> None:
self.generic_visit(node)
args = node.args[-len(node.defaults) :]
for arg, default in chain(
zip(args, node.defaults), zip(node.kwonlyargs, node.kw_defaults)
):
if default is None:
continue # keyword-only arg without a default
if not isinstance(default, ast.Ellipsis):
self.error(default, (Y014 if arg.annotation is None else Y011))
def _Y015_error(self, node: ast.Assign | ast.AnnAssign) -> None:
old_syntax = _unparse_assign_node(node)
copy_of_node = deepcopy(node)
copy_of_node.value = ast.Constant(value=...)
new_syntax = _unparse_assign_node(copy_of_node)
error_message = Y015.format(old_syntax=old_syntax, new_syntax=new_syntax)
self.error(node, error_message)
def error(self, node: ast.AST, message: str) -> None:
self.errors.append(Error(node.lineno, node.col_offset, message, PyiTreeChecker))
def run(self, tree: ast.AST) -> Iterable[Error]:
self.errors.clear()
self.visit(tree)
for (cls_name, typevar_name), def_node in self.typevarlike_defs.items():
if self.all_name_occurrences[typevar_name] == 1:
self.error(
def_node,
Y018.format(typevarlike_cls=cls_name, typevar_name=typevar_name),
)
yield from self.errors
_TYPE_COMMENT_REGEX = re.compile(r"#\s*type:\s*(?!\s?ignore)([^#]+)(\s*#.*?)?$")
def _check_for_type_comments(path: Path) -> Iterator[Error]:
stublines = path.read_text().splitlines()
for lineno, line in enumerate(stublines, start=1):
cleaned_line = line.strip()
if cleaned_line.startswith("#"):
continue
match = _TYPE_COMMENT_REGEX.search(cleaned_line)
if not match:
continue
type_comment = match.group(1).strip()
try:
ast.parse(type_comment)
except SyntaxError:
continue
yield Error(lineno, 0, Y033, PyiTreeChecker)
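# Illustrative examples (an editorial addition, not part of flake8-pyi itself) of
# lines the check above does and does not flag with Y033:
_TYPE_COMMENT_EXAMPLES = {
    "x = ...  # type: int": True,       # flagged: a genuine type comment
    "x = ...  # type: ignore": False,   # not flagged: "type: ignore" is excluded
    "# type: int": False,               # not flagged: whole-line comments are skipped
}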
@dataclass
class PyiTreeChecker:
name: ClassVar[str] = "flake8-pyi"
version: ClassVar[str] = __version__
tree: ast.Module | None = None
filename: str = "(none)"
options: argparse.Namespace | None = None
def run(self) -> Iterable[Error]:
assert self.tree is not None
path = Path(self.filename)
if path.suffix == ".pyi":
yield from _check_for_type_comments(path)
visitor = PyiVisitor(filename=path)
for error in visitor.run(LegacyNormalizer().visit(self.tree)):
yield error
@classmethod
def add_options(cls, parser: OptionManager) -> None:
"""This is brittle, there's multiple levels of caching of defaults."""
if isinstance(parser.parser, argparse.ArgumentParser):
parser.parser.set_defaults(filename="*.py,*.pyi")
else:
for option in parser.options:
if option.long_option_name == "--filename":
option.default = "*.py,*.pyi"
option.option_kwargs["default"] = option.default
option.to_optparse().default = option.default
parser.parser.defaults[option.dest] = option.default
try:
parser.add_option(
"--no-pyi-aware-file-checker",
default=False,
action="store_true",
parse_from_config=True,
help="don't patch flake8 with .pyi-aware file checker",
)
except optparse.OptionConflictError:
# In tests, sometimes this option gets called twice for some reason.
pass
@classmethod
def parse_options(
cls, optmanager: OptionManager, options: argparse.Namespace, extra_args
) -> None:
"""This is also brittle, only checked with flake8 3.2.1 and master."""
if not options.no_pyi_aware_file_checker:
checker.FileChecker = PyiAwareFileChecker
# Please keep error code lists in README and CHANGELOG up to date
Y001 = "Y001 Name of private {} must start with _"
Y002 = (
"Y002 If test must be a simple comparison against sys.platform or sys.version_info"
)
Y003 = "Y003 Unrecognized sys.version_info check"
Y004 = "Y004 Version comparison must use only major and minor version"
Y005 = "Y005 Version comparison must be against a length-{n} tuple"
Y006 = "Y006 Use only < and >= for version comparisons"
Y007 = "Y007 Unrecognized sys.platform check"
Y008 = 'Y008 Unrecognized platform "{platform}"'
Y009 = 'Y009 Empty body should contain "...", not "pass"'
Y010 = 'Y010 Function body must contain only "..."'
Y011 = 'Y011 Default values for typed arguments must be "..."'
Y012 = 'Y012 Class body must not contain "pass"'
Y013 = 'Y013 Non-empty class body must not contain "..."'
Y014 = 'Y014 Default values for arguments must be "..."'
Y015 = 'Y015 Bad default value. Use "{new_syntax}" instead of "{old_syntax}"'
Y016 = 'Y016 Duplicate union member "{}"'
Y017 = "Y017 Only simple assignments allowed"
Y018 = 'Y018 {typevarlike_cls} "{typevar_name}" is not used'
Y019 = 'Y019 Use "_typeshed.Self" instead of "{typevar_name}", e.g. "{new_syntax}"'
Y020 = "Y020 Quoted annotations should never be used in stubs"
Y021 = "Y021 Docstrings should not be included in stubs"
Y022 = "Y022 Use {good_syntax} instead of {bad_syntax} (PEP 585 syntax)"
Y023 = "Y023 Use {good_syntax} instead of {bad_syntax}"
Y024 = 'Y024 Use "typing.NamedTuple" instead of "collections.namedtuple"'
Y025 = (
'Y025 Use "from collections.abc import Set as AbstractSet" '
'to avoid confusion with "builtins.set"'
)
Y026 = "Y026 Use typing_extensions.TypeAlias for type aliases"
Y027 = "Y027 Use {good_syntax} instead of {bad_syntax} (PEP 585 syntax)"
Y028 = "Y028 Use class-based syntax for NamedTuples"
Y029 = "Y029 Defining __repr__ or __str__ in a stub is almost always redundant"
Y030 = "Y030 Multiple Literal members in a union. {suggestion}"
Y031 = "Y031 Use class-based syntax for TypedDicts where possible"
Y032 = (
'Y032 Prefer "object" to "Any" for the second parameter in "{method_name}" methods'
)
Y033 = 'Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")'
Y034 = 'Y034 {methods} usually return "self" at runtime. Consider using "_typeshed.Self" in "{method_name}", e.g. "{suggested_syntax}"'
Y035 = 'Y035 "{var}" in a stub file must have a value, as it has the same semantics as "{var}" at runtime.'
Y036 = "Y036 Badly defined {method_name} method: {details}"
Y037 = "Y037 Use PEP 604 union types instead of {old_syntax} (e.g. {example})."
Y038 = 'Y038 Use "from collections.abc import Set as AbstractSet" instead of "from typing import AbstractSet" (PEP 585 syntax)'
Y039 = 'Y039 Use "str" instead of "typing.Text"'
Y040 = 'Y040 Do not inherit from "object" explicitly, as it is redundant in Python 3'
Y041 = 'Y041 Use "{implicit_supertype}" instead of "{implicit_subtype} | {implicit_supertype}" (see "The numeric tower" in PEP 484)'
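# Illustrative stub fragments (sketches written for this listing, not taken from the
# plugin or its test suite) showing the kind of code a few of these checks flag:
#
#     def f() -> int: pass            # Y009: empty body should contain "...", not "pass"
#     def g(x: int = 0) -> None: ...  # Y011: default values for typed arguments must be "..."
#     x = ...  # type: int            # Y033: do not use type comments in stubs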
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from designate import backend
class BackendTestMixin(object):
def get_backend_driver(self):
return backend.get_backend(cfg.CONF['service:agent'].backend_driver,
central_service=self.central_service)
def test_constructor(self):
self.get_backend_driver()
|
import random
import socket
import threading
import unittest
import telethon.network.authenticator as authenticator
from telethon.extensions import TcpClient
from telethon.network import Connection
def run_server_echo_thread(port):
def server_thread():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', port))
s.listen(1)
connection, address = s.accept()
with connection:
data = connection.recv(16)
connection.send(data)
server = threading.Thread(target=server_thread)
server.start()
class NetworkTests(unittest.TestCase):
@unittest.skip("test_tcp_client needs fix")
def test_tcp_client(self):
port = random.randint(50000, 60000) # Arbitrary non-privileged port
run_server_echo_thread(port)
msg = b'Unit testing...'
client = TcpClient()
client.connect('localhost', port)
client.write(msg)
self.assertEqual(msg, client.read(15),
msg='Read message does not equal sent message')
client.close()
@unittest.skip("Some parameters changed, so IP doesn't go there anymore.")
def test_authenticator(self):
transport = Connection('149.154.167.91', 443)
self.assertTrue(authenticator.do_authentication(transport))
transport.close()
|
import datetime
import re
import time
from functools import partialmethod
import jdatetime
from django.core import exceptions
from django.db import models
from django.conf import settings
import warnings
from django.utils import timezone
from django.utils.encoding import smart_str, smart_text
from django.utils.translation import ugettext as _
from django_jalali import forms
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class jManager(models.Manager):
    """Custom manager that rewrites the __year filter for jalali dates."""
    def filter(self, *args, **kwargs):
        """If a filter uses a __year lookup, split it into __gte and __lte bounds."""
new_kwargs = {}
for k in kwargs:
if '__year' in k:
filed_name = k.split('__year')
first_year = jdatetime.datetime(int(kwargs[k]), 1, 1)
new_kwargs['%s__gte' % filed_name[0]] = jdatetime.datetime(
int(kwargs[k]), 1, 1)
last_day = 29
if first_year.isleap():
last_day = 30
new_kwargs['%s__lte' % filed_name[0]] = jdatetime.datetime(
int(kwargs[k]), 12, last_day, 23, 59, 59)
else:
new_kwargs[k] = kwargs[k]
return models.Manager.filter(self, *args, **new_kwargs)
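# Worked example of the __year expansion above (illustrative only; "Entry" and "created"
# are hypothetical names): Entry.objects.filter(created__year=1390) is rewritten into
#     created__gte = jdatetime.datetime(1390, 1, 1)
#     created__lte = jdatetime.datetime(1390, 12, 29, 23, 59, 59)   # day 30 in leap years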
class jDateField(models.DateField):
description = _("Date (without time)")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
'invalid_date': _('Invalid date: %s'),
}
def __init__(
self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, default=models.NOT_PROVIDED, **kwargs
):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
# HACKs : auto_now_add/auto_now should be
# done as a default or a pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
# Convert datetime.date default value to jdatetime.date
if isinstance(default, datetime.date):
default = jdatetime.date.fromgregorian(date=default)
models.Field.__init__(self, verbose_name, name, default=default, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Convert jdatetime.date default value to datetime.date
if 'default' in kwargs and isinstance(kwargs['default'], jdatetime.date):
kwargs['default'] = kwargs['default'].togregorian()
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
    def parse_date(self, date_obj):
        "Take a datetime, date or 'YYYY-MM-DD' string and convert it to a jalali date"
if isinstance(date_obj, datetime.datetime):
return jdatetime.date.fromgregorian(date=date_obj.date())
if isinstance(date_obj, datetime.date):
return jdatetime.date.fromgregorian(date=date_obj)
if not ansi_date_re.search(date_obj):
raise exceptions.ValidationError(self.error_messages['invalid'])
# Now that we have the date string in YYYY-MM-DD format, check to make
# sure it's a valid date.
# We could use time.strptime here and catch errors, but datetime.date
# produces much friendlier error messages.
year, month, day = map(int, date_obj.split('-'))
try:
if year > 1500:
return jdatetime.date.fromgregorian(
date=datetime.date(year, month, day))
else:
return jdatetime.date(year, month, day)
except ValueError as e:
msg = self.error_messages['invalid_date'] % _(str(e))
raise exceptions.ValidationError(msg)
def from_db_value(self, value, expression, connection):
if value is None:
return value
return self.parse_date(value)
def to_python(self, value):
if value is None:
return value
if isinstance(value, jdatetime.datetime):
return value.date()
if isinstance(value, jdatetime.date):
return value
return self.parse_date(value)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = jdatetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super().contribute_to_class(cls, name)
if not self.null:
setattr(
cls,
'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls,
'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
    def get_prep_lookup(self, lookup_type, value):
        """This field does not support month, day or week_day lookups."""
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
prep = self.get_prep_value(value)
if type(prep) == datetime.datetime or type(prep) == datetime.date:
return prep
return prep.togregorian()
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
            # this branch should not be reached, since jManager.filter rewrites __year lookups
try:
return int(value)
except ValueError:
raise ValueError(
"The __year lookup type requires an integer argument")
if lookup_type in ('month', 'day', 'week_day'):
            raise ValueError(
                "jDateField doesn't work with month, day and week_day lookups!")
return super().get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
if isinstance(value, jdatetime.datetime):
value = value.togregorian().date()
if isinstance(value, jdatetime.date):
value = value.togregorian()
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
date_string = ''
else:
date_string = smart_text(value)
return date_string
def formfield(self, **kwargs):
defaults = {'form_class': forms.jDateField}
kwargs.update(defaults)
return super().formfield(**kwargs)
class jDateTimeField(models.DateTimeField):
default_error_messages = {
'invalid': _(
u'Enter a valid date/time in '
u'YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
}
description = _("Date (with time)")
def __init__(
self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, default=models.NOT_PROVIDED, **kwargs
):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
# HACKs : auto_now_add/auto_now should be
# done as a default or a pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
# Convert datetime.datetime default value to jdatetime.datetime
if isinstance(default, datetime.date):
default = jdatetime.datetime.fromgregorian(datetime=default)
models.Field.__init__(self, verbose_name, name, default=default, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Convert jdatetime.datetime default value to datetime.datetime
default = kwargs.get('default')
if default and isinstance(default, (jdatetime.datetime)):
kwargs['default'] = default.togregorian()
return name, path, args, kwargs
def get_internal_type(self):
return "DateTimeField"
    def parse_date(self, datetime_obj):
        "Take a datetime, date or datetime string and convert it to a jalali datetime"
if isinstance(datetime_obj, datetime.datetime):
try:
if datetime_obj.year < 1700:
return jdatetime.datetime(
datetime_obj.year, datetime_obj.month,
datetime_obj.day, datetime_obj.hour,
datetime_obj.minute, datetime_obj.second,
datetime_obj.microsecond, datetime_obj.tzinfo)
else:
return jdatetime.datetime.fromgregorian(
datetime=datetime_obj)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'])
if isinstance(datetime_obj, datetime.date):
try:
if datetime_obj.year < 1700:
return jdatetime.datetime(datetime_obj.year,
datetime_obj.month,
datetime_obj.day)
else:
return jdatetime.datetime.fromgregorian(date=datetime_obj)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'])
# Attempt to parse a datetime:
datetime_obj = smart_str(datetime_obj)
if not datetime_obj:
return None
# split usecs, because they are not recognized by strptime.
if '.' in datetime_obj:
try:
datetime_obj, usecs = datetime_obj.split('.')
if '+' in usecs:
usecs, tz = usecs.split('+')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'])
else:
usecs = 0
kwargs = {'microsecond': usecs}
formats = [
('%Y-%m-%d %H:%M:%S', 6),
('%Y-%m-%d %H:%M', 5),
('%Y-%m-%d', 3),
]
for format, c in formats:
try: # Seconds are optional, so try converting seconds first.
t = time.strptime(datetime_obj, format)
if t.tm_year > 1700:
return datetime.datetime(
*time.strptime(datetime_obj, format)[:c],
**kwargs)
else:
return jdatetime.datetime(
*time.strptime(datetime_obj, format)[:c],
**kwargs)
except ValueError:
try:
return jdatetime.datetime.strptime(
datetime_obj,
'%Y-%m-%d %H:%M'
).replace(**kwargs)
except ValueError:
pass
raise exceptions.ValidationError(self.error_messages['invalid'])
def from_db_value(self, value, expression, connection):
if value is None:
return value
if value is not None and settings.USE_TZ:
# Remove and set timezone information
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
value = timezone.make_aware(value, default_timezone)
return self.parse_date(value)
def to_python(self, value):
if value is None:
return value
if isinstance(value, jdatetime.datetime):
return value
if isinstance(value, jdatetime.date):
try:
return jdatetime.datetime(value.year, value.month, value.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'])
return self.parse_date(value)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = jdatetime.datetime.now()
if value is not None and settings.USE_TZ and timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn(
"DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning
)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
if isinstance(value, jdatetime.datetime):
value = value.togregorian()
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
date_string = ''
else:
date_string = smart_text(value)
return date_string
def contribute_to_class(self, cls, name):
super().contribute_to_class(cls, name)
if not self.null:
setattr(
cls,
'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls,
'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
    def get_prep_lookup(self, lookup_type, value):
        """This field does not support month, day or week_day lookups."""
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
prep = self.get_prep_value(value)
if type(prep) == datetime.datetime or type(prep) == datetime.date:
return prep
return prep.togregorian()
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
            # this branch should not be reached, since jManager.filter rewrites __year lookups
try:
return int(value)
except ValueError:
raise ValueError(
"The __year lookup type requires an integer argument")
if lookup_type in ('month', 'day', 'week_day'):
            raise ValueError(
                "jDateTimeField doesn't work with month, day and week_day lookups!")
return super().get_prep_lookup(lookup_type, value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.jDateTimeField}
kwargs.update(defaults)
return super().formfield(**kwargs)
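# Minimal usage sketch (an assumption about how these fields are typically wired up;
# the model name below is hypothetical):
#
#     from django.db import models
#     from django_jalali.db import models as jmodels
#
#     class Entry(models.Model):
#         objects = jmodels.jManager()
#         pub_date = jmodels.jDateField(auto_now_add=True)
#         updated = jmodels.jDateTimeField(auto_now=True)
#
#     Entry.objects.filter(pub_date__year=1390)   # expanded into __gte/__lte by jManager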
|
import logging
import click
import click_log
import pandas as pd
from linkml_runtime.utils.schemaview import SchemaView
from sheets_and_friends.converters.linkml2dataharmonizer import (
LinkML2DataHarmonizer,
# ValidationConverter,
)
from sheets_and_friends.converters.sheet2linkml import Sheet2LinkML
import pprint
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option("--model_file", type=click.Path(exists=True), required=True)
@click.option("--selected_class", required=True)
@click.option("--default_section", default="default", show_default=True)
@click.option("--default_source", default="", show_default=True)
@click.option("--default_capitalize", default="", show_default=True)
@click.option("--default_data_status", default="", show_default=True)
@click.option(
"--output_file", type=click.Path(), default="target/data.tsv", show_default=True
)
def linkml2dataharmonizer(
model_file,
selected_class,
default_section,
default_source,
default_capitalize,
default_data_status,
output_file,
):
lml_dh = LinkML2DataHarmonizer(linkml_model_path=model_file)
section_list = lml_dh._get_section_list(selected_class, default_section)
term_pv_dict = lml_dh._get_term_pv_list(
selected_class,
default_section,
default_source,
default_capitalize,
default_data_status,
)
term_list = term_pv_dict["term"]
pv_list = term_pv_dict["pv"]
consolidated_list = lml_dh._combined_list(
section_list, term_list, pv_list, selected_class, default_section
)
if output_file:
consolidated_list.to_csv(output_file, sep="\t", index=False)
else:
click.echo(consolidated_list)
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option("--model_file", type=click.Path(exists=True), required=True)
@click.option("--output_file", type=click.Path(), required=True)
def mixs_package_map(model_file, output_file):
mixs_view = SchemaView(model_file)
# should be "MIxS"
# logger.info(mixs_view.schema.name)
mixs_classes = mixs_view.all_classes()
mixs_class_names = list(mixs_classes.keys())
mixs_class_names.sort()
blank_class_row = {
"class_name": None,
"is_a_parent": None,
"is_mixin": False,
"mixins_used": None,
}
class_row_list = []
for current_class_name in mixs_class_names:
# logger.info(current_class_name)
current_cd = mixs_view.get_class(current_class_name)
# current_is_a = current_cd.is_a
current_mixin_flag = current_cd.mixin
mixins_used = current_cd.mixins
# logger.info(f"{i}\t{current_is_a}\t{current_mixin_flag}\t{mixins_used}")
current_row = blank_class_row.copy()
current_row["class_name"] = current_class_name
if current_cd.is_a is not None:
current_row["is_a_parent"] = str(current_cd.is_a)
if current_mixin_flag:
current_row["is_mixin"] = True
# current_row['mixins_used'] = str(mixins_used)
current_row["mixins_used"] = "|".join(mixins_used)
class_row_list.append(current_row)
mixs_class_frame = pd.DataFrame(class_row_list)
# logger.info(mixs_class_frame)
# for now, it looks like all is_a parents of any other class are the packages
package_classes = list(mixs_class_frame["is_a_parent"].drop_duplicates())
package_classes = [
current_class for current_class in package_classes if current_class is not None
]
package_classes.sort()
# logger.info(package_classes)
# env_package_pvs = mixs_view.get_enum('env_package_enum').permissible_values
# ep_pvs_names = list(env_package_pvs.keys())
# ep_pvs_names.sort()
# logger.info(ep_pvs_names)
mims_package_classes = mixs_class_frame["class_name"].loc[
mixs_class_frame["mixins_used"].eq("MIMS")
& mixs_class_frame["is_a_parent"].isin(package_classes)
]
mims_package_classes = list(mims_package_classes)
mims_package_classes.sort()
selected_classes = [
"built environment",
"microbial mat_biofilm",
"miscellaneous natural or artificial environment",
"plant-associated",
"sediment",
"soil",
"wastewater_sludge",
"water",
]
blank_slot_row = {"class_name": None, "slot_name": None, "disposition": None}
slot_row_list = []
for current_pc_name in selected_classes:
# logger.info(current_pc_name)
induceds = mixs_view.class_induced_slots(current_pc_name)
induceds_names = [ci.name for ci in induceds]
induceds_names.sort()
for current_induced in induceds_names:
# logger.info(f"{current_pc_name} {current_induced}")
current_slot_row = blank_slot_row.copy()
current_slot_row["class_name"] = current_pc_name
current_slot_row["slot_name"] = current_induced
slot_row_list.append(current_slot_row)
slot_frame = pd.DataFrame(slot_row_list)
# logger.info(slot_frame)
slot_frame.to_csv(output_file, sep="\t", index=False)
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option("--model_file", type=click.Path(exists=True), required=True)
@click.option("--selected_class", required=True)
@click.option(
"--output_file", type=click.Path(), default="target/data.tsv", show_default=True
)
def range_str_ser(model_file, selected_class, output_file):
# model_file = "target/soil_biosample_modular_annotated.yaml"
# selected_class = "soil_biosample"
# soil_biosample_regex_insight.tsv
row_list = []
sb_view = SchemaView(model_file)
sb_class = sb_view.get_class(selected_class)
sb_slots = sb_class.slots
sb_slots.sort()
sb_enums = sb_view.all_enums()
sb_enum_names = list(sb_enums.keys())
for i in sb_slots:
i_struct = sb_view.get_slot(i)
elements = ["name", "title", "string_serialization", "range"]
row_dict = {}
for j in elements:
row_dict[j] = i_struct[j]
row_dict["enum_range"] = False
row_dict["enum_string_ser"] = False
row_dict["enum_discrepancy"] = False
# row_dict['enum_conflict'] = False
if i_struct.range in sb_enum_names:
row_dict["enum_range"] = True
if row_dict["string_serialization"] == "enumeration":
row_dict["enum_string_ser"] = True
row_dict["enum_discrepancy"] = (
row_dict["enum_range"] != row_dict["enum_string_ser"]
)
row_list.append(row_dict)
row_frame = pd.DataFrame(row_list)
row_frame.to_csv(output_file, sep="\t", index=False)
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option(
"--client_secret",
default="local/client_secret_fresh-sheet2linkml.apps.googleusercontent.com.json",
type=click.Path(exists=True),
    help="path to your google sheet authentication file",
show_default=True,
)
@click.option(
"--sheet_id",
default="1WErXj8sM5uJi51VVLNQZDilDF7wMiyBC2T4zELp7Axc",
help="ID of the google sheet that will provide the curated enums",
show_default=True,
)
@click.option(
"--tab_title",
default="Subset_EnvO_Broad_Local_Medium_terms_062221",
help="which tab in the google sheet will provide the curated enums?",
show_default=True,
)
@click.option(
"--curated_tsv_out",
default="target/tidy_triad_curations.tsv",
type=click.Path(),
help="destination for modified data.tsv",
show_default=True,
)
@click.option(
"--env_package",
default="soil",
help="""for which environmental packages (as expressed in the google sheet)
    do you want to extract curated enums?""",
)
def tidy_triad_curations(
client_secret, sheet_id, tab_title, curated_tsv_out, env_package
):
raw = Sheet2LinkML.get_gsheet_frame(client_secret, sheet_id, tab_title)
# raw.columns = ["enum", "raw_id", "permissible_value", "definition", "env_package"]
#
# raw["partial"] = raw["raw_id"].str.replace(
# "<http://purl.obolibrary.org/obo/ENVO_", "ENVO:", regex=True
# )
#
# raw["term_id"] = raw["partial"].str.replace(">", "", regex=True)
#
# raw = raw[["env_package", "enum", "permissible_value", "term_id"]]
#
# raw["env_package"] = raw["env_package"].str.split("|", expand=False)
raw["env_package"] = raw["packages_consensus"].str.split("|", expand=False)
df_explode = raw.explode("env_package")
df_explode = df_explode.loc[df_explode["env_package"].eq(env_package)]
# logger.info(df_explode)
# df_explode["env_package"] = df_explode["env_package"].str.lower()
df_explode.to_csv(curated_tsv_out, sep="\t", index=False)
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option(
"--data_tsv_in",
default="target/data.tsv",
type=click.Path(exists=True),
help="path to DataHarmonizer data.tsv input",
show_default=True,
)
@click.option(
"--data_tsv_out",
default="target/data_promoted.tsv",
type=click.Path(),
help="destination for modified data.tsv",
show_default=True,
)
@click.option(
"--promote", multiple=True, help="which columns should be promoted to select type?"
)
@click.option(
"--extra_row_files",
multiple=True,
type=click.Path(exists=True),
help="path to files defining the new select/enum column(s) etc.",
show_default=True,
)
def promote_to_select(data_tsv_in, data_tsv_out, promote, extra_row_files):
data_in = pd.read_csv(data_tsv_in, sep="\t")
for i in promote:
logger.info(i)
data_in.loc[data_in["label"].eq(i), "datatype"] = "select"
data_in.loc[data_in["label"].eq(i), "pattern"] = ""
to_concat = [data_in]
for i in extra_row_files:
logger.info(i)
temp = pd.read_csv(i, sep="\t")
to_concat.append(temp)
catted = pd.concat(to_concat)
catted.to_csv(data_tsv_out, sep="\t", index=False)
|
# -*- coding: utf-8 -*-
from unittest.mock import MagicMock, patch
from chaosgcp.gke.nodepool.actions import create_new_nodepool, delete_nodepool, \
swap_nodepool
import fixtures
@patch('chaosgcp.gke.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgcp.build', autospec=True)
@patch('chaosgcp.Credentials', autospec=True)
def test_create_nodepool(Credentials, service_builder, wait_on_operation):
project_id = fixtures.configuration["gcp_project_id"]
cluster_name = fixtures.configuration["gcp_gke_cluster_name"]
zone = fixtures.configuration["gcp_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = create_new_nodepool(
body=fixtures.nodepool.body,
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
wait_on_operation.assert_called_with(ops_svc,
projectId=fixtures.configuration["gcp_project_id"],
zone=fixtures.configuration["gcp_zone"],
operationId="mynodepool")
@patch('chaosgcp.gke.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgcp.build', autospec=True)
@patch('chaosgcp.Credentials', autospec=True)
def test_delete_nodepool(Credentials, service_builder, wait_on_operation):
project_id = fixtures.configuration["gcp_project_id"]
cluster_name = fixtures.configuration["gcp_gke_cluster_name"]
zone = fixtures.configuration["gcp_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = delete_nodepool(
node_pool_id="mynodepool",
secrets=fixtures.secrets,
configuration=fixtures.configuration
)
delete_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
nodePoolId="mynodepool")
wait_on_operation.assert_called_with(ops_svc,
projectId=fixtures.configuration["gcp_project_id"],
zone=fixtures.configuration["gcp_zone"],
operationId="mynodepool")
@patch('chaosgcp.gke.nodepool.actions.drain_nodes', autospec=False)
@patch('chaosgcp.gke.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgcp.build', autospec=True)
@patch('chaosgcp.Credentials', autospec=True)
def test_swap_nodepool(Credentials, service_builder, wait_on_operation,
drain_nodes):
project_id = fixtures.configuration["gcp_project_id"]
cluster_name = fixtures.configuration["gcp_gke_cluster_name"]
zone = fixtures.configuration["gcp_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "default-pool"
}
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = swap_nodepool(
old_node_pool_id="mynodepool",
new_nodepool_body=fixtures.nodepool.body,
delete_old_node_pool=True,
secrets=fixtures.secrets_with_k8s,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
drain_nodes.assert_called_with(
timeout=120, delete_pods_with_local_storage=False,
secrets=fixtures.secrets_with_k8s,
label_selector="cloud.google.com/gke-nodepool=mynodepool"
)
delete_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
nodePoolId="mynodepool")
wait_on_operation.assert_called_with(ops_svc,
projectId=fixtures.configuration["gcp_project_id"],
zone=fixtures.configuration["gcp_zone"],
operationId="mynodepool")
@patch('chaosgcp.gke.nodepool.actions.drain_nodes', autospec=False)
@patch('chaosgcp.gke.nodepool.actions.wait_on_operation', autospec=False)
@patch('chaosgcp.build', autospec=True)
@patch('chaosgcp.Credentials', autospec=True)
def test_swap_nodepool_without_delete(Credentials, service_builder,
wait_on_operation, drain_nodes):
project_id = fixtures.configuration["gcp_project_id"]
cluster_name = fixtures.configuration["gcp_gke_cluster_name"]
zone = fixtures.configuration["gcp_zone"]
Credentials.from_service_account_file.return_value = MagicMock()
service = MagicMock()
service_builder.return_value = service
nodepool_svc = MagicMock()
service.projects().zones().clusters().nodePools.return_value = nodepool_svc
create_np = MagicMock()
nodepool_svc.create = create_np
create_np.return_value.execute.return_value = {
"name": "default-pool"
}
delete_np = MagicMock()
nodepool_svc.delete = delete_np
delete_np.return_value.execute.return_value = {
"name": "mynodepool"
}
ops_svc = MagicMock()
service.projects().zones().operations.return_value = ops_svc
response = swap_nodepool(
old_node_pool_id="mynodepool",
new_nodepool_body=fixtures.nodepool.body,
delete_old_node_pool=False,
secrets=fixtures.secrets_with_k8s,
configuration=fixtures.configuration
)
create_np.assert_called_with(
projectId=project_id, zone=zone, clusterId=cluster_name,
body=fixtures.nodepool.body)
drain_nodes.assert_called_with(
timeout=120, delete_pods_with_local_storage=False,
secrets=fixtures.secrets_with_k8s,
label_selector="cloud.google.com/gke-nodepool=mynodepool"
)
delete_np.assert_not_called()
|
import os, sys, operator
from step3fcn import *
# must provide the dict file and the project name
dictfile = sys.argv[1]
proj = sys.argv[2]
ppath = "../../proj/"+proj+"/"
ptKey = "../../proj/"+proj+"/ptselection/ptkey.txt"
noteMdata = "../../res/corpus/testnotemdata.txt"
# target classes #
target_class = ["mbc","drecur","lrecur","loco","mets"]
s6pts = getPids(0,ptKey) #S6 ids for subset
termDict = getTerminology(dictfile) #get terminology
#for pt in s6pts: print pt,s6pts[pt]
noteMDict = loadOncoNoteMdata(noteMdata) #patient_id|note_id|doc_description|age_at_note_DATE_in_days|note_year
seqDict = {}
for target in target_class:
seqFile = ppath +"ants/"+target+"/extraction*.tsv"
tmpDict = loadSeqs(seqFile,noteMDict,termDict)
seqDict.update(tmpDict)
print len(seqDict)
for oncoid in s6pts:
pt = str(s6pts[oncoid])
#for pt in testpts:
# print pt
ptAnts = []
fout = open(ppath+"/ptseq/seqs_"+pt+".txt","w")
for sid in seqDict:
tmp = seqDict[sid]
sinfo = tmp.split("|")
#print sinfo
tmpsinfo = sinfo[0].split("-")
if tmpsinfo[1]==pt:
#print "found:",sinfo
toff = sinfo[7]
litem = [toff,tmp]
if litem not in ptAnts:
ptAnts.append(litem)
ptList = sorted(ptAnts, key=operator.itemgetter(0))
#tmpPtList = set(ptList)
#ptList = list(tmpPtList)
for item in ptList:
#print item[0],item[1]
print >> fout, item[1]
fout.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Package setup."""
import os
from codecs import open
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
version_path = os.path.join(here, "tantrum", "version.py")
about = {}
with open(version_path, "r", "utf-8") as f:
x = f.readlines()
contents = "\n".join(a for a in x if not a.startswith("#"))
exec(contents, about) # nosec
with open("README.md", "r", "utf-8") as f:
readme = f.read()
packages = find_packages()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=readme,
long_description_content_type="text/markdown",
author=about["__author__"],
author_email=about["__author_email__"],
url=about["__url__"],
packages=packages,
package_data={"": ["LICENSE"]},
package_dir={"tantrum": "tantrum"},
scripts=[], # TODO(!)
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=["requests[security,socks]", "six", "xmltodict"],
tests_require=[],
license=about["__license__"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'depot_tools/git',
'recipe_engine/json',
'recipe_engine/path',
'perf_dashboard',
'recipe_engine/context',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
# Constants
ANDROID_TOOLS_GIT = 'https://chromium.googlesource.com/android_tools'
TEST_FILES_URL = 'http://downloads.webmproject.org/test_data/libvpx'
# Device root is a special folder on the device which we have permissions to
# read / write
DEVICE_ROOT = '/data/local/tmp'
# TODO (joshualitt) the configure script is messed up so we need a relative
# path. Essentially, it must be using argv[0] when invoking some of the
# scripts in the libvpx directory
CONFIGURE_PATH_REL = './libvpx/configure'
BUILDER_TO_DEVICE = freeze({
'Nexus 5 Builder' : 'nexus_5',
'Nexus 7 Builder': 'nexus_7'
})
from recipe_engine.recipe_api import Property
PROPERTIES = {
'libvpx_git_url': Property(),
'buildername': Property(),
}
def RunSteps(api, libvpx_git_url, buildername):
# Paths and other constants
build_root = api.path['start_dir']
# Android tools DEPS
android_tools_root = build_root.join('android_tools')
adb = android_tools_root.join('sdk', 'platform-tools', 'adb')
ndk_root = android_tools_root.join('ndk')
# libvpx paths
libvpx_root = build_root.join('libvpx')
test_data = build_root.join('test_data')
api.python.inline(
'clean_build', r"""
import os, sys, shutil
root = sys.argv[1]
nuke_dirs = sys.argv[2:]
for fname in os.listdir(root):
path = os.path.join(root, fname)
if os.path.isfile(path):
os.unlink(path)
elif fname in nuke_dirs:
shutil.rmtree(path)
""", args=[build_root, 'libs', 'obj', 'armeabi-v7a'])
# Checkout android_tools and libvpx. NDK and SDK are required to build
# libvpx for android
api.git.checkout(
ANDROID_TOOLS_GIT, dir_path=android_tools_root, recursive=True)
api.git.checkout(
libvpx_git_url, dir_path=libvpx_root, recursive=True)
# The dashboards need a number to assign to this build for ordering purposes.
with api.context(cwd=libvpx_root):
step_result = api.git('number', stdout=api.raw_io.output_text())
libvpx_revision_number = step_result.stdout
api.step(
'configure', [
CONFIGURE_PATH_REL, '--disable-examples', '--disable-install-docs',
'--disable-install-srcs', '--enable-unit-tests', '--enable-webm-io',
'--disable-vp8-encoder', '--enable-vp9-encoder',
'--enable-decode-perf-tests', '--enable-external-build',
'--enable-vp8-decoder', '--enable-vp9-decoder',
'--enable-encode-perf-tests', '--disable-realtime-only',
'--sdk-path=%s' % ndk_root, '--target=armv7-android-gcc'])
# NDK requires NDK_PROJECT_PATH environment variable to be defined
with api.context(env={'NDK_PROJECT_PATH': build_root}):
api.step(
'ndk-build', [
ndk_root.join('ndk-build'),
'APP_BUILD_SCRIPT=%s'
% libvpx_root.join('test', 'android', 'Android.mk'),
'APP_ABI=armeabi-v7a', 'APP_PLATFORM=android-14',
'APP_OPTIM=release', 'APP_STL=gnustl_static'])
test_root = libvpx_root.join('test')
api.python(
'get_files', test_root.join('android', 'get_files.py'),
args=[
'-i', test_root.join('test-data.sha1'),
'-o', test_data, '-u', TEST_FILES_URL])
api.python(
'transfer_files',
api.package_repo_resource('scripts', 'slave', 'android',
'transfer_files.py'),
args=[adb, DEVICE_ROOT, test_data])
lib_root = build_root.join('libs', 'armeabi-v7a')
api.step('push_so', [adb, 'push', lib_root, DEVICE_ROOT])
step_result = api.python.inline(
'adb_wrap', r"""
import sys, subprocess, time
out = open(sys.argv[1], "w")
p = subprocess.Popen(sys.argv[2:], stdout=out)
while p.poll() is None:
print "Still working"
time.sleep(60)
print "done"
sys.exit(p.returncode)
""", args=[api.raw_io.output_text(), adb, 'shell',
'LD_LIBRARY_PATH=' + DEVICE_ROOT,
'LIBVPX_TEST_DATA_PATH=' + DEVICE_ROOT, DEVICE_ROOT +
'/vpx_test', '--gtest_filter=-*Large*'])
step_result = api.python(
'scrape_logs',
libvpx_root.join('test', 'android', 'scrape_gtest_log.py'),
args=['--output-json', api.json.output()],
stdin=api.raw_io.input_text(step_result.raw_io.output_text))
data = step_result.json.output
# Data is json array in the format as follows:
# videoName: name
# threadCount: #ofthreads
# framesPerSecond: fps
points = []
device = BUILDER_TO_DEVICE[buildername]
#TODO(martiniss) convert loop
for i in data:
if i["type"] == "encode_perf_test":
# Two data points for encoder tests, FPS and minPsnr
testname = "libvpx/encode/perf_test/fps/" + device + "/"
testname = testname + i["videoName"] + "_" + str(i["speed"])
p = api.perf_dashboard.get_skeleton_point(
testname,
libvpx_revision_number,
i["framesPerSecond"],
bot=api.m.properties["bot_id"])
p['units'] = "fps"
points.append(p)
#minPsnr
testname = "libvpx/encode/perf_test/minPsnr/" + device + "/"
testname = testname + i["videoName"] + "_" + str(i["speed"])
p = api.perf_dashboard.get_skeleton_point(
testname,
libvpx_revision_number,
i["minPsnr"],
bot=api.m.properties["bot_id"])
p['units'] = "dB"
points.append(p)
else:
testname = "libvpx/decode/perf_test/" + device + "/"
testname = testname + i["videoName"] + "_" + str(i["threadCount"])
p = api.perf_dashboard.get_skeleton_point(
testname,
libvpx_revision_number,
i["framesPerSecond"],
bot=api.m.properties["bot_id"])
p['units'] = "fps"
points.append(p)
api.perf_dashboard.set_default_config()
api.perf_dashboard.add_point(points)
def GenTests(api):
# Right now we just support linux, but one day we will have mac and windows
# as well
yield (
api.test('basic_linux_64') +
api.properties(
libvpx_git_url='https://chromium.googlesource.com/webm/libvpx',
bot_id='libvpx-bot', buildername='Nexus 5 Builder',
mastername='client.libvpx', buildnumber='75') +
api.step_data('git number', stdout=api.raw_io.output_text('42')) +
api.step_data('adb_wrap',
api.raw_io.output_text("This is text with json inside normally")) +
api.step_data('scrape_logs', api.json.output(
[
{
"type" : "decode_perf_test",
"decodeTimeSecs": 29.344307,
"framesPerSecond": 609.82868,
"threadCount": 1,
"totalFrames": 17895,
"version": "v1.3.0-2045-g38c2d37",
"videoName": "vp90-2-bbb_426x240_tile_1x1_180kbps.webm"
},
{
"type" : "encode_perf_test",
"encodeTimeSecs": 56.277676,
"speed" : 5,
"minPsnr" : 43.5,
"framesPerSecond": 317.976883,
"threadCount": 2,
"totalFrames": 17895,
"version": "v1.3.0-2045-g38c2d37",
"videoName": "vp90-2-bbb_640x360_tile_1x2_337kbps.webm"
}
])))
|
"""Runs inference on the model that was build by reconstruct_mind.py."""
import random
from ai_replica.utils.files import read_json
from ai_replica.utils.nlp import get_bag_of_words, similarity_score_of_word_bags
def load_model(load_path):
"""
>>> res0 = load_model("ai_replica/resources/mock_data/mock_personal_data/reconstructed_mind_models/model.txt")
>>> len(res0["thoughts"])
567
>>> res0["thoughts"][0]["words_bag"]
['ancestors', 'anecdotes', 'any', 'ever', 'had', 'have', 'i', 'in', 'little', 'my', 'obtaining', 'of', 'pleasure']
"""
loaded_model = read_json(load_path)
return loaded_model
global_model = load_model("personal_data/reconstructed_mind_models/model.txt")
def get_model_answer(user_input, seed=None, custom_model=None):
"""
>>> test_model = load_model("ai_replica/resources/mock_data/mock_personal_data/reconstructed_mind_models/model.txt")
>>> answer0 = get_model_answer(user_input="love", custom_model=test_model, seed=42)
>>> "my mother’s love" in answer0
True
"""
random.seed(seed)
input_bag = get_bag_of_words(user_input)
model = global_model if custom_model is None else custom_model
highest_score = 0
closest_thought = model["thoughts"][0]
for thought in model["thoughts"]:
thought_bag = thought["words_bag"]
score = similarity_score_of_word_bags(input_bag, thought_bag)
if score > highest_score:
highest_score = score
closest_thought = thought
if score == highest_score:
if random.random() > 0.5:
highest_score = score
closest_thought = thought
return closest_thought["answer"].strip()
|
from flask_mail import Message
from flask import render_template
from . import mail
def welcome_message(subject,template,to,**kwargs):
sender_email = "stephenremmi21@gmail.com"
email = Message(subject, sender=sender_email, recipients=[to])
email.body = render_template(template + ".txt",**kwargs)
    email.html = render_template(template + ".html",**kwargs)
    mail.send(email)
def notification_message(subject,template,to,**kwargs):
sender_email = "stephenremmi21@gmail.com"
email = Message(subject, sender=sender_email, recipients=[to])
email.body = render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 14:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0015_auto_20170326_1250'),
]
operations = [
migrations.AlterField(
model_name='person',
name='cv',
field=models.FileField(blank=True, upload_to=''),
),
migrations.AlterField(
model_name='person',
name='display_pic',
field=models.FileField(blank=True, upload_to=''),
),
]
|
"""
This file offers the methods to automatically retrieve the graph Methanobrevibacter gottschalkii.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MethanobrevibacterGottschalkii(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Methanobrevibacter gottschalkii graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Methanobrevibacter gottschalkii graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MethanobrevibacterGottschalkii",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
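# Minimal usage sketch (an assumption; the exact import path depends on the surrounding
# package layout):
#
#     graph = MethanobrevibacterGottschalkii(directed=False, version="links.v11.5")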
|
# -*- coding:utf-8 -*-
"""
feature factory web application api route
"""
from django.conf.urls import patterns, url
from apps.interface.views.featureconfig import *
# from apps.interface.views.featureprocess import *
urlpatterns = patterns(
'',
url(r'^common_conf/show/(?P<featurename>\w+)/(?P<page>\d+)/$', FeatureConfig.as_view(),
name='feature_config_show'),
url(r'^common_conf/update/(?P<item>\w+)/(?P<featureid>\d+)/$', FeatureConfig.as_view(),
name='feature_config_update'),
url(r'^common_conf/add/(?P<item>\w+)/$', FeatureConfig.as_view(), name='feature_config_add'),
url(r'^shunt_conf/show/(?P<featurename>\w+)/(?P<page>\d+)/$', FeatureShuntConfig.as_view(),
name='feature_shunt_config_show'),
url(r'^shunt_conf/update/(?P<featureid>\d+)/$', FeatureShuntConfig.as_view(),
        name='feature_shunt_config_update'),
url(r'^shunt_conf/add/$', FeatureShuntConfig.as_view(), name='feature_shunt_config_add'),
url(r'^relevance_conf/show/(?P<featurename>\w+)/(?P<page>\d+)/$', FeatureRelevanceConfig.as_view(),
        name='feature_relevance_config_show'),
url(r'^relevance_conf/update/(?P<featureid>\d+)/$', FeatureRelevanceConfig.as_view(),
name='feature_relevance_config_update'),
url(r'^relevance_conf/add/$', FeatureRelevanceConfig.as_view(), name='feature_relevance_config_add'),
url(r'^relevance_conf/check/$', FeatureRelevanceConfig.as_view(), name='feature_relevance_config_check'),
url(r'^remote_conf/show/(?P<data_identity>\w+)/(?P<page>\d+)/$', RemoteConfig.as_view(), name='remote_config_show'),
url(r'^remote_conf/update/(?P<id>\w+)/$', RemoteConfig.as_view(), name='remote_config_update'),
url(r'^remote_conf/add/$', RemoteConfig.as_view(), name='remote_config_add'),
url(r'^feature_process/show/(?P<featurename>\w+)/(?P<page>\d+)/$', FeatureProcessAPI.as_view(),
name='feature_process_get'),
url(r'^feature_process/test/$', FeatureProcessAPI.as_view(), name='feature_process_test'),
url(r'^feature_process/write/$', FeatureProcessAPI.as_view(), name='feature_process_write'),
url(r'^feature_process/delete/$', FeatureProcessAPI.as_view(), name='feature_process_delete'),
url(r'^get_list/(?P<item>\w+)/$', GetItemList.as_view(), name='get_list'),
url(r'^pre_filed_conf/show/(?P<fieldname>\w+)/(?P<page>\d+)/$', PreFieldInfoConfig.as_view(),
name='pre_filed_conf_show'),
url(r'^pre_filed_conf/update/(?P<fieldid>\d+)/$', PreFieldInfoConfig.as_view(), name='pre_filed_conf_update'),
url(r'^pre_filed_conf/add/$', PreFieldInfoConfig.as_view(), name='pre_filed_conf_add'),
url(r'^type_info/show/(?P<item>\w+)/(?P<page>\d+)/$', TypeInfoConfig.as_view(), name='type_conf_show'),
url(r'^type_info/update/(?P<item>\w+)/(?P<id>\d+)/$', TypeInfoConfig.as_view(), name='type_conf_update'),
url(r'^type_info/add/(?P<item>\w+)/$', TypeInfoConfig.as_view(), name='type_conf_add'),
url(r'^mapcode/show/(?P<featurename>\w+)/(?P<page>\d+)/$', MapCodeConfig.as_view(), name='mapcode_show'),
url(r'^mapcode/update/(?P<id>\d+)/$', MapCodeConfig.as_view(), name='mapcode_update'),
url(r'^mapcode/add/$', MapCodeConfig.as_view(), name='mapcode_add'),
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : No2MaxWater.py
@Time : 2021/02/04 20:59:23
@Author : kangqing
@Contact : kangqing.37@gmail.com
@Software: VS Code
@Desc    : Container With Most Water, two-pointer approach
'''
from typing import List
# here put the import lib
class Solution:
def maxArea(self, height: List[int]) -> int:
        # best area so far, left pointer, right pointer
count, left, right = -1, 0, len(height) - 1
lvalue, rvalue = height[0], height[len(height) - 1]
while left < right:
count = max(count, min(height[left], height[right]) * (right - left))
if height[left] < height[right]:
left += 1
while left < right and height[left] <= lvalue:
left += 1
lvalue = height[left]
else:
right -= 1
while left < right and height[right] <= rvalue:
right -= 1
rvalue = height[right]
return count
if __name__ == '__main__':
s = Solution()
print(s.maxArea([1,8,6,2,5,4,8,3,7]))
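# For the sample input above the expected result is 49: the best container uses the bars
# of height 8 (index 1) and 7 (index 8), giving min(8, 7) * (8 - 1) = 49.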
|
from rsp1570serial.commands import encode_command, encode_volume_direct_command
import unittest
class RotelTestCommands(unittest.TestCase):
def test_encode_power_toggle(self):
self.assertEqual(encode_command("POWER_TOGGLE"), b"\xfe\x03\xa3\x10\x0a\xc0")
def test_encode_mute_toggle(self):
self.assertEqual(encode_command("MUTE_TOGGLE"), b"\xfe\x03\xa3\x10\x1e\xd4")
def test_meta_encoding_of_escape_byte(self):
self.assertEqual(encode_command("VOLUME_40"), b"\xfe\x03\xa3\x30\x28\xfd\x01")
def test_meta_encoding_of_start_byte(self):
self.assertEqual(
encode_command("ZONE_3_VOLUME_36"), b"\xfe\x03\xa3\x33\x24\xfd\x00"
)
def test_volume_direct_command(self):
self.assertEqual(
encode_volume_direct_command(3, 36), b"\xfe\x03\xa3\x33\x24\xfd\x00"
)
def test_volume_direct_command2(self):
self.assertEqual(
encode_volume_direct_command(4, 80), b"\xfe\x03\xa3\x34\x50\x2a"
)
self.assertEqual(
encode_volume_direct_command(4, 95), b"\xfe\x03\xa3\x34\x5f\x39"
) # Wrong in protocol spec
self.assertEqual(
encode_volume_direct_command(4, 96), b"\xfe\x03\xa3\x34\x60\x3a"
) # Wrong in protocol spec
def test_make_invalid_command(self):
with self.assertRaises(KeyError):
encode_command("INVALID_COMMAND")
def test_make_invalid_volume_direct_commands(self):
with self.assertRaises(ValueError):
encode_volume_direct_command(5, 50)
with self.assertRaises(ValueError):
encode_volume_direct_command(1, -1)
with self.assertRaises(ValueError):
encode_volume_direct_command(1, 97)
|
def mystery():
num = 10 * 3
if num == 10:
print("Condition 10")
num = num * 10
elif num == 30:
print("Condition 30")
num = num * 30
print(f'num was {num}')
return num
print(mystery())
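# Expected output when this snippet runs (num is 30, so the elif branch fires and
# num becomes 900):
#   Condition 30
#   num was 900
#   900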
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bizonet_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
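# Worked example (illustrative, not part of the original script):
#   find_format_specifiers('%1 of %2 blocks (%s)')  -> ['1', '2', 's']
#   split_format_specifiers(['1', '2', 's'])        -> ({'1', '2'}, ['s'])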
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # need to override the encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
from gears import Gear
import os
OUTPUT_DIR = 'output'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
gList = range(3,200+1) + range(220,500+10,10) + range(550,1050,50)
for i in gList:
try:
print "Generating %i tooth gear..." % i
g = Gear(numTeeth = i)
fname = './output/gear%i.dxf' % i
g.render2DXF(fname)
except KeyboardInterrupt:
print "Stopped."
break
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
XLNetConfig,
XLNetModel,
XLNetLMHeadModel,
XLNetForMultipleChoice,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
)
from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST
class XLNetModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
mem_len=10,
clamp_len=-1,
reuse_len=15,
is_training=True,
use_labels=True,
vocab_size=99,
cutoffs=[10, 50, 80],
hidden_size=32,
num_attention_heads=4,
d_inner=128,
num_hidden_layers=5,
type_sequence_label_size=2,
untie_r=True,
bi_data=False,
same_length=False,
initializer_range=0.05,
seed=1,
type_vocab_size=2,
bos_token_id=1,
eos_token_id=2,
pad_token_id=5,
num_choices=4,
):
self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.mem_len = mem_len
        # self.key_len = seq_length + mem_len
        self.clamp_len = clamp_len
        self.reuse_len = reuse_len
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.cutoffs = cutoffs
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.d_inner = d_inner
        self.num_hidden_layers = num_hidden_layers
        self.type_sequence_label_size = type_sequence_label_size
        self.untie_r = untie_r
        self.bi_data = bi_data
        self.same_length = same_length
        self.initializer_range = initializer_range
        self.seed = seed
        self.type_vocab_size = type_vocab_size
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.num_choices = num_choices
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
perm_mask = torch.zeros(
self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float, device=torch_device,
)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros(self.batch_size, 1, self.seq_length + 1, dtype=torch.float, device=torch_device,)
target_mapping[:, 0, -1] = 1.0 # predict last token
sequence_labels = None
lm_labels = None
is_impossible_labels = None
token_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
token_labels = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = XLNetConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
n_head=self.num_attention_heads,
d_inner=self.d_inner,
n_layer=self.num_hidden_layers,
untie_r=self.untie_r,
mem_len=self.mem_len,
clamp_len=self.clamp_len,
same_length=self.same_length,
reuse_len=self.reuse_len,
bi_data=self.bi_data,
initializer_range=self.initializer_range,
num_labels=self.type_sequence_label_size,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
return_dict=True,
)
return (
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
)
def set_seed(self):
random.seed(self.seed)
torch.manual_seed(self.seed)
def create_and_check_xlnet_base_model(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1, input_mask=input_mask)
result = model(input_ids_1, attention_mask=input_mask)
result = model(input_ids_1, token_type_ids=segment_ids)
result = model(input_ids_1)
config.mem_len = 0
model = XLNetModel(config)
model.to(torch_device)
model.eval()
base_model_output = model(input_ids_1)
self.parent.assertEqual(len(base_model_output), 2)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_model_use_cache(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
causal_mask = torch.ones(
input_ids_1.shape[0], input_ids_1.shape[1], input_ids_1.shape[1], dtype=torch.float, device=torch_device,
)
causal_mask = torch.triu(causal_mask, diagonal=0)
outputs_cache = model(input_ids_1, use_cache=True, perm_mask=causal_mask)
outputs_no_cache = model(input_ids_1, use_cache=False, perm_mask=causal_mask)
outputs_conf = model(input_ids_1)
self.parent.assertTrue(len(outputs_cache) == len(outputs_conf))
self.parent.assertTrue(len(outputs_cache) == len(outputs_no_cache) + 1)
output, mems = outputs_cache.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids_1, next_tokens], dim=-1)
# causal mask
causal_mask = torch.ones(
input_ids_1.shape[0],
input_ids_1.shape[1] + 1,
input_ids_1.shape[1] + 1,
dtype=torch.float,
device=torch_device,
)
causal_mask = torch.triu(causal_mask, diagonal=0)
single_mask = torch.ones(input_ids_1.shape[0], 1, 1, dtype=torch.float, device=torch_device)
# second forward pass
output_from_no_past = model(next_input_ids, perm_mask=causal_mask)["last_hidden_state"]
output_from_past = model(next_tokens, mems=mems, perm_mask=single_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_xlnet_base_model_with_att_output(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
attentions = model(input_ids_1, target_mapping=target_mapping, output_attentions=True)["attentions"]
self.parent.assertEqual(len(attentions), config.n_layer)
self.parent.assertIsInstance(attentions[0], tuple)
self.parent.assertEqual(len(attentions[0]), 2)
self.parent.assertTrue(attentions[0][0].shape, attentions[0][0].shape)
def create_and_check_xlnet_lm_head(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetLMHeadModel(config)
model.to(torch_device)
model.eval()
result1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels)
result2 = model(input_ids_2, token_type_ids=segment_ids, labels=lm_labels, mems=result1["mems"])
_ = model(input_ids_q, perm_mask=perm_mask, target_mapping=target_mapping)
self.parent.assertEqual(result1.loss.shape, ())
self.parent.assertEqual(result1.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result1["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertEqual(result2.loss.shape, ())
self.parent.assertEqual(result2.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result2["mems"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_qa(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result_with_labels = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
result_with_labels = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
total_loss, mems = result_with_labels.to_tuple()
result_with_labels = model(input_ids_1, start_positions=sequence_labels, end_positions=sequence_labels,)
total_loss, mems = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_token_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result = model(input_ids_1, labels=token_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.type_sequence_label_size))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_sequence_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result = model(input_ids_1, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_torch
class XLNetModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
XLNetModel,
XLNetLMHeadModel,
XLNetForTokenClassification,
XLNetForSequenceClassification,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForMultipleChoice,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLNetLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
test_pruning = False
def setUp(self):
self.model_tester = XLNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xlnet_base_model(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs)
def test_xlnet_base_model_use_cache(self):
# checking that in auto-regressive mode, `use_cache` gives the same results
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_model_use_cache(*config_and_inputs)
def test_xlnet_base_model_with_att_output(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_base_model_with_att_output(*config_and_inputs)
def test_xlnet_lm_head(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs)
def test_xlnet_sequence_classif(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)
def test_xlnet_token_classif(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_token_classif(*config_and_inputs)
def test_xlnet_qa(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class XLNetModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_xlnet_base_cased(self):
model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased")
model.to(torch_device)
input_ids = torch.tensor(
[
[
67,
2840,
19,
18,
1484,
20,
965,
29077,
8719,
1273,
21,
45,
273,
17,
10,
15048,
28,
27511,
21,
4185,
11,
41,
2444,
9,
32,
1025,
20,
8719,
26,
23,
673,
966,
19,
29077,
20643,
27511,
20822,
20643,
19,
17,
6616,
17511,
18,
8978,
20,
18,
777,
9,
19233,
1527,
17669,
19,
24,
673,
17,
28756,
150,
12943,
4354,
153,
27,
442,
37,
45,
668,
21,
24,
256,
20,
416,
22,
2771,
4901,
9,
12943,
4354,
153,
51,
24,
3004,
21,
28142,
23,
65,
20,
18,
416,
34,
24,
2958,
22947,
9,
1177,
45,
668,
3097,
13768,
23,
103,
28,
441,
148,
48,
20522,
19,
12943,
4354,
153,
12860,
34,
18,
326,
27,
17492,
684,
21,
6709,
9,
8585,
123,
266,
19,
12943,
4354,
153,
6872,
24,
3004,
20,
18,
9225,
2198,
19,
12717,
103,
22,
401,
24,
6348,
9,
12943,
4354,
153,
1068,
2768,
2286,
19,
33,
104,
19,
176,
24,
9313,
19,
20086,
28,
45,
10292,
9,
4,
3,
]
],
dtype=torch.long,
device=torch_device,
)
# In 1991, the remains of Russian Tsar Nicholas II and his family
# (except for Alexei and Maria) are discovered.
# The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
# remainder of the story. 1883 Western Siberia,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic.
# Rasputin has a vision and denounces one of the men as a horse thief. Although his
# father initially slaps him for making such an accusation, Rasputin watches as the
# man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
# the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
        # with people, even a bishop, begging for his blessing.
expected_output_ids = [
67,
2840,
19,
18,
1484,
20,
965,
29077,
8719,
1273,
21,
45,
273,
17,
10,
15048,
28,
27511,
21,
4185,
11,
41,
2444,
9,
32,
1025,
20,
8719,
26,
23,
673,
966,
19,
29077,
20643,
27511,
20822,
20643,
19,
17,
6616,
17511,
18,
8978,
20,
18,
777,
9,
19233,
1527,
17669,
19,
24,
673,
17,
28756,
150,
12943,
4354,
153,
27,
442,
37,
45,
668,
21,
24,
256,
20,
416,
22,
2771,
4901,
9,
12943,
4354,
153,
51,
24,
3004,
21,
28142,
23,
65,
20,
18,
416,
34,
24,
2958,
22947,
9,
1177,
45,
668,
3097,
13768,
23,
103,
28,
441,
148,
48,
20522,
19,
12943,
4354,
153,
12860,
34,
18,
326,
27,
17492,
684,
21,
6709,
9,
8585,
123,
266,
19,
12943,
4354,
153,
6872,
24,
3004,
20,
18,
9225,
2198,
19,
12717,
103,
22,
401,
24,
6348,
9,
12943,
4354,
153,
1068,
2768,
2286,
19,
33,
104,
19,
176,
24,
9313,
19,
20086,
28,
45,
10292,
9,
4,
3,
19,
12943,
4354,
153,
27,
442,
22,
2771,
4901,
9,
69,
27,
442,
22,
2771,
24,
11335,
20,
18,
9225,
2198,
9,
69,
27,
442,
22,
2771,
24,
11335,
20,
18,
9225,
2198,
9,
69,
27,
442,
22,
2771,
]
# In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria)
# are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich,
# narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin
# is asked by his father and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially slaps
# him for making such an accusation, Rasputin watches as the man is chased outside and beaten.
# Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing.
# <sep><cls>, Rasputin is asked to perform magic. He is asked to perform a ritual of the Virgin Mary.
# He is asked to perform a ritual of the Virgin Mary. He is asked to perform
output_ids = model.generate(input_ids, max_length=200, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
|
from django.contrib import admin
from .models import *
from translations.admin import TranslatableAdmin, TranslationInline
admin.site.register(Numero)
admin.site.register(Aliado)
admin.site.register(Programa)
admin.site.register(Promocion)
admin.site.register(Alumni)
admin.site.register(Prensa)
admin.site.register(Publicacion)
admin.site.register(Recurso)
class ContinentAdmin(TranslatableAdmin):
inlines = [TranslationInline]
|
import logging
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import MultiLabelField
from allennlp.data.vocabulary import Vocabulary
class TestMultiLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
f = MultiLabelField([2, 3], skip_indexing=True, label_namespace="test1", num_labels=5)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().tolist()
assert tensor == [0, 0, 1, 1, 0]
assert {type(item) for item in tensor} == {int}
def test_multilabel_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("rel0", namespace="rel_labels")
vocab.add_token_to_namespace("rel1", namespace="rel_labels")
vocab.add_token_to_namespace("rel2", namespace="rel_labels")
f = MultiLabelField(["rel1", "rel0"], label_namespace="rel_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([1, 1, 0]))
def test_multilabel_field_raises_with_non_integer_labels_and_no_indexing(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField(["non integer field"], skip_indexing=True)
def test_multilabel_field_raises_with_no_indexing_and_missing_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2], skip_indexing=True, num_labels=None)
def test_multilabel_field_raises_with_no_indexing_and_wrong_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2, 4], skip_indexing=True, num_labels=3)
def test_multilabel_field_raises_with_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False)
def test_multilabel_field_raises_with_given_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False, num_labels=4)
def test_multilabel_field_empty_field_works(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("label1", namespace="test_empty_labels")
vocab.add_token_to_namespace("label2", namespace="test_empty_labels")
f = MultiLabelField([], label_namespace="test_empty_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
g = f.empty_field()
g.index(vocab)
tensor = g.as_tensor(g.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
h = MultiLabelField(
[0, 0, 1], label_namespace="test_empty_labels", num_labels=3, skip_indexing=True
)
tensor = h.empty_field().as_tensor(None).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0, 0]))
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.multilabel_field"):
assert "text" not in MultiLabelField._already_warned_namespaces
_ = MultiLabelField(["test"], label_namespace="text")
assert caplog.records
            # We've warned once, so the namespace should have been added to the class variable.
assert "text" in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test2"], label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test"], label_namespace="text2")
            assert caplog.records
def test_printing_doesnt_crash(self):
field = MultiLabelField(["label"], label_namespace="namespace")
print(field)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import PolicyTagManagerClient
from .async_client import PolicyTagManagerAsyncClient
__all__ = (
"PolicyTagManagerClient",
"PolicyTagManagerAsyncClient",
)
|
from unittest import mock
import pytest
from laim import Laim
from laim.laim import TaskArguments
pytestmark = pytest.mark.integration
def test_handler(temp_config):
class Handler(Laim):
def handle_message(self, sender, recipients, message):
self.stop()
with mock.patch('laim.laim.drop_privileges'):
with mock.patch('laim.laim.LaimController'):
handler = Handler(config_file=temp_config)
handler.queue.put(TaskArguments(mock.Mock(), [], b''))
handler._start_worker()
assert handler.stop_event.is_set()
def test_crashing_handler(temp_config):
class Handler(Laim):
def handle_message(self, sender, recipients, message):
self.stop()
raise ValueError()
with mock.patch('laim.laim.drop_privileges'):
with mock.patch('laim.laim.LaimController'):
handler = Handler(config_file=temp_config)
handler.queue.put(TaskArguments(mock.Mock(), [], b''))
handler._start_worker()
# the worker thread shouldn't crash
assert handler.stop_event.is_set()
|
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("..")
from utils import *
from linear_regression import *
from svm import *
from softmax import *
from features import *
from kernel import *
#######################################################################
# 1. Introduction
#######################################################################
# Load MNIST data:
train_x, train_y, test_x, test_y = get_MNIST_data()
# Plot the first 20 images of the training set.
# plot_images(train_x[0:20, :])
#######################################################################
# 2. Linear Regression with Closed Form Solution
#######################################################################
# TODO: first fill out functions in linear_regression.py, or the below functions will not work
def run_linear_regression_on_MNIST(lambda_factor=0.01):
"""
Trains linear regression, classifies test data, computes test error on test set
Returns:
Final test error
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
train_x_bias = np.hstack([np.ones([train_x.shape[0], 1]), train_x])
test_x_bias = np.hstack([np.ones([test_x.shape[0], 1]), test_x])
    theta = closed_form(train_x_bias, train_y, lambda_factor)
    test_error = compute_test_error_linear(test_x_bias, test_y, theta)
return test_error
# Don't run this until the relevant functions in linear_regression.py have been fully implemented.
# print('Linear Regression test_error =', run_linear_regression_on_MNIST(lambda_factor=1))
#######################################################################
# 3. Support Vector Machine
#######################################################################
# TODO: first fill out functions in svm.py, or the below functions will not work
def run_svm_one_vs_rest_on_MNIST():
"""
Trains svm, classifies test data, computes test error on test set
Returns:
Test error for the binary svm
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
train_y[train_y != 0] = 1
test_y[test_y != 0] = 1
pred_test_y = one_vs_rest_svm(train_x, train_y, test_x)
test_error = compute_test_error_svm(test_y, pred_test_y)
return test_error
# print('SVM one vs. rest test_error:', run_svm_one_vs_rest_on_MNIST())
# for c in range(1, 10):
# print('SVM one vs. rest test_error:', run_svm_one_vs_rest_on_MNIST(c*3))
def run_multiclass_svm_on_MNIST():
"""
Trains svm, classifies test data, computes test error on test set
Returns:
        Test error for the multiclass SVM
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
pred_test_y = multi_class_svm(train_x, train_y, test_x)
test_error = compute_test_error_svm(test_y, pred_test_y)
return test_error
# print('Multiclass SVM test_error:', run_multiclass_svm_on_MNIST())
#######################################################################
# 4. Multinomial (Softmax) Regression and Gradient Descent
#######################################################################
# TODO: first fill out functions in softmax.py, or run_softmax_on_MNIST will not work
def run_softmax_on_MNIST(temp_parameter=1):
"""
Trains softmax, classifies test data, computes test error, and plots cost function
Runs softmax_regression on the MNIST training set and computes the test error using
the test set. It uses the following values for parameters:
alpha = 0.3
lambda = 1e-4
num_iterations = 150
Saves the final theta to ./theta.pkl.gz
Returns:
Final test error
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
theta, cost_function_history = softmax_regression(train_x, train_y, temp_parameter, alpha= 0.3, lambda_factor = 1.0e-4, k = 10, num_iterations = 150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_x, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
write_pickle_data(theta, "./theta.pkl.gz")
# TODO: add your code here for the "Using the Current Model" question in tab 4.
# and print the test_error_mod3
# _, test_y_mod3 = update_y(train_y, test_y)
# test_error_mod3 = compute_test_error_mod3(test_x, test_y_mod3, theta, temp_parameter)
return test_error
# print('softmax test_error=', run_softmax_on_MNIST(temp_parameter=1))
# TODO: Find the error rate for temp_parameter = [.5, 1.0, 2.0]
# Remember to set temp_parameter back to 1, and re-run run_softmax_on_MNIST
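# A minimal sketch of that sweep (commented out like the other run lines in this file;
# it assumes run_softmax_on_MNIST behaves as defined above):
# for temp in [0.5, 1.0, 2.0]:
#     print('softmax test_error (temp_parameter=%s) =' % temp,
#           run_softmax_on_MNIST(temp_parameter=temp))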
#######################################################################
# 6. Changing Labels
#######################################################################
#pragma: coderesponse template
def run_softmax_on_MNIST_mod3(temp_parameter=1):
"""
Trains Softmax regression on digit (mod 3) classifications.
See run_softmax_on_MNIST for more info.
"""
#YOUR CODE HERE
# raise NotImplementedError
train_x, train_y, test_x, test_y = get_MNIST_data()
train_y, test_y = update_y(train_y, test_y)
theta, cost_function_history = softmax_regression(train_x, train_y, temp_parameter, alpha= 0.3, lambda_factor = 1.0e-4, k = 10, num_iterations = 150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_x, test_y, theta, temp_parameter)
#pragma: coderesponse end
return test_error
# TODO: Run run_softmax_on_MNIST_mod3(), report the error rate
# print('softmax_mod3 test_error=', run_softmax_on_MNIST_mod3(temp_parameter=1))
#######################################################################
# 7. Classification Using Manually Crafted Features
#######################################################################
## Dimensionality reduction via PCA ##
# TODO: First fill out the PCA functions in features.py as the below code depends on them.
def run_softmax_on_MNIST_pca():
n_components = 18
pcs = principal_components(train_x)
train_pca = project_onto_PC(train_x, pcs, n_components)
test_pca = project_onto_PC(test_x, pcs, n_components)
# train_pca (and test_pca) is a representation of our training (and test) data
# after projecting each example onto the first 18 principal components.
# TODO: Train your softmax regression model using (train_pca, train_y)
# and evaluate its accuracy on (test_pca, test_y).
temp_parameter = 1.0
theta, cost_function_history = softmax_regression(train_pca, train_y,\
temp_parameter, alpha= 0.3, lambda_factor = 1.0e-4, k = 10, num_iterations = 150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_pca, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
# write_pickle_data(theta, "./theta.pkl.gz")
# TODO: Use the plot_PC function in features.py to produce scatterplot
# of the first 100 MNIST images, as represented in the space spanned by the
# first 2 principal components found above.
plot_PC(train_x[range(100),], pcs, train_y[range(100)])
# TODO: Use the reconstruct_PC function in features.py to show
# the first and second MNIST images as reconstructed solely from
# their 18-dimensional principal component representation.
# Compare the reconstructed images with the originals.
firstimage_reconstructed = reconstruct_PC(train_pca[0, ], pcs, n_components, train_x)
plot_images(firstimage_reconstructed)
plot_images(train_x[0,])
secondimage_reconstructed = reconstruct_PC(train_pca[1, ], pcs, n_components, train_x)
plot_images(secondimage_reconstructed)
plot_images(train_x[1,])
return test_error
# print('softmax_pc_test_error=', run_softmax_on_MNIST_pca())
## Cubic Kernel ##
# TODO: Find the 10-dimensional PCA representation of the training and test set
def run_softmax_on_MNIST_cubic_kernel():
n_components = 10
pcs_10 = principal_components(train_x)
train_pca10 = project_onto_PC(train_x, pcs_10, n_components)
test_pca10 = project_onto_PC(test_x, pcs_10, n_components)
# TODO: First fill out cubicFeatures() function in features.py as the below code requires it.
train_cube = cubic_features(train_pca10)
test_cube = cubic_features(test_pca10)
# train_cube (and test_cube) is a representation of our training (and test) data
# after applying the cubic kernel feature mapping to the 10-dimensional PCA representations.
temp_parameter = 1.0
theta, cost_function_history = softmax_regression(train_cube, train_y,\
temp_parameter, alpha= 0.3, lambda_factor = 1.0e-4, k = 10, num_iterations = 150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_cube, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
# write_pickle_data(theta, "./theta.pkl.gz")
return test_error
# TODO: Train your softmax regression model using (train_cube, train_y)
# and evaluate its accuracy on (test_cube, test_y).
# print('softmax_pc_test_error=', run_softmax_on_MNIST_cubic_kernel())
|
__all__ = ["normal_platform"]
|
from .integration import *
from .integration_account import *
from .integration_application import *
from .integration_detail import *
from .utils import *
__all__ = (
*integration.__all__,
*integration_account.__all__,
*integration_application.__all__,
*integration_detail.__all__,
*utils.__all__,
)
|
""" __init__ """
from .legal_entity_name_generator2 import LegalEntityNameGenerator2
from .lei_generator import LeiGenerator
from .sic_range_generator import SicRangeGenerator
from .sic_code_generator import SicCodeGenerator
from .country_code_generator import CountryCodeGenerator
from .fund_name_generator import FundNameGenerator
from .company_name_faker import CompanyNameFaker
from .company_name_markov import CompanyNameMarkov
from .company_name_suffix import CompanyNameSuffixGenerator
from .normal_generator import NormalGenerator
from .address_generator import AddressGenerator
from .person_generator import PersonGenerator
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'audio_receiver/audio_receiver.gypi',
'video_receiver/video_receiver.gypi',
],
'targets': [
{
'target_name': 'cast_receiver',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
'<(DEPTH)/third_party/',
'<(DEPTH)/third_party/webrtc/',
],
'sources': [
'cast_receiver.h',
'cast_receiver_impl.cc',
'cast_receiver_impl.h',
], # source
'dependencies': [
'<(DEPTH)/crypto/crypto.gyp:crypto',
'cast_audio_receiver',
'cast_video_receiver',
'net/pacing/paced_sender.gyp:cast_paced_sender',
'rtp_receiver/rtp_receiver.gyp:cast_rtp_receiver',
],
},
],
}
|
"""
next dashboard.py
author: Lalit Jain, lalitkumarj@gmail.com
last updated: 9/16/15
Flask controller for dashboards.
"""
import os
import json
import yaml
from flask import Blueprint, render_template, url_for, request, jsonify
from jinja2 import Environment, PackageLoader, ChoiceLoader
import requests
import next.broker.broker
import next.constants as constants
import next.database_client.PermStore as PermStore
from next.api.resource_manager import ResourceManager
import next.api.api_util as api_util
import next.utils as utils
# Declare this as the dashboard blueprint
dashboard = Blueprint('dashboard',
__name__,
template_folder='templates',
static_folder='static')
rm = ResourceManager()
db = PermStore.PermStore()
broker = next.broker.broker.JobBroker()
import next.apps.Butler as Butler
Butler = Butler.Butler
# add database commands
dashboard_interface = api_util.NextBackendApi(dashboard)
from next.dashboard.database import DatabaseBackup, DatabaseRestore
dashboard_interface.add_resource(DatabaseBackup,'/database/databasebackup', endpoint='databasebackup')
dashboard_interface.add_resource(DatabaseRestore,'/database/databaserestore', endpoint='databaserestore')
@dashboard.route('/experiment_list')
def experiment_list():
"""
Endpoint that renders a page with a simple list of all experiments.
"""
# Experiments set
experiments = []
for app_id in rm.get_app_ids():
for exp_uid in rm.get_app_exp_uids(app_id):
start_date = rm.get_app_exp_uid_start_date(exp_uid)
try:
experiments.append({'exp_uid': exp_uid,
'app_id': app_id,
'start_date': start_date,
'num_participants':len(rm.get_participant_uids(exp_uid)),
})
except IndexError as e:
print e
pass
host_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
constants.NEXT_BACKEND_GLOBAL_PORT)
if constants.SITE_KEY:
dashboard_url='{}/dashboard/{}'.format(host_url, constants.SITE_KEY)
else:
dashboard_url='{}/dashboard'.format(host_url)
return render_template('experiment_list.html',
dashboard_url=dashboard_url,
experiments = reversed(experiments))
@dashboard.route('/get_stats', methods=['POST'])
def get_stats():
args_dict = request.json
exp_uid = args_dict['exp_uid']
app_id = rm.get_app_id(exp_uid)
response_json,didSucceed,message = broker.dashboardAsync(app_id,exp_uid,args_dict)
response_dict = json.loads(response_json,parse_float=lambda o:round(float(o),4))
response_json = json.dumps(response_dict)
return response_json
@dashboard.route('/system_monitor')
def system_monitor():
"""
Endpoint that renders a page with a simple list of all monitoring.
"""
host_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
constants.NEXT_BACKEND_GLOBAL_PORT)
if constants.SITE_KEY:
dashboard_url='{}/dashboard/{}'.format(host_url, constants.SITE_KEY)
else:
dashboard_url='{}/dashboard'.format(host_url)
rabbit_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
15672)
cadvisor_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
8888)
mongodb_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
28017)
return render_template('system_monitor.html',
dashboard_url=dashboard_url,
rabbit_url=rabbit_url,
cadvisor_url=cadvisor_url,
mongodb_url=mongodb_url)
@dashboard.route('/experiment_dashboard/<exp_uid>/<app_id>')
def experiment_dashboard(exp_uid, app_id):
"""
Endpoint that renders the experiment dashboard.
Inputs: ::\n
(string) exp_uid, exp_uid for a current experiment.
"""
simple_flag = int(request.args.get('simple',0))
force_recompute = int(request.args.get('force_recompute',1))
# Not a particularly good way to do this.
alg_label_list = rm.get_algs_for_exp_uid(exp_uid)
alg_list = [{'alg_label':alg['alg_label'],
'alg_label_clean':'_'.join(alg['alg_label'].split())}
for alg in alg_label_list]
host_url = 'http://{}:{}'.format(constants.NEXT_BACKEND_GLOBAL_HOST,
constants.NEXT_BACKEND_GLOBAL_PORT)
if constants.SITE_KEY:
dashboard_url='{}/dashboard/{}'.format(host_url, constants.SITE_KEY)
else:
dashboard_url='{}/dashboard'.format(host_url)
env = Environment(loader=ChoiceLoader([PackageLoader('apps.{}'.format(app_id),
'dashboard'),
PackageLoader('next.dashboard',
'templates')]))
    template = env.get_template('myAppDashboard.html')  # resolved from /next/apps/{{ app_id }}/dashboard/ first, then next/dashboard/templates/
return template.render(app_id=app_id,
exp_uid=exp_uid,
alg_list=alg_list,
host_url=host_url,
dashboard_url=dashboard_url,
exceptions_present=exceptions_present(exp_uid, host_url),
url_for=url_for,
simple_flag=int(simple_flag),
force_recompute=int(force_recompute))
def exceptions_present(exp_uid, host_url):
url = '{}/api/experiment/{}/logs/APP-EXCEPTION'.format(host_url, exp_uid)
r = requests.get(url)
logs = yaml.load(r.content)['log_data']
    return len(logs) > 0
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.common.exceptions import WebDriverException
from selenium import webdriver
import os
import time
from datetime import date
MAX_WAIT = 20
def wait(fn):
def modified_fn(*args, **kwargs):
start_time = time.time()
while True:
try:
return fn(*args, **kwargs)
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
return modified_fn
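# Usage sketch: any method decorated with @wait is retried every 0.5 seconds until it
# stops raising (or MAX_WAIT seconds elapse), as the wait_for helper below demonstrates.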
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.staging_server = os.environ.get('STAGING_SERVER')
if self.staging_server:
from .server_tools import reset_database
self.live_server_url = 'http://' + self.staging_server
reset_database(self.staging_server)
def tearDown(self):
self.browser.quit()
super().tearDown()
@wait
def wait_for(self, fn):
return fn()
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BlackHatTestFramework.main`.
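Example invocations (illustrative sketches, run from the repository root):
  test/functional/test_runner.py                        # run the BASE_SCRIPTS suite
  test/functional/test_runner.py --extended --jobs=8    # also run EXTENDED_SCRIPTS, 8 jobs in parallel
  test/functional/test_runner.py wallet_basic rpc_net   # run the named tests ('.py' suffix optional)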
"""
import argparse
from collections import deque
import configparser
import datetime
import logging
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_basic.py', # ~ 498 sec
'wallet_backup.py', # ~ 477 sec
'wallet_reorgsrestore.py', # ~ 391 sec
'mempool_persist.py', # ~ 417 sec
# vv Tests less than 5m vv
'wallet_hd.py', # ~ 300 sec
'wallet_zapwallettxes.py', # ~ 300 sec
'p2p_time_offset.py', # ~ 267 sec
'rpc_fundrawtransaction.py', # ~ 227 sec
'mining_pos_coldStaking.py', # ~ 220 sec
'wallet_import_rescan.py', # ~ 204 sec
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'p2p_invalid_block.py', # ~ 213 sec
'p2p_addr_relay.py',
'p2p_addrv2_relay.py',
'p2p_invalid_messages.py',
'feature_reindex.py', # ~ 205 sec
'feature_logging.py', # ~ 195 sec
'wallet_multiwallet.py', # ~ 190 sec
'wallet_abandonconflict.py', # ~ 188 sec
'feature_blockindexstats.py', # ~ 167 sec
'wallet_importmulti.py', # ~ 157 sec
'wallet_keypool_topup.py', # ~ 153 sec
'rpc_spork.py', # ~ 144 sec
'wallet_txn_doublespend.py --mineblock', # ~ 143 sec
'wallet_txn_clone.py --mineblock', # ~ 143 sec
'feature_block.py', # ~ 140 sec
'feature_proxy.py', # ~ 138 sec
'rpc_rawtransaction.py', # ~ 134 sec
'mining_pos_reorg.py', # ~ 128 sec
'feature_uacomment.py', # ~ 125 sec
'interface_rest.py', # ~ 120 sec
# vv Tests less than 2m vv
'wallet_upgrade.py', # ~ 119 sec
'p2p_disconnect_ban.py', # ~ 118 sec
'interface_http.py', # ~ 105 sec
'feature_blockhashcache.py', # ~ 100 sec
'p2p_invalid_tx.py', # ~ 98 sec
'wallet_listtransactions.py', # ~ 97 sec
'wallet_listreceivedby.py', # ~ 94 sec
'mining_pos_fakestake.py', # ~ 94 sec
'mempool_reorg.py', # ~ 92 sec
'interface_zmq.py', # ~ 90 sec
'wallet_encryption.py', # ~ 89 sec
'wallet_import_stakingaddress.py', # ~ 88 sec
'wallet_keypool.py', # ~ 88 sec
'feature_blocksdir.py', # ~ 85 sec
'feature_config_args.py', # ~ 85 sec
'wallet_dump.py', # ~ 83 sec
'rpc_net.py', # ~ 83 sec
'rpc_bip38.py', # ~ 82 sec
'rpc_deprecated.py', # ~ 80 sec
'interface_bitcoin_cli.py', # ~ 80 sec
'mempool_packages.py', # ~ 63 sec
# vv Tests less than 60s vv
'rpc_users.py',
'wallet_labels.py', # ~ 57 sec
'rpc_signmessage.py', # ~ 54 sec
'mempool_resurrect.py', # ~ 51 sec
'rpc_budget.py', # ~ 50 sec
'mempool_spend_coinbase.py', # ~ 50 sec
'rpc_signrawtransaction.py', # ~ 50 sec
'rpc_decodescript.py', # ~ 50 sec
'rpc_blockchain.py', # ~ 50 sec
'wallet_resendwallettransactions.py',
'feature_asmap.py',
'wallet_disable.py', # ~ 50 sec
'wallet_autocombine.py', # ~ 49 sec
'mining_v5_upgrade.py', # ~ 48 sec
'p2p_mempool.py', # ~ 46 sec
'rpc_named_arguments.py', # ~ 45 sec
'feature_filelock.py',
'feature_help.py', # ~ 30 sec
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
# 'mempool_limit.py', # We currently don't limit our mempool_reorg
# 'rpc_getchaintips.py',
# 'mining_prioritisetransaction.py',
# 'mining_basic.py',
# 'wallet_bumpfee.py',
# 'wallet_listsinceblock.py',
# 'p2p_leak.py',
# 'feature_cltv.py',
# 'feature_minchainwork.py',
# 'p2p_fingerprint.py',
# 'p2p_unrequested_blocks.py',
]
TIERTWO_SCRIPTS = [
# Longest test should go first, to favor running tests in parallel
'tiertwo_governance_sync_basic.py', # ~ 445 sec
'tiertwo_mn_compatibility.py', # ~ 413 sec
'tiertwo_deterministicmns.py', # ~ 366 sec
'tiertwo_governance_reorg.py', # ~ 361 sec
'tiertwo_masternode_activation.py', # ~ 352 sec
'tiertwo_masternode_ping.py', # ~ 293 sec
'tiertwo_reorg_mempool.py', # ~ 97 sec
'tiertwo_governance_invalid_budget.py',
]
SAPLING_SCRIPTS = [
# Longest test should go first, to favor running tests in parallel
'sapling_key_import_export.py', # ~ 378 sec
'sapling_wallet.py', # ~ 350 sec
'sapling_wallet_anchorfork.py', # ~ 345 sec
'sapling_wallet_nullifiers.py', # ~ 190 sec
'sapling_wallet_listreceived.py', # ~ 157 sec
'sapling_changeaddresses.py', # ~ 151 sec
'sapling_wallet_send.py', # ~ 126 sec
'sapling_mempool.py', # ~ 98 sec
'sapling_wallet_persistence.py', # ~ 90 sec
'sapling_supply.py', # ~ 58 sec
'sapling_malleable_sigs.py', # ~ 44 sec
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
# vv Tests less than 20m vv
'feature_dbcrash.py',
'sapling_fillblock.py', # ~ 780 sec
'feature_fee_estimation.py', # ~ 360 sec
# vv Tests less than 5m vv
# vv Tests less than 2m vv
#'p2p_timeouts.py',
# vv Tests less than 60s vv
#'p2p_feefilter.py',
'feature_abortnode.py',
# vv Tests less than 30s vv
#'example_test.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
LEGACY_SKIP_TESTS = [
# These tests are not run when the flag --legacywallet is used
'feature_block.py',
'feature_blockindexstats.py',
'feature_config_args.py',
'feature_help.py',
'feature_logging.py',
'feature_reindex.py',
'feature_proxy.py',
'feature_uacomment.py',
'interface_bitcoin_cli.py',
'interface_http.py',
'interface_rest.py',
'mempool_reorg.py',
'mempool_resurrect.py',
'mempool_spend_coinbase.py',
'p2p_disconnect_ban.py',
'p2p_time_offset.py',
'rpc_bip38.py',
'rpc_blockchain.py',
'rpc_budget.py',
'rpc_decodescript.py',
'rpc_fundrawtransaction.py',
'rpc_net.py',
'rpc_signmessage.py',
'rpc_spork.py',
'rpc_users.py',
'wallet_hd.py', # no HD tests for pre-HD wallets
'wallet_upgrade.py', # can't upgrade to pre-HD wallet
'sapling_wallet_persistence.py',
'sapling_wallet.py',
'sapling_changeaddresses.py',
'sapling_key_import_export.py',
'sapling_wallet_anchorfork.py',
'sapling_wallet_listreceived.py',
'sapling_wallet_nullifiers.py',
'sapling_mempool.py',
'wallet_importmulti.py',
'wallet_import_rescan.py',
'wallet_multiwallet.py',
]
# Place the lists with the longest tests (on average) first
ALL_SCRIPTS = EXTENDED_SCRIPTS + TIERTWO_SCRIPTS + SAPLING_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--all', '-a', action='store_true', help='run all available tests (overrides other flags)')
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--skipcache', '-s', action='store_true', help='do NOT create a cache with the test run (tests that make use of the cache will fail). Takes precedence over --keepcache')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--legacywallet', '-w', action='store_true', help='create pre-HD wallets only')
parser.add_argument('--tiertwo', '-m', action='store_true', help='run tier two tests only')
parser.add_argument('--sapling', '-z', action='store_true', help='run sapling tests only')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
if args.legacywallet:
passon_args.append("--legacywallet")
if args.tiertwo:
passon_args.append("--tiertwo")
if args.sapling:
passon_args.append("--sapling")
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/blkc_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_blkcd = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_blkcd):
print("No functional tests to run. Wallet, utils, and blkcd must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if args.all:
test_list = ALL_SCRIPTS
else:
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
test_list = []
if args.tiertwo:
test_list += TIERTWO_SCRIPTS
if args.sapling:
test_list += SAPLING_SCRIPTS
if len(test_list) == 0:
# No individual tests (or sub-list) have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
# If --legacywallet, remove extra test cases
if args.legacywallet:
test_list = [x for x in test_list if x not in LEGACY_SKIP_TESTS]
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
config["environment"]["EXEEXT"],
tmpdir,
args.jobs, args.coverage,
passon_args, args.combinedlogslen,
"skip" if args.skipcache else ("keep" if args.keepcache else "rewrite"))
# keep_cache can either be
# - "rewrite" : (default) Delete cache directory and recreate it.
# - "keep" : Check if the cache in the directory is valid. Recreate only if invalid.
# - "skip" : Don't check the contents of the cache and don't create a new one
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0, keep_cache="rewrite"):
# Warn if blkcd is already running (unix only)
try:
if subprocess.check_output(["pidof", "blkcd"]) is not None:
print("%sWARNING!%s There is already a blkcd process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/blkcd' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/blkc-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
# Send a ping message every 5 minutes to not get stalled on Travis.
import threading
pingTime = 5 * 60
stopTimer = False
def pingTravis():
if stopTimer:
return
print("- Creating cache in progress...")
sys.stdout.flush()
threading.Timer(pingTime, pingTravis).start()
if keep_cache == "rewrite":
pingTravis()
if keep_cache != "skip":
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
finally:
stopTimer = True
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
if stderr == "":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
else:
logging.debug("%s passed (with warnings), Duration: %s s" % (done_str, test_result.time))
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie blkcds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
# Print remaining running jobs when all jobs have been started.
if not self.test_list:
print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In Travis, timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED:
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that at most a handful of the
test scripts don't start with one of the allowed name prefixes."""
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|sapling|tiertwo)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if len(bad_script_names) > 0:
print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
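# Illustrative behaviour of the prefix check above (script names are hypothetical):
# good_prefixes_re.match("wallet_basic.py") and good_prefixes_re.match("rpc_misc.py")
# both match, so those names conform; good_prefixes_re.match("smoke_test.py") returns
# None, so that name would be listed and counted against LEEWAY.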
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `blkc-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
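# Sketch of the coverage inputs consumed above (file names follow the class docstring;
# contents are hypothetical): rpc_interface.txt holds one command per line as printed
# by `blkc-cli help` (e.g. "getblockcount"), while each coverage.* file lists only the
# commands a test actually invoked; the set difference is what report_rpc_coverage()
# prints as uncovered.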
if __name__ == '__main__':
main()
|
"""Primitive dict ops."""
from mypyc.ir.ops import ERR_FALSE, ERR_MAGIC, ERR_NEVER, ERR_NEG_INT
from mypyc.ir.rtypes import (
dict_rprimitive, object_rprimitive, bool_rprimitive, int_rprimitive,
list_rprimitive, dict_next_rtuple_single, dict_next_rtuple_pair, c_pyssize_t_rprimitive,
c_int_rprimitive
)
from mypyc.primitives.registry import (
method_op, simple_emit, c_custom_op, c_method_op, c_function_op, c_binary_op, load_address_op
)
# Get the 'dict' type object.
load_address_op(
name='builtins.dict',
type=object_rprimitive,
src='PyDict_Type')
# dict[key]
dict_get_item_op = c_method_op(
name='__getitem__',
arg_types=[dict_rprimitive, object_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetItem',
error_kind=ERR_MAGIC)
# dict[key] = value
dict_set_item_op = c_method_op(
name='__setitem__',
arg_types=[dict_rprimitive, object_rprimitive, object_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_SetItem',
error_kind=ERR_NEG_INT)
# key in dict
c_binary_op(
name='in',
arg_types=[object_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='PyDict_Contains',
error_kind=ERR_NEG_INT,
truncated_type=bool_rprimitive,
ordering=[1, 0])
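# Note on the op above (assumption based on the CPython API): `key in dict` yields
# arg_types of (key, dict), while PyDict_Contains expects (dict, key), so
# ordering=[1, 0] swaps the arguments before the C call; the C int result is then
# truncated to bool_rprimitive.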
# dict1.update(dict2)
dict_update_op = c_method_op(
name='update',
arg_types=[dict_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_Update',
error_kind=ERR_NEG_INT,
priority=2)
# Operation used for **value in dict displays.
# This is mostly like dict.update(obj), but has customized error handling.
dict_update_in_display_op = c_custom_op(
arg_types=[dict_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_UpdateInDisplay',
error_kind=ERR_NEG_INT)
# dict.update(obj)
c_method_op(
name='update',
arg_types=[dict_rprimitive, object_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_UpdateFromAny',
error_kind=ERR_NEG_INT)
# dict.get(key, default)
c_method_op(
name='get',
arg_types=[dict_rprimitive, object_rprimitive, object_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_Get',
error_kind=ERR_MAGIC)
# dict.get(key)
method_op(
name='get',
arg_types=[dict_rprimitive, object_rprimitive],
result_type=object_rprimitive,
error_kind=ERR_MAGIC,
emit=simple_emit('{dest} = CPyDict_Get({args[0]}, {args[1]}, Py_None);'))
# Construct an empty dictionary.
dict_new_op = c_custom_op(
arg_types=[],
return_type=dict_rprimitive,
c_function_name='PyDict_New',
error_kind=ERR_MAGIC)
# Construct a dictionary from keys and values.
# Positional argument is the number of key-value pairs
# Variable arguments are (key1, value1, ..., keyN, valueN).
dict_build_op = c_custom_op(
arg_types=[c_pyssize_t_rprimitive],
return_type=dict_rprimitive,
c_function_name='CPyDict_Build',
error_kind=ERR_MAGIC,
var_arg_type=object_rprimitive)
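# Illustrative lowering using dict_build_op (sketch only; the IR builder emits the
# actual call): a dict display such as {k1: v1, k2: v2} becomes
# CPyDict_Build(2, k1, v1, k2, v2), i.e. the pair count followed by the flattened
# key/value arguments.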
# Construct a dictionary from another dictionary.
c_function_op(
name='builtins.dict',
arg_types=[dict_rprimitive],
return_type=dict_rprimitive,
c_function_name='PyDict_Copy',
error_kind=ERR_MAGIC,
priority=2)
# Generic one-argument dict constructor: dict(obj)
c_function_op(
name='builtins.dict',
arg_types=[object_rprimitive],
return_type=dict_rprimitive,
c_function_name='CPyDict_FromAny',
error_kind=ERR_MAGIC)
# dict.keys()
c_method_op(
name='keys',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_KeysView',
error_kind=ERR_MAGIC)
# dict.values()
c_method_op(
name='values',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_ValuesView',
error_kind=ERR_MAGIC)
# dict.items()
c_method_op(
name='items',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_ItemsView',
error_kind=ERR_MAGIC)
# list(dict.keys())
dict_keys_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Keys',
error_kind=ERR_MAGIC)
# list(dict.values())
dict_values_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Values',
error_kind=ERR_MAGIC)
# list(dict.items())
dict_items_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Items',
error_kind=ERR_MAGIC)
# PyDict_Next() fast iteration
dict_key_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetKeysIter',
error_kind=ERR_MAGIC)
dict_value_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetValuesIter',
error_kind=ERR_MAGIC)
dict_item_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetItemsIter',
error_kind=ERR_MAGIC)
dict_next_key_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_single,
c_function_name='CPyDict_NextKey',
error_kind=ERR_NEVER)
dict_next_value_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_single,
c_function_name='CPyDict_NextValue',
error_kind=ERR_NEVER)
dict_next_item_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_pair,
c_function_name='CPyDict_NextItem',
error_kind=ERR_NEVER)
# check that len(dict) == const during iteration
dict_check_size_op = c_custom_op(
arg_types=[dict_rprimitive, int_rprimitive],
return_type=bool_rprimitive,
c_function_name='CPyDict_CheckSize',
error_kind=ERR_FALSE)
dict_size_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=c_pyssize_t_rprimitive,
c_function_name='PyDict_Size',
error_kind=ERR_NEVER)
|
#!/idgo_venv/bin/python3
# Copyright (c) 2017-2019 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
from urllib.parse import parse_qs
from urllib.parse import urlparse
python_home = "/idgo_venv/"
activate_this = python_home + '/bin/activate_this.py'
exec(open(activate_this).read())
sys.path.append(python_home)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
import django # noqa: E402
django.setup()
from django.contrib.auth.models import User # noqa: E402
from django.db.models import Q # noqa: E402
from functools import reduce # noqa: E402
from idgo_admin.models import Dataset # noqa: E402
from idgo_admin.models import Organisation # noqa: E402
from idgo_admin.models import Resource # noqa: E402
from operator import ior # noqa: E402
logger = logging.getLogger('auth_ogc')
stream_handler = logging.StreamHandler()
# stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
# logger.setLevel(logging.DEBUG)
AUTHORIZED_PREFIX = ['/maps/', '/wfs/', '/wms/', '/wxs/']
# used for parsing address when basic auth is provided
PRIVATE_AUTHORIZED_PREFIX = ["/private{prefix}".format(prefix=p)
for p in AUTHORIZED_PREFIX]
def retrieve_resources_through_ows_url(url):
parsed_url = urlparse(url.lower())
qs = parse_qs(parsed_url.query)
if 'layers' in qs:
layers = qs.get('layers')[-1]
elif 'typename' in qs:
layers = qs.get('typename')[-1]
elif 'typenames' in qs:
layers = qs.get('typenames')[-1]
else:
layers = None
if not layers:
return None
layers = set(layers.replace(' ', '').split(','))
layers = [layer.split(':')[-1] for layer in layers]
datasets_filters = [
Q(slug__in=layers),
Q(organisation__in=Organisation.objects.filter(slug__in=layers).distinct()),
]
datasets = Dataset.objects.filter(reduce(ior, datasets_filters)).distinct()
resources_filters = [
Q(dataset__in=datasets),
Q(layer__name__in=layers),
]
resources = Resource.objects.filter(reduce(ior, resources_filters)).distinct()
return resources
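# Minimal sketch of what the helper above extracts (URL is hypothetical):
#   /wms/?service=WMS&request=GetMap&layers=ws:roads,ws:parcels
# 'layers' is parsed to {'roads', 'parcels'} after stripping workspace prefixes,
# then matched against Dataset slugs, Organisation slugs and Layer names to build
# the Resource queryset.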
def check_password(environ, user, password):
url = environ['REQUEST_URI']
logger.debug('Checking user %s rights to url %s', user, url)
# check path is authorized
is_path_authorized = False
for prefix in AUTHORIZED_PREFIX + PRIVATE_AUTHORIZED_PREFIX:
if url.startswith(prefix):
is_path_authorized = True
if not is_path_authorized:
logger.error("path '%s' is unauthorized", url)
return False
    # GetCapabilities and metadata requests are always authorized
qs = parse_qs(urlparse(url.lower()).query)
request = qs.get('request')
logger.debug(qs)
public_requests = [
"getcapabilities",
"getmetadata",
"getlegendgraphic",
"describefeaturetype",
"describelayer",
"getstyles",
]
    if request and request[-1] in public_requests:
logger.debug("URL request is public")
return True
try:
user = User.objects.get(username=user, is_active=True)
except User.DoesNotExist:
logger.debug("User %s does not exist (or is not active)" % user)
else:
if not user.check_password(password):
logger.error("User %s provided bad password", user)
return False
resources = retrieve_resources_through_ows_url(url)
if not resources:
logger.error("Unable to get resources")
return False
# Refuse query if one of the resources is not available/authorized
for resource in resources:
if resource.anonymous_access:
continue
if not resource.is_profile_authorized(user):
logger.error(
"Resource '{resource}' is not authorized to user '{user}'.".format(
resource=resource.pk, user=user.username))
return False
return True
if __name__ == '__main__':
while True:
try:
line = sys.stdin.readline().strip()
logger.debug("REMAP ogc auth: %s" % line)
headers = {"REQUEST_URI": line}
# Remove querystring (handled by apache)
path = line.split("?")[0]
            # if resource is accessible by anonymous => public,
# otherwise check password (=> private)
if check_password(headers, "", ""):
response = "http://localhost/public{uri}".format(uri=path)
else:
response = "http://localhost/private{uri}".format(uri=path)
logger.debug("response : %s" % response)
sys.stdout.write(response + '\n')
sys.stdout.flush()
except Exception as e:
logger.error(e)
sys.stdout.write('NULL\n')
sys.stdout.flush()
|
#!/usr/bin/env python2
#encoding: UTF-8
import json
import sys;sys.path.append('./')
import zipfile
import re
import sys
import os
import codecs
import traceback
import numpy as np
def order_points_clockwise(pts):
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
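# Illustrative check of the ordering above (coordinates are made up): a shuffled
# quadrilateral is returned as top-left, top-right, bottom-right, bottom-left.
#   pts = np.array([[10, 10], [0, 10], [0, 0], [10, 0]], dtype="float32")
#   order_points_clockwise(pts) -> [[0, 0], [10, 0], [10, 10], [0, 10]]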
def print_help():
sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' %sys.argv[0])
sys.exit(2)
def load_zip_file_keys(file,fileNameRegExp=''):
"""
Returns an array with the entries of the ZIP file that match with the regular expression.
    The keys are the names of the files or the capturing group defined in the fileNameRegExp
"""
try:
archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
except :
raise Exception('Error loading the ZIP archive.')
pairs = []
for name in archive.namelist():
addFile = True
keyName = name
if fileNameRegExp!="":
m = re.match(fileNameRegExp,name)
if m == None:
addFile = False
else:
if len(m.groups())>0:
keyName = m.group(1)
if addFile:
pairs.append( keyName )
return pairs
def load_zip_file(file,fileNameRegExp='',allEntries=False):
"""
    Returns a dict with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the file names or the capturing group defined in the fileNameRegExp
    allEntries validates that all entries in the ZIP file pass the fileNameRegExp
"""
try:
archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
except :
raise Exception('Error loading the ZIP archive')
pairs = []
for name in archive.namelist():
addFile = True
keyName = name
if fileNameRegExp!="":
m = re.match(fileNameRegExp,name)
if m == None:
addFile = False
else:
if len(m.groups())>0:
keyName = m.group(1)
if addFile:
pairs.append( [ keyName , archive.read(name)] )
else:
if allEntries:
raise Exception('ZIP entry not valid: %s' %name)
return dict(pairs)
def load_folder_file(file, fileNameRegExp='', allEntries=False):
"""
    Returns a dict with the contents (filtered by fileNameRegExp) of a folder.
    The keys are the file names or the capturing group defined in the fileNameRegExp
    allEntries validates that all entries in the folder pass the fileNameRegExp
"""
pairs = []
for name in os.listdir(file):
addFile = True
keyName = name
if fileNameRegExp != "":
m = re.match(fileNameRegExp, name)
if m == None:
addFile = False
else:
if len(m.groups()) > 0:
keyName = m.group(1)
if addFile:
pairs.append([keyName, open(os.path.join(file,name)).read()])
else:
if allEntries:
                raise Exception('Folder entry not valid: %s' % name)
return dict(pairs)
def decode_utf8(raw):
"""
Returns a Unicode object on success, or None on failure
"""
try:
raw = codecs.decode(raw,'utf-8', 'replace')
#extracts BOM if exists
raw = raw.encode('utf8')
if raw.startswith(codecs.BOM_UTF8):
raw = raw.replace(codecs.BOM_UTF8, '', 1)
return raw.decode('utf-8')
except:
return None
def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
"""
    This function validates all lines of the file by calling the line validation function on each line
"""
utf8File = decode_utf8(file_contents)
if (utf8File is None) :
raise Exception("The file %s is not UTF-8" %fileName)
lines = utf8File.split( "\r\n" if CRLF else "\n" )
for line in lines:
line = line.replace("\r","").replace("\n","")
if(line != ""):
try:
validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
except Exception as e:
raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace'))
def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0):
"""
Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
"""
get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
"""
Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
Returns values from a textline. Points , [Confidences], [Transcriptions]
"""
confidence = 0.0
transcription = "";
points = []
numPoints = 4;
if LTRB:
numPoints = 4;
if withTranscription and withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
elif withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
elif withTranscription:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
else:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")
xmin = int(m.group(1))
ymin = int(m.group(2))
xmax = int(m.group(3))
ymax = int(m.group(4))
if(xmax<xmin):
raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." %(xmax))
if(ymax<ymin):
raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax))
points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
if (imWidth>0 and imHeight>0):
validate_point_inside_bounds(xmin,ymin,imWidth,imHeight);
validate_point_inside_bounds(xmax,ymax,imWidth,imHeight);
else:
numPoints = 8;
if withTranscription and withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
elif withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
elif withTranscription:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
else:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")
points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
points = order_points_clockwise(np.array(points).reshape(-1, 2)).reshape(-1)
validate_clockwise_points(points)
if (imWidth>0 and imHeight>0):
validate_point_inside_bounds(points[0],points[1],imWidth,imHeight);
validate_point_inside_bounds(points[2],points[3],imWidth,imHeight);
validate_point_inside_bounds(points[4],points[5],imWidth,imHeight);
validate_point_inside_bounds(points[6],points[7],imWidth,imHeight);
if withConfidence:
try:
confidence = float(m.group(numPoints+1))
except ValueError:
raise Exception("Confidence value must be a float")
if withTranscription:
posTranscription = numPoints + (2 if withConfidence else 1)
transcription = m.group(posTranscription)
m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters
transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")
return points,confidence,transcription
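# Hypothetical input lines accepted by get_tl_line_values, for reference:
#   LTRB=True, withConfidence and withTranscription: "10,20,110,220,0.9,hello"
#   LTRB=False, withTranscription only:              "10,20,110,20,110,220,10,220,word"
# In the LTRB=False case the 8 coordinates are re-ordered clockwise and validated
# before the transcription is returned.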
def validate_point_inside_bounds(x,y,imWidth,imHeight):
if(x<0 or x>imWidth):
raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(xmin,imWidth,imHeight))
if(y<0 or y>imHeight):
raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s) Sample: %s Line:%s" %(ymin,imWidth,imHeight))
def validate_clockwise_points(points):
"""
    Validates that the 4 points that delimit a polygon are in clockwise order.
"""
if len(points) != 8:
raise Exception("Points list not valid." + str(len(points)))
point = [
[int(points[0]) , int(points[1])],
[int(points[2]) , int(points[3])],
[int(points[4]) , int(points[5])],
[int(points[6]) , int(points[7])]
]
edge = [
( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),
( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),
( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),
( point[0][0] - point[3][0])*( point[0][1] + point[3][1])
]
summatory = edge[0] + edge[1] + edge[2] + edge[3];
if summatory>0:
raise Exception("Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")
def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True):
"""
    Returns all points, confidences and transcriptions of a file in lists. Valid line formats:
xmin,ymin,xmax,ymax,[confidence],[transcription]
x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
"""
pointsList = []
transcriptionsList = []
confidencesList = []
lines = content.split( "\r\n" if CRLF else "\n" )
for line in lines:
line = line.replace("\r","").replace("\n","")
if(line != "") :
points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight);
pointsList.append(points)
transcriptionsList.append(transcription)
confidencesList.append(confidence)
if withConfidence and len(confidencesList)>0 and sort_by_confidences:
import numpy as np
sorted_ind = np.argsort(-np.array(confidencesList))
confidencesList = [confidencesList[i] for i in sorted_ind]
pointsList = [pointsList[i] for i in sorted_ind]
transcriptionsList = [transcriptionsList[i] for i in sorted_ind]
return pointsList,confidencesList,transcriptionsList
def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True):
"""
    This process validates a method, evaluates it and, if it succeeds, generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a dictionary with the results
"""
evalParams = default_evaluation_params_fn()
if 'p' in p.keys():
evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )
resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'}
try:
# validate_data_fn(p['g'], p['s'], evalParams)
evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
resDict.update(evalData)
except Exception as e:
traceback.print_exc()
resDict['Message']= str(e)
resDict['calculated']=False
if 'o' in p:
if not os.path.exists(p['o']):
os.makedirs(p['o'])
resultsOutputname = p['o'] + '/results.zip'
outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)
del resDict['per_sample']
if 'output_items' in resDict.keys():
del resDict['output_items']
outZip.writestr('method.json',json.dumps(resDict))
if not resDict['calculated']:
if show_result:
sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n')
if 'o' in p:
outZip.close()
return resDict
if 'o' in p:
if per_sample == True:
for k,v in evalData['per_sample'].iteritems():
outZip.writestr( k + '.json',json.dumps(v))
if 'output_items' in evalData.keys():
for k, v in evalData['output_items'].iteritems():
outZip.writestr( k,v)
outZip.close()
if show_result:
sys.stdout.write("Calculated!")
sys.stdout.write(json.dumps(resDict['method']))
return resDict
def main_validation(default_evaluation_params_fn,validate_data_fn):
"""
This process validates a method
Params:
default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
"""
try:
p = dict([s[1:].split('=') for s in sys.argv[1:]])
evalParams = default_evaluation_params_fn()
if 'p' in p.keys():
evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )
validate_data_fn(p['g'], p['s'], evalParams)
print('SUCCESS')
sys.exit(0)
except Exception as e:
print(str(e))
sys.exit(101)
|
# coding=utf-8
"""Try to access the parameters page. feature tests."""
from functools import partial
from pytest_bdd import (
given,
scenario,
then,
when,
)
from pbraiders.pages.options.parameters import ParametersPage # pylint: disable=import-error
from pbraiders.pages.options.parameters.actions import HeadcountAction # pylint: disable=import-error
from pbraiders.pages.signin_utilities import sign_in # pylint: disable=import-error
scenario = partial(scenario, 'options/parameters/headcount.feature')
@scenario('Updating the headcounts')
def test_update_headcount():
"""Updating the headcounts."""
@scenario('Not using a valid value')
def test_value_not_valid():
"""Not using a valid value."""
@given('I want to update the headcount per month', target_fixture="page_parameters")
def page_parameters(the_browser, the_config, the_database) -> HeadcountAction:
"""I want to update the headcount per month."""
# Parameters page
p_page = ParametersPage(_driver=the_browser, _config=the_config['urls'])
if p_page.on_page() is False and p_page.visit() is False:
# Signin
assert sign_in(driver=the_browser, config=the_config, user="admin") is True
assert p_page.visit() is True
p_action = HeadcountAction(_page=p_page)
return p_action
@when('I update the headcounts')
def update_headcount(page_parameters) -> None:
"""I update the headcounts."""
page_parameters.fill_all().update()
@when('I update the headcounts with a non valid value')
def update_headcount_with_invalid_value(page_parameters) -> None:
"""I update the headcounts with a non valid value."""
page_parameters.fill(2, 'A').update()
@then('I should see the success message')
def success_message(page_parameters) -> None:
"""I should see the success message."""
assert page_parameters.has_succeeded() is True
@then('I should see the error message')
def error_message(page_parameters) -> None:
"""I should see the error message."""
assert page_parameters.has_failed() is True
@then('The update should be permanent')
def permanent_update(the_browser, the_config) -> None:
"""The update should be permanent."""
assert sign_in(driver=the_browser, config=the_config, user="admin") is True
p_page = ParametersPage(_driver=the_browser, _config=the_config['urls'])
assert p_page.visit() is True
p_action = HeadcountAction(_page=p_page)
assert p_action.check() is True
|
# coding=utf-8
import random
import re
import requests
from .base import Music
from .exception import MusicDoesnotExists
from mozart import config
__all__ = ["QQ"]
def get_guid():
return str(random.randrange(1000000000, 10000000000))
def operate_vkey(guid):
"""计算vkey"""
params = {"guid": guid, "format": "json", "json": 3}
s = requests.Session()
s.headers.update(config.fake_headers)
s.headers.update(
{"referer": "http://y.qq.com", "User-Agent": config.ios_ua}
)
r = s.get("http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg", params=params)
if r.status_code != requests.codes.ok:
raise Exception(r.text)
j = r.json()
if j["code"] != 0:
raise Exception(r.text)
return j["key"]
class QQ(Music):
def __init__(self, *args, **kwargs):
super(QQ, self).__init__(*args, **kwargs)
        # QQ Music initialization
if not self.use_id:
self.music_id = self.get_music_id_from_url(self.real_url)
self.get_music_from_id()
        print(self.__repr__())  # print the result
def get_music_from_id(self):
if self.music_id: # music_id合法才请求
self._get_music_info()
self._get_download_url()
def _get_download_url(self):
guid = get_guid()
vkey = operate_vkey(guid)
for prefix in ["M800", "M500", "C400"]:
url = "http://dl.stream.qqmusic.qq.com/%s%s.mp3?vkey=%s&guid=%s&fromtag=1" % (
prefix,
self.music_id,
vkey,
guid,
)
size = 0
try:
r = requests.get(
url,
stream=True,
headers=config.wget_header,
)
size = int(r.headers.get("Content-Length", 0))
                # convert to MB, keeping two decimal places
size = round(size / 1048576, 2)
except:
pass
if size > 0:
if prefix == "M800":
self._rate = 320
break
self._download_url = url
def _get_music_info(self):
"""
        Fetching the title and singer may fail for some songs.
:return:
"""
url = 'https://u.y.qq.com/cgi-bin/musicu.fcg'
params = {
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'data': '%7b%22songinfo%22%3a%7b%22method%22%3a%22get_song_detail_yqq%22%2c%22'
'param%22%3a%7b%22song_type%22%3a0%2c%22song_mid%22%3a%22{}%22%7d%2c%22'
'module%22%3a%22music.pf_song_detail_svr%22%7d%7d'.format(self.music_id),
}
resp = requests.get(url, params=params)
try:
data = resp.json()["songinfo"]["data"]["track_info"]
self._song = data["name"]
self._singer = data["singer"][0]["name"]
except Exception:
            raise MusicDoesnotExists("Music does not exist, please check")
@classmethod
def get_music_id_from_url(cls, url):
music_ids = re.findall(r'songmid=(.+?)&', url)
if music_ids:
return music_ids[0]
return ""
|
import cx_Oracle
import pandas as pd
import numpy as np
import calendar
import datetime
#==================================================================================================================
def add_months(sourcedate, months):
"""Función que permite sumar o restar 'months' meses a una fecha 'sourcedate' determinada.
El formato de 'sourcedate' es de la forma datetime.date(año, mes, dia)."""
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, calendar.monthrange(year,month)[1])
return datetime.date(year, month, day)
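# Hedged usage examples for add_months (dates are illustrative); note that the day
# is clamped to the target month's length:
#   add_months(datetime.date(2021, 1, 31), 1)  -> datetime.date(2021, 2, 28)
#   add_months(datetime.date(2021, 3, 15), -2) -> datetime.date(2021, 1, 15)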
#==================================================================================================================
def datetime_to_integer(dt_time):
"""Función que permite cambiar el formato de una fecha 'dt_time' a un número entero.
El formato de 'dt_time' es datetime.date(año, mes, dia)"""
integer = 10000*dt_time.year + 100*dt_time.month + dt_time.day
return integer
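# Illustrative conversion: datetime_to_integer(datetime.date(2021, 2, 28)) -> 20210228,
# the integer form in which dates are passed to the query below.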
#==================================================================================================================
def f_query(fi_int, ff_int, fecha_init, fecha_end):
"""Consulta Oracle PL-SQL a la base de datos de Naranja. Los argumentos 'fi_int' y 'ff_int' indican los extremos
del intervalo de tiempo para llevar a cabo el modelo RFM. Ambas fechas ingresan a la consulta como números enteros.
'fecha_init' representa lo mismo que 'fi_init' pero con un formato diferente (%d/%m/%Y)"""
query = ("""
select
a.dim_tiempos dim_tiempos,
a.dim_geo_comercios dim_geo_comercios,
a.rubro_descripcion rubro_descripcion,
b.comercio_id comercio_id,
b.comercio_codigo comercio_codigo,
b.comercio_descripcion comercio_descripcion,
b.cuit cuit,
b.rubro rubro,
b.fecha_contrato fecha_contrato,
b.fecha_baja fecha_baja,
b.tipo_venta tipo_venta,
b.origen origen,
b.provincia_descripcion provincia_descripcion,
a.importe importe,
a.moneda moneda,
a.fecha fecha
from
(select
f.dim_tiempos dim_tiempos,
f.dim_geo_comercios dim_geo_comercios,
r.rubro_descripcion rubro_descripcion,
f.met_importe importe,
f.atr_moneda moneda,
f.atr_fecha_presentacion fecha
from dw.dim_rubros r
inner join dw.fac_consumos_comercios f
on f.dim_rubros = r.dimension_key """
f'where f.DIM_TIEMPOS = {fi_int} ' #/*BETWEEN {ff_int} AND {fi_int} */
"""and (f.DIM_RUBROS <> 69 and f.DIM_RUBROS <> 27 and f.DIM_RUBROS <> 115 and f.DIM_RUBROS <> -1)
and f.ATR_DEBITO <> 'S'
and f.atr_fecha_presentacion <> to_date('00010101000000', 'YYYYMMDDHH24MISS') """
f"and f.atr_fecha_presentacion between to_date('{fecha_end}', 'DD/MM/YYYY') and to_date('{fecha_init}', 'DD/MM/YYYY')) a "
"""inner join
(select
dimension_key,
comercio_id,
comercio_codigo,
comercio_descripcion,
cuit,
rubro,
fecha_contrato,
fecha_baja,
tipo_venta,
origen,
provincia_descripcion
from dw.dim_geo_comercios ) b
on b.dimension_key = a.dim_geo_comercios
where b.origen = 'C' """
#/*--and b.comercio_descripcion <> 'MERCADO PAGO'*/
"""and b.fecha_baja = to_date('00010101000000', 'YYYYMMDDHH24MISS')
""")
return query
#=========================================================================================================================
def consulta_DW(query, cur, fecha_in, fecha_out):
"""Ejecuta la consulta al data Warehouse, genera un dataframe y lo guarda localmente. Necesita 4 argumentos:
'query', la consulta SQL, 'cur', un cursor, 'fecha_in' y 'fecha_out', strings de las fechas 'fi_int' y 'ff_int' para
poder etiquetar el nombre del dataframe guardado."""
cur.execute(query)
res = cur.fetchall()
columns = [c[0] for c in cur.description]
data = pd.DataFrame( data = res , columns=columns)
data.to_csv(f'{fecha_in}--{fecha_out}.csv', index = False)
return data
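# Hypothetical end-to-end sketch (connection string, dates and labels are
# placeholders, not part of the original module):
#   conn = cx_Oracle.connect('user/password@host:1521/service')
#   cur = conn.cursor()
#   fi = datetime.date(2020, 6, 1)            # later date (fecha_init)
#   ff = add_months(fi, -1)                   # earlier date (fecha_end)
#   query = f_query(datetime_to_integer(fi), datetime_to_integer(ff),
#                   fi.strftime('%d/%m/%Y'), ff.strftime('%d/%m/%Y'))
#   df = consulta_DW(query, cur, str(datetime_to_integer(fi)), str(datetime_to_integer(ff)))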
#=========================================================================================================================
|
"""
Author: Dustin Hines
Course: Computational Creativity Fall 2018
Project: M7: Playing with Words
Date: last modified 12/13
Description:
This module implements 3 classes:
1. Narrator -
the entity that maintains the story knowledge such as setting and
characters and keeps track of other meta data as the narrative
progresses
2. Event -
        used as an object to store the time and event for a narrative
event.
3. Episode -
Structure that uses the narrator to generate a narrative of some
length.
Notes:
* output will be a text script of TV episodes (maybe build this up to
seasons?)
"""
import random
import knowledge as knows
import character
from random import choice
import os
NUM_CHARACTERS = 10
NUM_LOCATIONS = 5
RATE_LOCATION_CONNECTED = 1
NUM_ACTIONS = 25
NUM_SCRIPTS = 100
RESULTS_PATH = 'results'
RESULTS_NAME = 'script_'
class Narrator:
"""
Purpose: control the flow of the story / keep track of story information
Includes the following methods:
characters_at
can_act
change_location
init_locations
init_characters
"""
def __init__(self):
self.knowledge = knows.Knowledge()
self.locations = self.init_locations()
self.characters = self.init_characters()
self.current_location = choice(self.locations['names'])
self.social_network = character.SocialNetwork(self.characters)
self.story_over = False
def characters_at(self, location):
"""
Purpose: return a list of characters in a location.
"""
characters = []
for character in self.characters:
if character.location == location:
characters.append(character)
return characters
def can_act(self, nearby_characters):
"""
        Purpose: return a sublist of characters that can actually make an
action.
"""
can_act = nearby_characters.copy()
for character in nearby_characters:
if not character.can_act(self.locations, self.characters,
nearby_characters, self.social_network):
can_act.remove(character)
if not character.alive:
can_act.remove(character)
return can_act
def change_location(self):
"""
Purpose: change the current_location to a new location.
"""
possibilities = self.locations['names'].copy()
# don't want to switch to the current location
possibilities.remove(self.current_location)
# don't want to focus on a location without any characters
with_characters = possibilities.copy()
for possibility in possibilities:
if self.characters_at(possibility) == []:
with_characters.remove(possibility)
if len(possibilities) == 0:
return 'There are no characters left.'
else:
new_location = choice(with_characters)
self.current_location = new_location
return 'We now shift our attention to {}'.format(self.current_location)
def init_locations(self, num_locations=NUM_LOCATIONS):
"""
Purpose: create a kind of world map where there are locations from the
knowledge base connected by edges.
"""
chosen_locations = []
for i in range(0, num_locations):
location = choice(self.knowledge.locations)
self.knowledge.locations.remove(location)
chosen_locations.append(location)
# dictionary to express connections between locations:
# connections['location'] = [True, True, False, False,...]
# ->True if there is a connection between the two locations, false
# otherwise
connections = {}
for location in chosen_locations:
options = chosen_locations.copy()
options.remove(location)
connected = []
for option in options:
connected.append(random.random() < RATE_LOCATION_CONNECTED)
connections[location] = connected
locations = {
'names': chosen_locations,
'connections': connections
}
return locations
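    # Illustrative shape of the structure returned above (names are hypothetical):
    #   {'names': ['castle', 'forest', 'village'],
    #    'connections': {'castle': [True, False], ...}}
    # where each boolean list lines up with the other locations in 'names' order,
    # minus the location itself.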
def init_characters(self, num_characters=NUM_CHARACTERS):
"""
Purpose: create some character objects
"""
characters = []
for i in range(0, num_characters):
# def __init__(self, name, personality, goal, health, location):
name = choice(self.knowledge.names)
# don't want multiple characters to have the same name
self.knowledge.names.remove(name)
personality = character.Personality()
goal = None
health = random.randint(0, 100)
location = choice(self.locations['names'])
characters.append(character.Character(name, personality, goal, health, location))
return characters
class Event:
"""
    Keep track of the time that an event occurred during. Using a class
because dot notation is better than dictionaries.
"""
def __init__(self, time, action):
self.time = time
self.action = action
# TODO: flesh things out so that the same narrator can create multiple episodes
# of narrative (as long as there are still characters left!)
# class Season:
# """
# Purpose: preserve details of characters and setting accross episodes
# """
# def __init__(self, characters, setting):
# return
class Episode:
"""
    Purpose: generate scripts organized into episodes; multiple
episodes can be told by the same narrator (i.e. same characters
and setting).
Includes the following methods:
write_narrative
write_script
get_narrative_from_action
evaluate
output_script
"""
def __init__(self, narrator, num_actions=NUM_ACTIONS):
self.narrator = narrator
self.narrative = []
self.script = []
self.num_actions = num_actions
self.time = 0
self.starting_connections = []
self.final_connections = []
self.score = -1
def write_narrative(self):
"""
Purpose: determine the sequences of actions that make up the script.
"""
narrator = self.narrator
curr_acts = 0
events = []
opener = 'We begin our story in {}. '.format(narrator.current_location)
for character in narrator.characters_at(narrator.current_location):
opener += '{} is here. '.format(character)
opener += '\n'
# this would be used for a string representation of the social
# structure, but we don't use this
        initial_social_structure = narrator.social_network.for_narrative()
self.starting_connections = narrator.social_network.for_fitness()
for character in narrator.characters:
opener += character.tell_personality() + '\n'
events.append(Event(0, opener))
while curr_acts < self.num_actions:
# want to only be focused on locations where there are characters than
# can do things
possible_characters = narrator.characters_at(narrator.current_location)
can_act = narrator.can_act(possible_characters)
if len(can_act) == 0:
change_narrative_location = narrator.change_location()
events.append(Event(self.time, change_narrative_location))
if change_narrative_location == 'There are no characters left.':
# the story is OVER
narrator.story_over = True
break
else:
next_actor = choice(can_act)
act = next_actor.action_maker(narrator.locations,
narrator.characters, possible_characters, narrator.social_network)
events.append(Event(self.time, act))
curr_acts += 1
self.time += 1
self.final_connections = narrator.social_network.for_fitness()
closer = 'And thus ends the episode. \n'
# this would be used for a string representation of the social
# structure, but we don't use this
        end_social_structure = narrator.social_network.for_narrative()
events.append(Event(self.time, closer))
self.narrative = events
def write_script(self):
"""
Purpose: from a list of actions, get the narrative elements from each
event.
"""
for event in self.narrative:
self.script.append(self.get_narrative_from_action(event))
def get_narrative_from_action(self, event):
"""
Purpose: translate a event into a string representing the event.
"""
# TODO: set up way of getting narrative form that's more interesting
# (BIG TODO)
return str(event.action.strip())
    def evaluate(self):
        """
        Purpose: evaluate an episode of the script by summing the change
in the connection parameters between the characters.
"""
changes = []
total_theta = 0
for i in range(0, len(self.starting_connections)):
for j in range(0, len(self.starting_connections[i])):
total_theta += abs(self.starting_connections[i][j] -
self.final_connections[i][j])
self.score = total_theta
return total_theta
def output_script(self):
"""
Purpose: write the script to the output folder.
"""
# list the number of things in results folder, use that number
# for a unique file name
num_results = len(os.listdir(RESULTS_PATH))
file_name = RESULTS_NAME + str(num_results) + '.txt'
        with open(RESULTS_PATH + '/' + file_name, 'w') as file:
            for event in self.script:
                file.write(event + '\n')
def main():
narrator = Narrator()
episodes = []
bsf = []
bsf_score = 0
# generate 100 scripts using this narrator and choose the one with the
# highest fitness, which is currently a measure of how dramatically
# character connection values changed from the start of the episode
# to the end.
for i in range(0, NUM_SCRIPTS):
episode = Episode(narrator)
episode.write_narrative()
episode.write_script()
        episode.evaluate()
if episode.score > bsf_score:
bsf = episode
bsf_score = episode.score
bsf.output_script()
if __name__ == '__main__':
main()
|
x = [True, 1, 1.0, [True], (1,), dict(a = 1)]
print all([])
print all(x)
print all(x + [0])
try:
print all()
except TypeError, E:
print "Fail", E
|
# dataset settings
dataset_type = 'MGSDataset'
data_root = 'data/mgs'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=False),
dict(type='Resize', img_scale=(1120, 768), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1120, 768),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/train',
ann_dir='groundtruth/train',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/val',
ann_dir='groundtruth/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/val',
ann_dir='groundtruth/val',
pipeline=test_pipeline))
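# Note (assumption): in an MMSegmentation-style setup this dataset fragment is
# normally pulled into a full training config via the `_base_` mechanism, e.g.
#   _base_ = ['../_base_/datasets/mgs.py', ...]
# rather than being used on its own.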
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.analytics.data_v1alpha.types import analytics_data_api
from .base import AlphaAnalyticsDataTransport, DEFAULT_CLIENT_INFO
from .grpc import AlphaAnalyticsDataGrpcTransport
class AlphaAnalyticsDataGrpcAsyncIOTransport(AlphaAnalyticsDataTransport):
"""gRPC AsyncIO backend transport for AlphaAnalyticsData.
Google Analytics reporting data service.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "analyticsdata.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
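    # Hypothetical usage sketch (not part of the generated code): the async client
    # normally constructs this transport itself, but a channel can also be created
    # directly, e.g.
    #   channel = AlphaAnalyticsDataGrpcAsyncIOTransport.create_channel(
    #       "analyticsdata.googleapis.com", credentials=creds)
    # and then passed back in via the `channel` argument of __init__ below.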
def __init__(
self,
*,
host: str = "analyticsdata.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
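# Channel and TLS selection below, in order of precedence: an explicitly
# supplied ``channel`` is used as-is (credentials are ignored); otherwise the
# deprecated ``api_mtls_endpoint``/``client_cert_source`` pair is honored;
# otherwise ``client_cert_source_for_mtls`` supplies the client certificate
# unless explicit ``ssl_channel_credentials`` were passed.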
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def run_report(
self,
) -> Callable[
[analytics_data_api.RunReportRequest],
Awaitable[analytics_data_api.RunReportResponse],
]:
r"""Return a callable for the run report method over gRPC.
Returns a customized report of your Google Analytics
event data. Reports contain statistics derived from data
collected by the Google Analytics tracking code. The
data returned from the API is a table with columns
for the requested dimensions and metrics. Metrics are
individual measurements of user activity on your
property, such as active users or event count.
Dimensions break down metrics across some common
criteria, such as country or event name.
Returns:
Callable[[~.RunReportRequest],
Awaitable[~.RunReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_report" not in self._stubs:
self._stubs["run_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunReport",
request_serializer=analytics_data_api.RunReportRequest.serialize,
response_deserializer=analytics_data_api.RunReportResponse.deserialize,
)
return self._stubs["run_report"]
@property
def run_pivot_report(
self,
) -> Callable[
[analytics_data_api.RunPivotReportRequest],
Awaitable[analytics_data_api.RunPivotReportResponse],
]:
r"""Return a callable for the run pivot report method over gRPC.
Returns a customized pivot report of your Google
Analytics event data. Pivot reports are more advanced
and expressive formats than regular reports. In a pivot
report, dimensions are only visible if they are included
in a pivot. Multiple pivots can be specified to further
dissect your data.
Returns:
Callable[[~.RunPivotReportRequest],
Awaitable[~.RunPivotReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_pivot_report" not in self._stubs:
self._stubs["run_pivot_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunPivotReport",
request_serializer=analytics_data_api.RunPivotReportRequest.serialize,
response_deserializer=analytics_data_api.RunPivotReportResponse.deserialize,
)
return self._stubs["run_pivot_report"]
@property
def batch_run_reports(
self,
) -> Callable[
[analytics_data_api.BatchRunReportsRequest],
Awaitable[analytics_data_api.BatchRunReportsResponse],
]:
r"""Return a callable for the batch run reports method over gRPC.
Returns multiple reports in a batch. All reports must
be for the same Entity.
Returns:
Callable[[~.BatchRunReportsRequest],
Awaitable[~.BatchRunReportsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_run_reports" not in self._stubs:
self._stubs["batch_run_reports"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunReports",
request_serializer=analytics_data_api.BatchRunReportsRequest.serialize,
response_deserializer=analytics_data_api.BatchRunReportsResponse.deserialize,
)
return self._stubs["batch_run_reports"]
@property
def batch_run_pivot_reports(
self,
) -> Callable[
[analytics_data_api.BatchRunPivotReportsRequest],
Awaitable[analytics_data_api.BatchRunPivotReportsResponse],
]:
r"""Return a callable for the batch run pivot reports method over gRPC.
Returns multiple pivot reports in a batch. All
reports must be for the same Entity.
Returns:
Callable[[~.BatchRunPivotReportsRequest],
Awaitable[~.BatchRunPivotReportsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_run_pivot_reports" not in self._stubs:
self._stubs["batch_run_pivot_reports"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/BatchRunPivotReports",
request_serializer=analytics_data_api.BatchRunPivotReportsRequest.serialize,
response_deserializer=analytics_data_api.BatchRunPivotReportsResponse.deserialize,
)
return self._stubs["batch_run_pivot_reports"]
@property
def get_metadata(
self,
) -> Callable[
[analytics_data_api.GetMetadataRequest], Awaitable[analytics_data_api.Metadata]
]:
r"""Return a callable for the get metadata method over gRPC.
Returns metadata for dimensions and metrics available in
reporting methods. Used to explore the dimensions and metrics.
In this method, a Google Analytics GA4 Property Identifier is
specified in the request, and the metadata response includes
Custom dimensions and metrics as well as Universal metadata.
For example if a custom metric with parameter name
``levels_unlocked`` is registered to a property, the Metadata
response will contain ``customEvent:levels_unlocked``. Universal
metadata are dimensions and metrics applicable to any property
such as ``country`` and ``totalUsers``.
Returns:
Callable[[~.GetMetadataRequest],
Awaitable[~.Metadata]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_metadata" not in self._stubs:
self._stubs["get_metadata"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/GetMetadata",
request_serializer=analytics_data_api.GetMetadataRequest.serialize,
response_deserializer=analytics_data_api.Metadata.deserialize,
)
return self._stubs["get_metadata"]
@property
def run_realtime_report(
self,
) -> Callable[
[analytics_data_api.RunRealtimeReportRequest],
Awaitable[analytics_data_api.RunRealtimeReportResponse],
]:
r"""Return a callable for the run realtime report method over gRPC.
The Google Analytics Realtime API returns a
customized report of realtime event data for your
property. These reports show events and usage from the
last 30 minutes.
Returns:
Callable[[~.RunRealtimeReportRequest],
Awaitable[~.RunRealtimeReportResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_realtime_report" not in self._stubs:
self._stubs["run_realtime_report"] = self.grpc_channel.unary_unary(
"/google.analytics.data.v1alpha.AlphaAnalyticsData/RunRealtimeReport",
request_serializer=analytics_data_api.RunRealtimeReportRequest.serialize,
response_deserializer=analytics_data_api.RunRealtimeReportResponse.deserialize,
)
return self._stubs["run_realtime_report"]
__all__ = ("AlphaAnalyticsDataGrpcAsyncIOTransport",)
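# A minimal, hedged usage sketch (kept as comments so importing this module is
# unaffected). It assumes application-default credentials are available and that
# ``analytics_data_api`` is the request/response module referenced above; real
# applications would normally go through the generated async client rather than
# the raw transport.
#
# import asyncio
#
# async def _demo_run_report():
#     transport = AlphaAnalyticsDataGrpcAsyncIOTransport()
#     request = analytics_data_api.RunReportRequest()
#     return await transport.run_report(request)
#
# asyncio.run(_demo_run_report())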
|
#!/usr/bin/env python2
import random
import numpy as np
class Network(object):
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
#self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
#self.weights = [np.random.randn(y, x)
# for x, y in zip(sizes[:-1], sizes[1:])]
self.biases = [np.ones((y,1)) for y in sizes[1:]]
self.weights = [np.ones((y,x)) for x,y in zip(sizes[:-1], sizes[1:])]
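# For the example in the docstring, Network([2, 3, 1]) gives
#   biases:  shapes [(3, 1), (1, 1)]  -- one column vector per non-input layer
#   weights: shapes [(3, 2), (1, 3)]  -- weights[i] maps layer i onto layer i+1
# (initialized to all ones here instead of Gaussian noise, per the lines above).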
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print "Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test)
else:
print "Epoch {0} complete".format(j)
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
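# In symbols, for a mini batch of size m: w <- w - (eta/m) * sum_x dC_x/dw and
# b <- b - (eta/m) * sum_x dC_x/db, with the per-example gradients coming from
# self.backprop below.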
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
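# Concretely, for the Network([1, 2, 2, 1]) built at the bottom of this file
# (num_layers = 4) the loop runs l = 2, 3: l = 2 updates the hidden layer next
# to the output (weights[-2], biases[-2]) and l = 3 the first hidden layer
# (weights[-3], biases[-3]).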
for l in xrange(2, self.num_layers):
print "========================================="
z = zs[-l]
layer = len(self.weights) + 1 - l
print "l: {0}, Layer: {1}".format(l, layer)
print "Z:"
print z
sp = sigmoid_prime(z)
print "Sp:"
print sp
print "Last activation (transposed):"
print activations[-l-1].transpose()
print "Last activation (layer - 1)"
print activations[layer - 1].transpose()
print "Activations[-l]"
print activations[-l]
print "Activations[layer]"
print activations[layer]
print "Next ({0}) weights (transposed):".format(layer+1)
print self.weights[-l+1].transpose()
print "Next weights transposed using layer instead of l and sub 1:"
print self.weights[layer+1 - 1].transpose()
print "Current ({0}) weights:".format(layer)
print self.weights[-l]
print "Old delta:"
print delta
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
print "Nabla_b[-l]:"
print np.array(nabla_b[-l])
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
print "Nabla_w[-l]:"
print np.array(nabla_w[-l])
print "======================="
print "Activations (len: {0}):".format(len(activations))
print activations
print "Weights (len: {0}):".format(len(self.weights))
print self.weights
print "Biases:"
print self.biases
print "Num layers:"
print self.num_layers
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
net = Network([1,2,2,1])
x_train = np.array([10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0])
y_train = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
net.backprop(x_train[0], y_train[0])
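# A hedged sketch of how the data above could be fed to SGD (kept as comments;
# the hyperparameters are illustrative only). Each example is shaped as a
# column vector, the form the SGD/backprop code above expects:
#
# training_data = [(np.array([[x]]), np.array([[y]]))
#                  for x, y in zip(x_train, y_train)]
# net.SGD(training_data, epochs=10, mini_batch_size=4, eta=0.1)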
|