text
stringlengths 2
999k
|
|---|
import unittest
import numpy as np
from demohamiltonian import *
class TestHamGen(unittest.TestCase):
    """Shape checks for the arrays produced by Hamiltonian()."""

    def testDim(self):
        # A size-N system must yield an (N,) vector and an (N, N) matrix.
        n_sites = 2
        vec, mat = Hamiltonian(n_sites, 1, 1, 1, 1)
        self.assertEqual(np.shape(vec), (n_sites,))
        self.assertEqual(np.shape(mat), (n_sites, n_sites))
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import codecs
import os
def get_default_rendering_file_content(file_name="render.html"):
    """Read and return the whole rendering template (render.html by default)."""
    with codecs.open(file_name, "r", "utf-8") as rendering_file:
        return rendering_file.read()
def get_fixture_content(file_name):
    """Return the UTF-8 text of fixtures/<file_name>."""
    path = os.path.join("fixtures", file_name)
    with codecs.open(path, "r", "utf-8") as fixture_file:
        return fixture_file.read()
def store_fixture_content(file_name, content):
fixture_file = os.path.join("fixtures", file_name)
with codecs.open(fixture_file, "w", "utf-8") as f:
return f.write(content)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Kunal Diwan
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from pyrogram import (
Client,
filters
)
from pyrogram.types import (
Message
)
from pyrogram.errors import (
SessionPasswordNeeded,
BadRequest
)
from bot import (
ACC_PROK_WITH_TFA,
AKTIFPERINTAH,
PHONE_CODE_IN_VALID_ERR_TEXT,
RECVD_PHONE_CODE,
SESSION_GENERATED_USING
)
@Client.on_message(
    filters.text &
    filters.private,
    group=2
)
async def recv_tg_code_message(_, message: Message):
    """Handle the login-code message during session-string generation.

    Picks up the pending wizard state stored in AKTIFPERINTAH for this chat
    and tries to sign the user client in with the received phone code.
    """
    # Per-chat wizard state; absent means no sign-in is in progress here.
    w_s_dict = AKTIFPERINTAH.get(message.chat.id)
    if not w_s_dict:
        return
    sent_code = w_s_dict.get("SENT_CODE_R")
    phone_number = w_s_dict.get("PHONE_NUMBER")
    loical_ci = w_s_dict.get("USER_CLIENT")
    if not sent_code or not phone_number:
        return
    status_message = w_s_dict.get("MESSAGE")
    if not status_message:
        return
    # await status_message.delete()
    del w_s_dict["MESSAGE"]
    status_message = await message.reply_text(
        RECVD_PHONE_CODE
    )
    # Users often type the code with spaces between digits; strip them out.
    phone_code = "".join(message.text.split(" "))
    try:
        w_s_dict["SIGNED_IN"] = await loical_ci.sign_in(
            phone_number,
            sent_code.phone_code_hash,
            phone_code
        )
    except BadRequest as e:
        # Wrong/expired code: report the error and drop this chat's state.
        await status_message.edit_text(
            e.MESSAGE + "\n\n" + PHONE_CODE_IN_VALID_ERR_TEXT
        )
        del AKTIFPERINTAH[message.chat.id]
    except SessionPasswordNeeded:
        # Account has two-factor auth; flag it so a later handler can ask
        # for the password.
        w_s_dict["IS_NEEDED_TFA"] = True
        await status_message.edit_text(
            ACC_PROK_WITH_TFA
        )
    else:
        # Signed in: send the exported session string, then clean up and stop.
        saved_message_ = await status_message.edit_text(
            "<code>" + str(await loical_ci.export_session_string()) + "</code>"
        )
        await saved_message_.reply_text(
            SESSION_GENERATED_USING,
            quote=True
        )
        del AKTIFPERINTAH[message.chat.id]
        return False
    # NOTE(review): after the BadRequest branch this re-inserts the entry
    # that was just deleted above — confirm whether that is intended.
    AKTIFPERINTAH[message.chat.id] = w_s_dict
    # NOTE(review): stop_propagation() raises StopPropagation itself, so the
    # explicit `raise` never sees a value; it is redundant but harmless.
    raise message.stop_propagation()
|
# Copyright 2016 Cloudbase Solutions.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netifaces
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class IPWrapper(object):
    """Helper for looking up local network devices by IP address."""

    def get_device_by_ip(self, ip):
        """Return the first device bound to ``ip``, or None if none matches."""
        if not ip:
            return
        matches = (dev for dev in self.get_devices() if dev.device_has_ip(ip))
        return next(matches, None)

    def get_devices(self):
        """Return every host interface wrapped as an IPDevice."""
        try:
            return [IPDevice(name) for name in netifaces.interfaces()]
        except (OSError, MemoryError):
            LOG.error("Failed to get network interfaces.")
            return []
class IPDevice(object):
    """A single network interface, addressed by its system name."""

    def __init__(self, name):
        self.name = name
        self.link = IPLink(self)

    def read_ifaddresses(self):
        """Return the netifaces address map for this device, or None on error."""
        try:
            return netifaces.ifaddresses(self.name)
        except ValueError:
            LOG.error("The device does not exist on the system: %s.",
                      self.name)
            return None
        except OSError:
            LOG.error("Failed to get interface addresses: %s.",
                      self.name)
            return None

    def device_has_ip(self, ip):
        """Return True when ``ip`` is one of this device's IPv4 addresses."""
        addr_map = self.read_ifaddresses()
        if addr_map is None:
            return False
        ipv4_entries = addr_map.get(netifaces.AF_INET, [])
        return any(entry['addr'] == ip for entry in ipv4_entries)
class IPLink(object):
    """Link-layer view of a parent IPDevice."""

    def __init__(self, parent):
        self._parent = parent

    @property
    def address(self):
        """Link-layer addresses of the device; False when it can't be read."""
        addr_map = self._parent.read_ifaddresses()
        if addr_map is None:
            return False
        return [entry['addr'] for entry in addr_map.get(netifaces.AF_LINK, [])]
|
from typing import List, Optional
from blspy import AugSchemeMPL, PrivateKey, G1Element
from taco.util.ints import uint32
# EIP 2334 bls key derivation
# https://eips.ethereum.org/EIPS/eip-2334
# 12381 = bls spec number
# 8444 = Taco blockchain number and port number
# 0, 1, 2, 3, 4, 5, 6 farmer, pool, wallet, local, backup key, singleton, pooling authentication key numbers
def _derive_path(sk: PrivateKey, path: List[int]) -> PrivateKey:
    """Derive one child key per index along the EIP-2334 path."""
    for index in path:
        sk = AugSchemeMPL.derive_child_sk(sk, index)
    return sk
def master_sk_to_farmer_sk(master: PrivateKey) -> PrivateKey:
    """Derive the farmer key (use number 0, see header comment)."""
    return _derive_path(master, [12381, 8444, 0, 0])


def master_sk_to_pool_sk(master: PrivateKey) -> PrivateKey:
    """Derive the pool key (use number 1)."""
    return _derive_path(master, [12381, 8444, 1, 0])


def master_sk_to_wallet_sk(master: PrivateKey, index: uint32) -> PrivateKey:
    """Derive the wallet key at ``index`` (use number 2)."""
    return _derive_path(master, [12381, 8444, 2, index])


def master_sk_to_local_sk(master: PrivateKey) -> PrivateKey:
    """Derive the local key (use number 3)."""
    return _derive_path(master, [12381, 8444, 3, 0])


def master_sk_to_backup_sk(master: PrivateKey) -> PrivateKey:
    """Derive the backup key (use number 4)."""
    return _derive_path(master, [12381, 8444, 4, 0])
def master_sk_to_singleton_owner_sk(master: PrivateKey, wallet_id: uint32) -> PrivateKey:
    """
    This key controls a singleton on the blockchain, allowing for dynamic pooling (changing pools)
    """
    # Use number 5; one owner key per wallet id.
    return _derive_path(master, [12381, 8444, 5, wallet_id])
def master_sk_to_pooling_authentication_sk(master: PrivateKey, wallet_id: uint32, index: uint32) -> PrivateKey:
    """
    This key is used for the farmer to authenticate to the pool when sending partials
    """
    # Both components must fit in one derivation index: wallet_id*10000+index.
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # ValueError if these bounds must hold in production.
    assert index < 10000
    assert wallet_id < 10000
    return _derive_path(master, [12381, 8444, 6, wallet_id * 10000 + index])
async def find_owner_sk(all_sks: List[PrivateKey], owner_pk: G1Element) -> Optional[PrivateKey]:
    """Find the singleton-owner secret key whose public key is ``owner_pk``.

    Scans wallet ids 0..49 for every master key in ``all_sks``.
    Returns the matching owner *secret* key, or None when nothing matches.

    Bug fix: the return annotation previously said ``Optional[G1Element]``,
    but the function returns the derived ``PrivateKey``.
    """
    for wallet_id in range(50):
        for sk in all_sks:
            auth_sk = master_sk_to_singleton_owner_sk(sk, uint32(wallet_id))
            if auth_sk.get_g1() == owner_pk:
                return auth_sk
    return None
async def find_authentication_sk(all_sks: List[PrivateKey], authentication_pk: G1Element) -> Optional[PrivateKey]:
    """Find the pooling-authentication secret key matching ``authentication_pk``.

    NOTE: the search bounds (20 key indices x 20 wallet ids) might need to be
    increased when using a large number of wallets or after switching
    authentication keys many times.
    """
    for key_index in range(20):
        for wallet_id in range(20):
            for master in all_sks:
                candidate = master_sk_to_pooling_authentication_sk(master, uint32(wallet_id), uint32(key_index))
                if candidate.get_g1() == authentication_pk:
                    return candidate
    return None
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.gpt_neo import PreTrained
from ...models.gpt_neo import ForCausal
logging.set_verbosity_info()
log = logging.get_logger(__name__)
def load_src_weights(model, config, gpt_neo_checkpoint_path):
    """Load a TensorFlow GPT-Neo checkpoint into the PyTorch ``model``.

    Reads every variable from the checkpoint (skipping optimizer slots),
    renames TF scopes to the PyTorch module layout, transposes linear
    kernels, copies the tensors in place, and ties the output embedding
    to ``wte``.  Returns ``model``.
    """
    tf_path = abspath(gpt_neo_checkpoint_path)
    log.info(f"Converting TensorFlow checkpoint from {tf_path}")
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, _ in init_vars:
        # Skip optimizer state ("adam") and the training step counter.
        if "global_step" not in name and "adam" not in name:
            array = tf.train.load_variable(tf_path, name)
            array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
            # Map TF variable scopes onto the PyTorch attribute names.
            name = name.replace("attn/q", "attn/attention/q_proj/w")
            name = name.replace("attn/k", "attn/attention/k_proj/w")
            name = name.replace("attn/v", "attn/attention/v_proj/w")
            name = name.replace("attn/o", "attn/attention/out_proj/w")
            name = name.replace("norm_1", "ln_1")
            name = name.replace("norm_2", "ln_2")
            name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
            name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
            name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
            name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
            name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
            names.append(name)
            arrays.append(array)
    for name, array in zip(names, arrays):
        name = name[5:]  # skip the "gpt2/" scope prefix
        name = name.split("/")
        pointer = model.transformer
        # Walk down the module tree one scope segment at a time.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                # e.g. "h12" -> attribute "h", index 12
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # TF stores linear kernels transposed relative to torch.nn.Linear.
        if name[-1] == "w" and name[-2] in [
            "out_proj",
            "k_proj",
            "q_proj",
            "v_proj",
            "c_proj",
            "c_fc",
        ]:
            array = array.transpose()
        if name == ["wte"]:
            # Trim padded embedding rows down to the configured vocab size.
            array = array[: config.s_vocab]
        if pointer.shape != array.shape:
            raise ValueError(
                f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}"
            )
        print(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    # Tie the LM head to the input embeddings.
    # Bug fix: the original used `nn.Linear`, but `nn` was never imported
    # in this module; `torch` is.
    embs = model.transformer.wte.weight
    lin = torch.nn.Linear(embs.size()[1], embs.size()[0], bias=False)
    lin.weight = embs
    model.set_output_embeddings(lin)
    return model
def to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a GPT-Neo model from a JSON config, load TF weights, save it.

    Bug fix: the config file handle was previously opened and never closed;
    it is now managed with a ``with`` block.
    """
    with open(config_file, "r") as config_fp:
        config_json = json.load(config_fp)
    # NOTE(review): `GPTNeoConfig` is not imported anywhere in this module —
    # presumably it should be the config class imported at the top of the
    # file; confirm before running this script.
    cfg = GPTNeoConfig(
        d_hidden=config_json["n_embd"],
        n_lays=config_json["n_lays"],
        n_heads=config_json["n_heads"],
        attention_types=config_json["attention_types"],
        n_pos=config_json["n_pos"],
        drop_resid=config_json["res_dropout"],
        drop_embed=config_json["drop_embed"],
        drop_attn=config_json["attn_dropout"],
    )
    print(f"Building from config: {cfg}")
    m = ForCausal(cfg)
    load_src_weights(m, cfg, tf_checkpoint_path)
    print(f"Saving to: {pytorch_dump_path}")
    m.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert a TF checkpoint to a saved PyTorch model.
    parser = ArgumentParser()
    parser.add_argument("--src_path", default=None, type=str, required=True)
    parser.add_argument("--cfg_path", default=None, type=str, required=True)
    parser.add_argument("--save_path", default=None, type=str, required=True)
    args = parser.parse_args()
    to_pytorch(args.src_path, args.cfg_path, args.save_path)
|
# Autograder (OK-format) metadata for hidden question q2: a single doctest
# case that checks the SHA-256 digest of the rounded `prob_negative` value.
test = {
    "name": "q2",
    "points": 1,
    "hidden": True,
    "suites": [
        {
            "cases": [
                {
                    "code": r"""
                    >>> import hashlib
                    >>> hashlib.sha256(bytes(str(round(prob_negative, 4)), "utf-8")).hexdigest()
                    '22fa3ce4995af8d96fcd771f0e1f5d74d8a98f36c3eec8e95bdf7524926b0141'
                    """,
                    "hidden": False,
                    "locked": False,
                },
            ],
            "scored": False,
            "setup": "",
            "teardown": "",
            "type": "doctest",
        },
    ],
}
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#290. Word Pattern
#Given a pattern and a string str, find if str follows the same pattern.
#Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
#Examples:
#pattern = "abba", str = "dog cat cat dog" should return true.
#pattern = "abba", str = "dog cat cat fish" should return false.
#pattern = "aaaa", str = "dog cat cat dog" should return false.
#pattern = "abba", str = "dog dog dog dog" should return false.
#Notes:
#You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space.
#Credits:
#Special thanks to @minglotus6 for adding this problem and creating all test cases.
#class Solution(object):
# def wordPattern(self, pattern, str):
# """
# :type pattern: str
# :type str: str
# :rtype: bool
# """
# Time Is Money
|
# -*- coding: utf-8 -*-
# Universidade Federal de Goias
# Instituto de Informática - INF
# Compiladores - Compilador para MGol
#
# Módulo: Tabela de Transições
# Este módulo preenche a tabela de transições do
# autômato finito determinístico da linguagem,
# implementado através de uma lista de dicionários.
#
# Alunos: Carlos Henrique Rorato Souza
# e Larissa Santos de Azevedo
import string

# Numeric identifiers for the states of the MGol lexer's DFA.
# (Meanings are taken from the transitions set up in preenche_tabela_dfa.)
estadoInicial = 0                  # start state
estadoNum = 1                      # digits seen (integer literal, accepting)
estadoNumPonto = 2                 # digits then '.', awaiting fraction digits
estadoNumPontoFinal = 3            # real literal (accepting)
estadoNumExpoente1 = 4             # 'E'/'e' seen, awaiting sign or digits
estadoNumExpoente2 = 5             # exponent sign seen, awaiting digits
estadoNumExpoenteFinal = 6         # exponent digits seen (accepting)
estadoLiteral = 7                  # inside a "..." string literal
estadoLiteralFinal = 8             # closing '"' seen (accepting)
estadoId = 9                       # identifier (accepting)
estadoComentario = 10              # inside a {...} comment
estadoComentarioFinal = 11         # closing '}' seen (accepting)
estadoOPM = 12                     # arithmetic operator + - * / (accepting)
estadoOPRMenor = 13                # '<' seen (accepting)
estadoRCB = 14                     # '<' then '-' (accepting)
estadoOPRMenorIgualDiferente = 15  # '<=' or '<>' (accepting)
estadoOPRMaior = 16                # '>' seen (accepting)
estadoOPRMaiorIgual = 17           # '>=' (accepting)
estadoOPRIgual = 18                # '=' (accepting)
estadoABP = 19                     # '(' (accepting)
estadoFCP = 20                     # ')' (accepting)
estadoPTV = 21                     # ';' (accepting)
# Função que preenche a tabela de transição do autômato
# Fills in the transition table of the language's deterministic automaton.
def preenche_tabela_dfa(Tabela_Transicao):
    """Append the 22 DFA state rows (dicts) to ``Tabela_Transicao``.

    Each row maps an input character to the number of the next state and
    carries a "final" key telling whether the state is accepting.  Rows are
    appended in state-number order (0..21).
    """
    digits = [str(d) for d in range(10)]

    # State 0 - estadoInicial
    row = {"final": False}
    row.update({c: estadoId for c in string.ascii_letters})
    row.update({d: estadoNum for d in digits})
    row.update({"\n": estadoInicial, " ": estadoInicial, "\t": estadoInicial})
    row["\""] = estadoLiteral
    row["{"] = estadoComentario
    row.update({"<": estadoOPRMenor, ">": estadoOPRMaior, "=": estadoOPRIgual})
    row.update({"+": estadoOPM, "-": estadoOPM, "*": estadoOPM, "/": estadoOPM})
    row.update({"(": estadoABP, ")": estadoFCP, ";": estadoPTV})
    Tabela_Transicao.append(row)

    # State 1 - estadoNum
    row = {"final": True}
    row.update({d: estadoNum for d in digits})
    row.update({".": estadoNumPonto, "E": estadoNumExpoente1, "e": estadoNumExpoente1})
    Tabela_Transicao.append(row)

    # State 2 - estadoNumPonto
    row = {"final": False}
    row.update({d: estadoNumPontoFinal for d in digits})
    Tabela_Transicao.append(row)

    # State 3 - estadoNumPontoFinal
    row = {"final": True}
    row.update({d: estadoNumPontoFinal for d in digits})
    row.update({"E": estadoNumExpoente1, "e": estadoNumExpoente1})
    Tabela_Transicao.append(row)

    # State 4 - estadoNumExpoente1
    row = {"final": False}
    row.update({d: estadoNumExpoenteFinal for d in digits})
    row.update({"+": estadoNumExpoente2, "-": estadoNumExpoente2})
    Tabela_Transicao.append(row)

    # State 5 - estadoNumExpoente2
    row = {"final": False}
    row.update({d: estadoNumExpoenteFinal for d in digits})
    Tabela_Transicao.append(row)

    # State 6 - estadoNumExpoenteFinal
    row = {"final": True}
    row.update({d: estadoNumExpoenteFinal for d in digits})
    Tabela_Transicao.append(row)

    # State 7 - estadoLiteral: any printable char stays in the literal,
    # except the closing quote.
    row = {"final": False}
    row.update({c: estadoLiteral for c in string.printable if c != "\""})
    row["\""] = estadoLiteralFinal
    Tabela_Transicao.append(row)

    # State 8 - estadoLiteralFinal (accepting, no outgoing transitions)
    Tabela_Transicao.append({"final": True})

    # State 9 - estadoId
    row = {"final": True}
    row.update({d: estadoId for d in digits})
    row.update({c: estadoId for c in string.ascii_letters})
    row["_"] = estadoId
    Tabela_Transicao.append(row)

    # State 10 - estadoComentario: everything stays in the comment except
    # the closing brace (the later "}" entry overrides the printable one).
    row = {"final": False}
    row.update({c: estadoComentario for c in string.printable})
    row.update({"\n": estadoComentario, "\t": estadoComentario})
    row["}"] = estadoComentarioFinal
    Tabela_Transicao.append(row)

    # State 11 - estadoComentarioFinal
    Tabela_Transicao.append({"final": True})

    # State 12 - estadoOPM
    Tabela_Transicao.append({"final": True})

    # State 13 - estadoOPRMenor: '<' may extend to '<-', '<=' or '<>'.
    row = {"final": True, "-": estadoRCB}
    row.update({">": estadoOPRMenorIgualDiferente, "=": estadoOPRMenorIgualDiferente})
    Tabela_Transicao.append(row)

    # State 14 - estadoRCB
    Tabela_Transicao.append({"final": True})

    # State 15 - estadoOPRMenorIgualDiferente
    Tabela_Transicao.append({"final": True})

    # State 16 - estadoOPRMaior: '>' may extend to '>='.
    Tabela_Transicao.append({"final": True, "=": estadoOPRMaiorIgual})

    # States 17-21 - estadoOPRMaiorIgual, estadoOPRIgual, estadoABP,
    # estadoFCP, estadoPTV: all accepting, no outgoing transitions.
    for _ in range(5):
        Tabela_Transicao.append({"final": True})
# Aqui algumas funções para teste
#Teste = []
#preenche_tabela_dfa(Teste)
##Imprime toda a tabela de transições
#i = 0
#for k in Teste:
# print("Estado:" + str(i) + " " + str(Teste[i]) + "\n")
# i+=1
|
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# Fetches a pinned SFML fork and patches its pthread-based thread-local
# storage into a plain pointer field.
def sfml():
    http_archive(
        name = "sfml",
        build_file = "//bazel/deps/sfml:build.BUILD",
        sha256 = "6b013624aa9a916da2d37180772031e963098494538f59a14f40e00db23c9077",
        strip_prefix = "SFML-257e50beb886f1edebeebbde1903169da4eca39f",
        urls = [
            "https://github.com/Unilang/SFML/archive/257e50beb886f1edebeebbde1903169da4eca39f.tar.gz",
        ],
        patch_cmds = [
            "sed -i 's/.*m_key(0).*/ptr(nullptr)/' src/SFML/System/Unix/ThreadLocalImpl.cpp",
            "sed -i 's/.*pthread_key_create.*/ptr = nullptr;/' src/SFML/System/Unix/ThreadLocalImpl.cpp",
            "sed -i 's/.*pthread_key_delete.*//' src/SFML/System/Unix/ThreadLocalImpl.cpp",
            "sed -i 's/.*pthread_setspecific.*/ptr = value;/' src/SFML/System/Unix/ThreadLocalImpl.cpp",
            "sed -i 's/.*pthread_getspecific.*/return ptr;/' src/SFML/System/Unix/ThreadLocalImpl.cpp",
            "sed -i 's/.*pthread_key_t m_key.*/void* ptr=nullptr;/' src/SFML/System/Unix/ThreadLocalImpl.hpp",
        ],
    )
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: configuration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from app_store_sdk.model.collector_center import script_pb2 as app__store__sdk_dot_model_dot_collector__center_dot_script__pb2
from app_store_sdk.model.collector_center import target_range_pb2 as app__store__sdk_dot_model_dot_collector__center_dot_target__range__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='configuration.proto',
package='collector_center',
syntax='proto3',
serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/collector_center'),
serialized_pb=_b('\n\x13\x63onfiguration.proto\x12\x10\x63ollector_center\x1a\x31\x61pp_store_sdk/model/collector_center/script.proto\x1a\x37\x61pp_store_sdk/model/collector_center/target_range.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xf2\x03\n\rConfiguration\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12&\n\x06kwargs\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x0f\n\x07timeout\x18\x05 \x01(\x05\x12#\n\x03\x65nv\x18\x06 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x10\n\x08\x64isabled\x18\x07 \x01(\x08\x12\x0e\n\x06labels\x18\x08 \x03(\t\x12\x15\n\rignoreInvalid\x18\t \x01(\x08\x12(\n\x06script\x18\n \x01(\x0b\x32\x18.collector_center.Script\x12\x32\n\x0btargetRange\x18\x0b \x01(\x0b\x32\x1d.collector_center.TargetRange\x12\x10\n\x08interval\x18\x0c \x01(\x05\x12\x10\n\x08\x63\x61\x63heTtl\x18\r \x01(\x05\x12\x11\n\ttimeRange\x18\x0e \x01(\t\x12\x0f\n\x07\x63lazzId\x18\x0f \x01(\t\x12\x11\n\tclazzName\x18\x10 \x01(\t\x12\x0f\n\x07\x63reator\x18\x11 \x01(\t\x12\x10\n\x08modifier\x18\x12 \x01(\t\x12\r\n\x05\x63time\x18\x13 \x01(\x05\x12\r\n\x05mtime\x18\x14 \x01(\x05\x12\x10\n\x08objectId\x18\x15 \x01(\t\x12\x17\n\x0finstanceIdMacro\x18\x16 \x01(\tBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/collector_centerb\x06proto3')
,
dependencies=[app__store__sdk_dot_model_dot_collector__center_dot_script__pb2.DESCRIPTOR,app__store__sdk_dot_model_dot_collector__center_dot_target__range__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_CONFIGURATION = _descriptor.Descriptor(
name='Configuration',
full_name='collector_center.Configuration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org', full_name='collector_center.Configuration.org', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='collector_center.Configuration.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='collector_center.Configuration.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kwargs', full_name='collector_center.Configuration.kwargs', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeout', full_name='collector_center.Configuration.timeout', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env', full_name='collector_center.Configuration.env', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disabled', full_name='collector_center.Configuration.disabled', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='collector_center.Configuration.labels', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ignoreInvalid', full_name='collector_center.Configuration.ignoreInvalid', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='script', full_name='collector_center.Configuration.script', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetRange', full_name='collector_center.Configuration.targetRange', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interval', full_name='collector_center.Configuration.interval', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cacheTtl', full_name='collector_center.Configuration.cacheTtl', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeRange', full_name='collector_center.Configuration.timeRange', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clazzId', full_name='collector_center.Configuration.clazzId', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clazzName', full_name='collector_center.Configuration.clazzName', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='collector_center.Configuration.creator', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modifier', full_name='collector_center.Configuration.modifier', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='collector_center.Configuration.ctime', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='collector_center.Configuration.mtime', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='collector_center.Configuration.objectId', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceIdMacro', full_name='collector_center.Configuration.instanceIdMacro', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=678,
)
_CONFIGURATION.fields_by_name['kwargs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_CONFIGURATION.fields_by_name['env'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_CONFIGURATION.fields_by_name['script'].message_type = app__store__sdk_dot_model_dot_collector__center_dot_script__pb2._SCRIPT
_CONFIGURATION.fields_by_name['targetRange'].message_type = app__store__sdk_dot_model_dot_collector__center_dot_target__range__pb2._TARGETRANGE
DESCRIPTOR.message_types_by_name['Configuration'] = _CONFIGURATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Configuration = _reflection.GeneratedProtocolMessageType('Configuration', (_message.Message,), {
'DESCRIPTOR' : _CONFIGURATION,
'__module__' : 'configuration_pb2'
# @@protoc_insertion_point(class_scope:collector_center.Configuration)
})
_sym_db.RegisterMessage(Configuration)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
"""
Write a function `rock_paper_scissors` to generate all of the possible plays that can be made in a game of "Rock Paper Scissors", given some input `n`, which represents the number of plays per round.
For example, given n = 2, your function should output the following:
[['rock', 'rock'], ['rock', 'paper'], ['rock', 'scissors'], ['paper', 'rock'], ['paper', 'paper'], ['paper', 'scissors'], ['scissors', 'rock'], ['scissors', 'paper'], ['scissors', 'scissors']]
Your output should be a list of lists containing strings. Each inner list should have length equal to the input n.
* You'll want to define a list with all of the possible Rock Paper Scissors plays.
* Another problem that asks you to generate a bunch of permutations, so we're probably going to want to opt for using recursion again. Since we're building up a list of results, we'll have to pass the list we're constructing around to multiple recursive calls so that each recursive call can add to the overall result. However, the tests only give our function `n` as input. To get around this, we could define an inner recursive helper function that will perform the recursion for us, while allowing us to preserve the outer function's function signature.
* In Python, you can concatenate two lists with the `+` operator. However, you'll want to make sure that both operands are lists!
* If you opt to define an inner recursive helper function, don't forget to make an initial call to the recursive helper function to kick off the recursion.
Run the test file by executing `python test_rps.py`.
You can also test your implementation manually by executing `python rps.py [n]`.
"""
import sys
def rock_paper_scissors(n):
    """Return every possible sequence of ``n`` Rock Paper Scissors plays.

    The result is a list of lists of strings; each inner list has length
    ``n``.  For n == 2 there are 3**2 == 9 sequences, starting with
    ['rock', 'rock'].  For n == 0 the single empty sequence [[]] is
    returned.
    """
    plays = ['rock', 'paper', 'scissors']

    def generate(sequence):
        # Base case: a full sequence of n plays is one result.
        # (The original version also threaded an extra `num` argument
        # through the recursion, but it was reassigned and never used.)
        if len(sequence) == n:
            return [sequence]
        results = []
        for play in plays:
            results += generate(sequence + [play])
        return results

    # Kick off the recursion with an empty sequence.
    return generate([])
# Manual testing entry point: `python rps.py [n]` prints all sequences
# of n plays; with no argument, print a usage hint instead.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        num_plays = int(sys.argv[1])
        print(rock_paper_scissors(num_plays))
    else:
        print('Usage: rps.py [num_plays]')
|
import collections
from rtamt.operation.abstract_operation import AbstractOperation
class OnceBoundedOperation(AbstractOperation):
    """Online monitor for the bounded past-time "once" (eventually in the
    past) operator over the window [begin, end] samples ago.

    Maintains a fixed-size history of the last ``end + 1`` samples and
    returns the maximum over the portion of that history covered by the
    window.
    """

    def __init__(self, begin, end):
        # Window bounds, in number of samples into the past.
        self.begin = begin
        self.end = end
        # Delegate buffer setup to reset() — the original duplicated
        # this initialization code in both methods.
        self.reset()

    def reset(self):
        """(Re)initialize the history buffer to all -inf.

        -inf is the identity for max, so an unfilled history never
        dominates real samples.
        """
        # maxlen = end + 1 makes append() evict the oldest sample, so the
        # deque always holds exactly the last end + 1 samples.
        self.buffer = collections.deque(maxlen=(self.end + 1))
        for _ in range(self.end + 1):
            self.buffer.append(-float("inf"))

    def update(self, sample):
        """Push ``sample`` and return max over the window [t-end, t-begin].

        buffer[i] holds the sample from (end - i) steps ago, so indices
        0 .. end-begin cover exactly the bounded window.
        """
        self.buffer.append(sample)
        out = -float("inf")
        for i in range(self.end - self.begin + 1):
            out = max(out, self.buffer[i])
        return out
|
"""
WSGI config for s10day12bbs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# specifies a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "s10day12bbs.settings")
# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi)
# discover by convention.
application = get_wsgi_application()
|
################################################################################
## Form generated from reading UI file 'FlowchartCtrlTemplate.ui'
##
## Created by: Qt User Interface Compiler version 6.1.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from ..widgets.TreeWidget import TreeWidget
from ..widgets.FeedbackButton import FeedbackButton
class Ui_Form(object):
    """Auto-generated (pyside6-uic) UI builder for the flowchart control
    panel: load/save buttons, a control tree, and a filename label.

    Do not hand-edit logic here — regenerate from the .ui file instead
    (see the header warning above).
    """
    def setupUi(self, Form):
        """Create and lay out all widgets as children of ``Form``."""
        if not Form.objectName():
            Form.setObjectName(u"Form")
        Form.resize(217, 499)
        # Single grid layout; rows: 0 = filename, 1 = file buttons,
        # 3 = control tree, 4 = reload/show-chart buttons.
        self.gridLayout = QGridLayout(Form)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName(u"gridLayout")
        self.gridLayout.setVerticalSpacing(0)
        self.loadBtn = QPushButton(Form)
        self.loadBtn.setObjectName(u"loadBtn")
        self.gridLayout.addWidget(self.loadBtn, 1, 0, 1, 1)
        # FeedbackButton shows success/failure state after the action runs.
        self.saveBtn = FeedbackButton(Form)
        self.saveBtn.setObjectName(u"saveBtn")
        self.gridLayout.addWidget(self.saveBtn, 1, 1, 1, 2)
        self.saveAsBtn = FeedbackButton(Form)
        self.saveAsBtn.setObjectName(u"saveAsBtn")
        self.gridLayout.addWidget(self.saveAsBtn, 1, 3, 1, 1)
        self.reloadBtn = FeedbackButton(Form)
        self.reloadBtn.setObjectName(u"reloadBtn")
        self.reloadBtn.setCheckable(False)
        self.reloadBtn.setFlat(False)
        self.gridLayout.addWidget(self.reloadBtn, 4, 0, 1, 2)
        # Checkable toggle: shows/hides the flowchart window.
        self.showChartBtn = QPushButton(Form)
        self.showChartBtn.setObjectName(u"showChartBtn")
        self.showChartBtn.setCheckable(True)
        self.gridLayout.addWidget(self.showChartBtn, 4, 2, 1, 2)
        self.ctrlList = TreeWidget(Form)
        __qtreewidgetitem = QTreeWidgetItem()
        __qtreewidgetitem.setText(0, u"1");
        self.ctrlList.setHeaderItem(__qtreewidgetitem)
        self.ctrlList.setObjectName(u"ctrlList")
        self.ctrlList.header().setVisible(False)
        self.ctrlList.header().setStretchLastSection(False)
        self.gridLayout.addWidget(self.ctrlList, 3, 0, 1, 4)
        self.fileNameLabel = QLabel(Form)
        self.fileNameLabel.setObjectName(u"fileNameLabel")
        font = QFont()
        font.setBold(True)
        self.fileNameLabel.setFont(font)
        self.fileNameLabel.setAlignment(Qt.AlignCenter)
        self.gridLayout.addWidget(self.fileNameLabel, 0, 1, 1, 1)
        self.retranslateUi(Form)
        QMetaObject.connectSlotsByName(Form)
    # setupUi
    def retranslateUi(self, Form):
        """Assign user-visible (translatable) strings to the widgets."""
        Form.setWindowTitle(QCoreApplication.translate("Form", u"PyQtGraph", None))
        self.loadBtn.setText(QCoreApplication.translate("Form", u"Load..", None))
        self.saveBtn.setText(QCoreApplication.translate("Form", u"Save", None))
        self.saveAsBtn.setText(QCoreApplication.translate("Form", u"As..", None))
        self.reloadBtn.setText(QCoreApplication.translate("Form", u"Reload Libs", None))
        self.showChartBtn.setText(QCoreApplication.translate("Form", u"Flowchart", None))
        self.fileNameLabel.setText("")
    # retranslateUi
|
from aioify import aioify
from discord.ext import commands, tasks
import aiohttp
import aiosqlite
import asyncio
import discord
import json
import os
import shutil
class Events(commands.Cog):
    """Background tasks and Discord event listeners for AutoTSS."""

    def __init__(self, bot):
        self.bot = bot
        # Async wrappers so blocking filesystem calls don't stall the event loop.
        self.os = aioify(os, name='os')
        self.shutil = aioify(shutil, name='shutil')
        self.utils = self.bot.get_cog('Utils')
        self.auto_clean_db.start()
        self.signing_party_detection.start()
        self.auto_invalid_device_check.start()

    @tasks.loop()
    async def auto_clean_db(self) -> None:
        """Every 5 minutes, drop users that no longer have any devices."""
        async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss') as cursor:
            data = await cursor.fetchall()

        for user_devices in data:
            devices = json.loads(user_devices[0])

            if devices == list():
                async with aiosqlite.connect('Data/autotss.db') as db:
                    await db.execute('DELETE FROM autotss WHERE devices = ?', (user_devices[0],))
                    await db.commit()

        await asyncio.sleep(300)

    @auto_clean_db.before_loop
    async def before_auto_clean_db(self) -> None:
        await self.bot.wait_until_ready()
        await asyncio.sleep(3) # If first run, give on_ready() some time to create the database

    @tasks.loop()
    async def signing_party_detection(self) -> None:
        """Poll ipsw.me for firmwares that Apple starts re-signing and
        temporarily speed up the blob saver when one is detected."""
        async with aiohttp.ClientSession() as session:
            async with session.get('https://api.ipsw.me/v4/devices') as resp:
                devices = await resp.json()

            devices = [d for d in devices if any(x in d['identifier'] for x in ('iPhone', 'AppleTV', 'iPod', 'iPad'))]

            api = dict()
            for device in [d['identifier'] for d in devices]:
                api[device] = await self.utils.get_firms(session, device)

        try:
            self._api
        except AttributeError:
            # First run: cache the current signing state and wait for the
            # next iteration to compare against it.
            self._api = api
            return

        for device in self._api.keys():
            for firm in [x for x in self._api[device] if x['signed'] == False]:
                if any(new_firm['signed'] == True for new_firm in api[device] if new_firm['buildid'] == firm['buildid']):
                    print(f"[SIGN] Detected resigned firmware for: {device}, iOS {firm['version']}")
                    await self.utils.update_auto_saver_frequency(60) # Set blob saver frequency to 1 minute

                    tss = self.bot.get_cog('TSS') # Get TSS class
                    tss.blobs_loop = False
                    tss.auto_blob_saver.cancel() # Restart auto blob saver
                    await asyncio.sleep(1)
                    await self.utils.update_device_count()
                    tss.auto_blob_saver.start()

                    await asyncio.sleep(600) # Wait 10 minutes
                    await self.utils.update_auto_saver_frequency() # Set blob saver frequency back to 3 hours
                    tss.auto_blob_saver.cancel() # Restart auto blob saver
                    await asyncio.sleep(1)
                    tss.auto_blob_saver.start()
                    return
                else:
                    self._api[device] = api[device]

        await asyncio.sleep(30)

    @signing_party_detection.before_loop
    async def before_signing_party_detection(self) -> None:
        await self.bot.wait_until_ready()
        await asyncio.sleep(3) # If first run, give on_ready() some time to create the database

    @tasks.loop()
    async def auto_invalid_device_check(self) -> None: # If any users are saving SHSH blobs for A12+ devices without using custom apnonces, attempt to DM them saying they need to re-add the device
        """Every 3 days, find devices saving invalid SHSH blobs, remove
        them, and attempt to DM their owners with re-add instructions."""
        async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * FROM autotss') as cursor:
            data = await cursor.fetchall()

        if len(data) == 0:
            return

        invalid_devices = dict()

        async with aiohttp.ClientSession() as session:
            for userinfo in data:
                userid = userinfo[0]
                devices = json.loads(userinfo[1])
                invalid_devices[userid] = list()

                for device in devices:
                    cpid = await self.utils.get_cpid(session, device['identifier'], device['boardconfig'])

                    if (device['apnonce'] is not None) and (await self.utils.check_apnonce(cpid, device['apnonce']) == False):
                        invalid_devices[userid].append(device)
                        continue

                    if (device['generator'] is not None) and (await self.utils.check_generator(device['generator']) == False):
                        invalid_devices[userid].append(device)
                        continue

                    # assumes 32800 <= cpid < 35072 identifies A12-and-newer
                    # SoCs, which require a user-provided ApNonce — TODO confirm
                    if (32800 <= cpid < 35072) and (device['apnonce'] is None):
                        invalid_devices[userid].append(device)

        for userid in [x for x in invalid_devices.keys() if len(invalid_devices[x]) > 0]:
            embed = discord.Embed(title='Hey!')
            # BUGFIX: a missing comma after the second string used to fuse
            # lines 2 and 3 into one ("...blobs.To fix this...").
            msg = (
                'One or more of your devices were added incorrectly to AutoTSS, and are saving **invalid SHSH blobs**.',
                'Due to this, they have been removed from AutoTSS so they are no longer continuing to save invalid SHSH blobs.',
                'To fix this, please re-add the following devices to AutoTSS:'
            )
            embed.description = '\n'.join(msg)

            for device in invalid_devices[userid]:
                device_info = [
                    f"Device Identifier: `{device['identifier']}`",
                    f"ECID: `{device['ecid']}`",
                    f"Boardconfig: `{device['boardconfig']}`"
                ]

                if device['generator'] is not None:
                    device_info.insert(-1, f"Custom generator: `{device['generator']}`")

                if device['apnonce'] is not None:
                    device_info.insert(-1, f"Custom ApNonce: `{device['apnonce']}`")

                embed.add_field(name=f"**{device['name']}**", value='\n'.join(device_info))

            user = await self.bot.fetch_user(userid)

            try:
                await user.send(embed=embed)
            except:
                pass # Best-effort: the user may have DMs disabled.

            async with aiosqlite.connect('Data/autotss.db') as db:
                for device in invalid_devices[userid]:
                    await self.shutil.rmtree(f"Data/Blobs/{device['ecid']}")

                    async with db.execute('SELECT devices FROM autotss WHERE user = ?', (userid,)) as cursor:
                        devices = json.loads((await cursor.fetchone())[0])

                    devices.pop(next(devices.index(x) for x in devices if x['ecid'] == device['ecid']))
                    await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), userid))
                    await db.commit()

        await asyncio.sleep(259200)

    @auto_invalid_device_check.before_loop
    async def before_invalid_device_check(self) -> None:
        await self.bot.wait_until_ready()
        await asyncio.sleep(3) # If first run, give on_ready() some time to create the database

    @commands.Cog.listener()
    async def on_guild_join(self, guild: discord.Guild) -> None:
        """Set the default prefix for a new guild and announce ourselves."""
        await self.bot.wait_until_ready()

        async with aiosqlite.connect('Data/autotss.db') as db:
            async with db.execute('SELECT prefix from prefix WHERE guild = ?', (guild.id,)) as cursor:
                if await cursor.fetchone() is not None:
                    # Clear any stale prefix left over from a previous join.
                    await db.execute('DELETE from prefix where guild = ?', (guild.id,))
                    await db.commit()

            await db.execute('INSERT INTO prefix(guild, prefix) VALUES(?,?)', (guild.id, 'b!'))
            await db.commit()

        embed = await self.utils.info_embed('b!', self.bot.user)
        # Post the intro in the first channel that accepts our message.
        for channel in guild.text_channels:
            try:
                await channel.send(embed=embed)
                break
            except:
                pass

    @commands.Cog.listener()
    async def on_guild_remove(self, guild: discord.Guild) -> None:
        """Drop the stored prefix when we leave a guild."""
        await self.bot.wait_until_ready()

        async with aiosqlite.connect('Data/autotss.db') as db:
            await db.execute('DELETE from prefix where guild = ?', (guild.id,))
            await db.commit()

    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member) -> None:
        """Re-enable blob saving for a returning user we know about."""
        await self.bot.wait_until_ready()

        async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * from autotss WHERE user = ?', (member.id,)) as cursor:
            data = await cursor.fetchone()

        if data is None:
            return

        async with aiosqlite.connect('Data/autotss.db') as db:
            await db.execute('UPDATE autotss SET enabled = ? WHERE user = ?', (True, member.id))
            await db.commit()

        await self.utils.update_device_count()

    @commands.Cog.listener()
    async def on_member_remove(self, member: discord.Member) -> None:
        """Disable blob saving for users no longer sharing any guild with us."""
        await self.bot.wait_until_ready()

        async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * from autotss WHERE user = ?', (member.id,)) as cursor:
            data = await cursor.fetchone()

        if data is None:
            return

        if len(member.mutual_guilds) == 0:
            async with aiosqlite.connect('Data/autotss.db') as db:
                await db.execute('UPDATE autotss SET enabled = ? WHERE user = ?', (False, member.id))
                await db.commit()

            await self.utils.update_device_count()

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Reply with the guild's prefix when the bot is mentioned."""
        await self.bot.wait_until_ready()

        if message.channel.type == discord.ChannelType.private:
            return

        if message.content.replace(' ', '').replace('!', '') == self.bot.user.mention:
            whitelist = await self.utils.get_whitelist(message.guild.id)
            if (whitelist is not None) and (whitelist.id != message.channel.id):
                return

            prefix = await self.utils.get_prefix(message.guild.id)
            embed = discord.Embed(title='AutoTSS', description=f'My prefix is `{prefix}`. To see all of my commands, run `{prefix}help`.')
            embed.set_footer(text=message.author.name, icon_url=message.author.avatar_url_as(static_format='png'))

            try:
                await message.reply(embed=embed)
            except:
                pass # Best-effort: we may lack send permissions here.

    @commands.Cog.listener()
    async def on_ready(self) -> None:
        """Create the data directory and all tables, then refresh counters."""
        await self.os.makedirs('Data', exist_ok=True)

        async with aiosqlite.connect('Data/autotss.db') as db:
            await db.execute('''
                CREATE TABLE IF NOT EXISTS autotss(
                user INTEGER,
                devices JSON,
                enabled BOOLEAN
                )
                ''')
            await db.commit()

            await db.execute('''
                CREATE TABLE IF NOT EXISTS prefix(
                guild INTEGER,
                prefix TEXT
                )
                ''')
            await db.commit()

            await db.execute('''
                CREATE TABLE IF NOT EXISTS whitelist(
                guild INTEGER,
                channel INTEGER,
                enabled BOOLEAN
                )
                ''')
            await db.commit()

            await db.execute('''
                CREATE TABLE IF NOT EXISTS auto_frequency(
                time INTEGER
                )
                ''')
            await db.commit()

        await self.utils.update_device_count()
        await self.utils.update_auto_saver_frequency()
        print('AutoTSS is now online.')

    @commands.Cog.listener()
    async def on_command_error(self, ctx: commands.Context, error) -> None:
        """Translate common command errors into user-facing embeds."""
        await self.bot.wait_until_ready()

        embed = discord.Embed(title='Error')

        if ctx.message.channel.type == discord.ChannelType.private:
            embed.description = 'AutoTSS cannot be used in DMs. Please use AutoTSS in a Discord server.'
            await ctx.reply(embed=embed)
            return

        if await self.utils.whitelist_check(ctx) != True:
            return

        prefix = await self.utils.get_prefix(ctx.guild.id)

        if isinstance(error, commands.CommandNotFound):
            # A bare mention of the bot is handled by on_message, not here.
            if ctx.prefix.replace('!', '').replace(' ', '') == self.bot.user.mention:
                return

            embed.description = f"That command doesn't exist! Use `{prefix}help` to see all the commands I can run."
            await ctx.reply(embed=embed)

        elif isinstance(error, commands.MaxConcurrencyReached):
            embed.description = f"`{prefix + ctx.command.qualified_name}` cannot be ran more than once at the same time!"
            await ctx.reply(embed=embed)

        elif isinstance(error, commands.errors.CommandInvokeError):
            if isinstance(error.original, discord.errors.Forbidden):
                embed.description = f"I don't have the proper permissions to run correctly! \
                Please ping an Administrator and tell them to kick & re-invite me using \
                [this]({self.utils.invite}) link to fix this issue."

                message_sent = False
                for channel in ctx.guild.text_channels:
                    try:
                        await channel.send(embed=embed)
                        message_sent = True
                        break
                    except:
                        pass

                if message_sent:
                    return

                try:
                    embed.description = f"I don't have the proper permissions to run correctly! \
                    Please kick me from `{ctx.guild.name}` & re-invite me using \
                    [this]({self.utils.invite}) link to fix this issue."
                    await ctx.guild.owner.send(embed=embed)
                except: # We can't tell the user to tell an admin to fix our permissions, we can't DM the owner to fix it, we might as well leave.
                    await ctx.guild.leave()
            else:
                raise error

        elif isinstance(error, commands.ChannelNotFound):
            embed = discord.Embed(title='Error', description='That channel does not exist.')
            await ctx.reply(embed=embed)

        elif (isinstance(error, commands.errors.NotOwner)) or \
        (isinstance(error, commands.MissingPermissions)):
            return

        else:
            raise error
def setup(bot):
    # discord.py extension entry point: register the Events cog with the bot.
    bot.add_cog(Events(bot))
|
"""
WSGI config for mozblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# NOTE(review): `whitenoise.django.DjangoWhiteNoise` was removed in
# WhiteNoise 4.0 in favor of `whitenoise.middleware.WhiteNoise` in
# MIDDLEWARE — confirm the pinned whitenoise version before upgrading.
from whitenoise.django import DjangoWhiteNoise
# Point Django at the project settings unless already configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mozblog.settings")
application = get_wsgi_application()
# Wrap the WSGI callable so WhiteNoise serves static files directly.
application = DjangoWhiteNoise(application)
|
from tkinter import *
# Build the main window: a single centered button that turns yellow on click.
window = Tk()
window.title("Special Midterm Exam in OOP")
window.geometry("700x500+20+10")


def turn_yellow():
    # Click handler: recolor the button's background.
    btn.configure(bg="yellow")


btn = Button(window, text="Click to Change Color", command=turn_yellow)
btn.place(x=350, y=250, anchor="center")

# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Glue between metadata sources and the matching logic."""
from __future__ import division, absolute_import, print_function
from collections import namedtuple
from functools import total_ordering
import re
from beets import logging
from beets import plugins
from beets import config
from beets.util import as_string
from beets.autotag import mb
from jellyfish import levenshtein_distance
from unidecode import unidecode
import six
log = logging.getLogger('beets')
# The name of the type for patterns in re changed in Python 3.7.
try:
    # Python < 3.7: the compiled-pattern type is only exposed privately.
    Pattern = re._pattern_type
except AttributeError:
    # Python >= 3.7: re.Pattern is the public name.
    Pattern = re.Pattern
# Classes used to represent candidate options.
class AlbumInfo(object):
    """Describes a canonical release that may be used to match a release
    in the library. Consists of these data members:
    - ``album``: the release title
    - ``album_id``: MusicBrainz ID; UUID fragment only
    - ``artist``: name of the release's primary artist
    - ``artist_id``
    - ``tracks``: list of TrackInfo objects making up the release
    - ``asin``: Amazon ASIN
    - ``albumtype``: string describing the kind of release
    - ``va``: boolean: whether the release has "various artists"
    - ``year``: release year
    - ``month``: release month
    - ``day``: release day
    - ``label``: music label responsible for the release
    - ``mediums``: the number of discs in this release
    - ``artist_sort``: name of the release's artist for sorting
    - ``releasegroup_id``: MBID for the album's release group
    - ``catalognum``: the label's catalog number for the release
    - ``script``: character set used for metadata
    - ``language``: human language of the metadata
    - ``country``: the release country
    - ``albumstatus``: MusicBrainz release status (Official, etc.)
    - ``media``: delivery mechanism (Vinyl, etc.)
    - ``albumdisambig``: MusicBrainz release disambiguation comment
    - ``releasegroupdisambig``: MusicBrainz release group
      disambiguation comment.
    - ``artist_credit``: Release-specific artist name
    - ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
    - ``data_url``: The data source release URL.
    ``mediums`` along with the fields up through ``tracks`` are required.
    The others are optional and may be None.
    """
    # Every constructor parameter maps 1:1 onto an attribute of the
    # same name; keep this tuple in sync with the signature below.
    _FIELDS = (
        'album', 'album_id', 'artist', 'artist_id', 'tracks', 'asin',
        'albumtype', 'va', 'year', 'month', 'day', 'label', 'mediums',
        'artist_sort', 'releasegroup_id', 'catalognum', 'script',
        'language', 'country', 'style', 'genre', 'albumstatus', 'media',
        'albumdisambig', 'releasegroupdisambig', 'artist_credit',
        'original_year', 'original_month', 'original_day', 'data_source',
        'data_url', 'discogs_albumid', 'discogs_labelid',
        'discogs_artistid',
    )

    def __init__(self, album, album_id, artist, artist_id, tracks, asin=None,
                 albumtype=None, va=False, year=None, month=None, day=None,
                 label=None, mediums=None, artist_sort=None,
                 releasegroup_id=None, catalognum=None, script=None,
                 language=None, country=None, style=None, genre=None,
                 albumstatus=None, media=None, albumdisambig=None,
                 releasegroupdisambig=None, artist_credit=None,
                 original_year=None, original_month=None,
                 original_day=None, data_source=None, data_url=None,
                 discogs_albumid=None, discogs_labelid=None,
                 discogs_artistid=None):
        # Copy each argument onto the matching attribute.
        argvals = dict(locals())
        for field_name in self._FIELDS:
            setattr(self, field_name, argvals[field_name])

    # Work around a bug in python-musicbrainz-ngs that causes some
    # strings to be bytes rather than Unicode.
    # https://github.com/alastair/python-musicbrainz-ngs/issues/85
    def decode(self, codec='utf-8'):
        """Ensure that all string attributes on this object, and the
        constituent `TrackInfo` objects, are decoded to Unicode.
        """
        text_fields = ('album', 'artist', 'albumtype', 'label',
                       'artist_sort', 'catalognum', 'script', 'language',
                       'country', 'style', 'genre', 'albumstatus',
                       'albumdisambig', 'releasegroupdisambig',
                       'artist_credit', 'media', 'discogs_albumid',
                       'discogs_labelid', 'discogs_artistid')
        for field_name in text_fields:
            current = getattr(self, field_name)
            if isinstance(current, bytes):
                # Replace the bytes value with its decoded text form.
                setattr(self, field_name, current.decode(codec, 'ignore'))
        # Cascade the decoding into the track list, if any.
        for track in self.tracks or ():
            track.decode(codec)
class TrackInfo(object):
    """Describes a canonical track present on a release. Appears as part
    of an AlbumInfo's ``tracks`` list. Consists of these data members:
    - ``title``: name of the track
    - ``track_id``: MusicBrainz ID; UUID fragment only
    - ``release_track_id``: MusicBrainz ID respective to a track on a
      particular release; UUID fragment only
    - ``artist``: individual track artist name
    - ``artist_id``
    - ``length``: float: duration of the track in seconds
    - ``index``: position on the entire release
    - ``media``: delivery mechanism (Vinyl, etc.)
    - ``medium``: the disc number this track appears on in the album
    - ``medium_index``: the track's position on the disc
    - ``medium_total``: the number of tracks on the item's disc
    - ``artist_sort``: name of the track artist for sorting
    - ``disctitle``: name of the individual medium (subtitle)
    - ``artist_credit``: Recording-specific artist name
    - ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
    - ``data_url``: The data source release URL.
    - ``lyricist``: individual track lyricist name
    - ``composer``: individual track composer name
    - ``composer_sort``: individual track composer sort name
    - ``arranger`: individual track arranger name
    - ``track_alt``: alternative track number (tape, vinyl, etc.)
    - ``work`: individual track work title
    - ``mb_workid`: individual track work id
    - ``work_disambig`: individual track work diambiguation
    Only ``title`` and ``track_id`` are required. The rest of the fields
    may be None. The indices ``index``, ``medium``, and ``medium_index``
    are all 1-based.
    """
    # One attribute per constructor parameter; keep in sync with the
    # signature below.
    _FIELDS = (
        'title', 'track_id', 'release_track_id', 'artist', 'artist_id',
        'length', 'index', 'media', 'medium', 'medium_index',
        'medium_total', 'artist_sort', 'disctitle', 'artist_credit',
        'data_source', 'data_url', 'lyricist', 'composer',
        'composer_sort', 'arranger', 'track_alt', 'work', 'mb_workid',
        'work_disambig', 'bpm', 'initial_key', 'genre',
    )

    def __init__(self, title, track_id, release_track_id=None, artist=None,
                 artist_id=None, length=None, index=None, medium=None,
                 medium_index=None, medium_total=None, artist_sort=None,
                 disctitle=None, artist_credit=None, data_source=None,
                 data_url=None, media=None, lyricist=None, composer=None,
                 composer_sort=None, arranger=None, track_alt=None,
                 work=None, mb_workid=None, work_disambig=None, bpm=None,
                 initial_key=None, genre=None):
        # Copy each argument onto the matching attribute.
        argvals = dict(locals())
        for field_name in self._FIELDS:
            setattr(self, field_name, argvals[field_name])

    # As above, work around a bug in python-musicbrainz-ngs.
    def decode(self, codec='utf-8'):
        """Ensure that all string attributes on this object are decoded
        to Unicode.
        """
        text_fields = ('title', 'artist', 'medium', 'artist_sort',
                       'disctitle', 'artist_credit', 'media')
        for field_name in text_fields:
            current = getattr(self, field_name)
            if isinstance(current, bytes):
                # Replace the bytes value with its decoded text form.
                setattr(self, field_name, current.decode(codec, 'ignore'))
# Candidate distance scoring.
# Parameters for string distance function.
# Words that can be moved to the end of a string using a comma.
SD_END_WORDS = ['the', 'a', 'an']
# Reduced weights for certain portions of the string.
# Each entry is a (regex, weight) pair: distance contributed by text
# matching the regex is scaled down to `weight` of its normal penalty
# (0.0 means the matched portion is ignored entirely).
SD_PATTERNS = [
    (r'^the ', 0.1),
    (r'[\[\(]?(ep|single)[\]\)]?', 0.0),
    (r'[\[\(]?(featuring|feat|ft)[\. :].+', 0.1),
    (r'\(.*?\)', 0.3),
    (r'\[.*?\]', 0.3),
    (r'(, )?(pt\.|part) .+', 0.2),
]
# Replacements to use before testing distance.
SD_REPLACE = [
    (r'&', 'and'),
]
def _string_dist_basic(str1, str2):
    """Basic edit distance between two strings, ignoring
    non-alphanumeric characters and case. Comparisons are based on a
    transliteration/lowering to ASCII characters. Normalized by string
    length.
    """
    assert isinstance(str1, six.text_type)
    assert isinstance(str2, six.text_type)
    # Transliterate to ASCII, lowercase, and keep only letters/digits.
    normalized = []
    for s in (str1, str2):
        ascii_form = as_string(unidecode(s))
        normalized.append(re.sub(r'[^a-z0-9]', '', ascii_form.lower()))
    left, right = normalized
    # Two empty strings are identical by definition (avoid 0/0 below).
    if not left and not right:
        return 0.0
    return levenshtein_distance(left, right) / float(max(len(left),
                                                         len(right)))
def string_dist(str1, str2):
    """Gives an "intuitive" edit distance between two strings. This is
    an edit distance, normalized by the string length, with a number of
    tweaks that reflect intuition about text.

    Either argument may be None: two Nones are identical (0.0) and a
    None against a string is maximally distant (1.0).
    """
    if str1 is None and str2 is None:
        return 0.0
    if str1 is None or str2 is None:
        return 1.0
    str1 = str1.lower()
    str2 = str2.lower()
    # Don't penalize strings that move certain words to the end. For
    # example, "the something" should be considered equal to
    # "something, the".
    for word in SD_END_WORDS:
        if str1.endswith(', %s' % word):
            str1 = '%s %s' % (word, str1[:-len(word) - 2])
        if str2.endswith(', %s' % word):
            str2 = '%s %s' % (word, str2[:-len(word) - 2])
    # Perform a couple of basic normalizing substitutions.
    for pat, repl in SD_REPLACE:
        str1 = re.sub(pat, repl, str1)
        str2 = re.sub(pat, repl, str2)
    # Change the weight for certain string portions matched by a set
    # of regular expressions. We gradually change the strings and build
    # up penalties associated with parts of the string that were
    # deleted.
    base_dist = _string_dist_basic(str1, str2)
    penalty = 0.0
    for pat, weight in SD_PATTERNS:
        # Get strings that drop the pattern.
        case_str1 = re.sub(pat, '', str1)
        case_str2 = re.sub(pat, '', str2)
        if case_str1 != str1 or case_str2 != str2:
            # If the pattern was present (i.e., it is deleted in the
            # the current case), recalculate the distances for the
            # modified strings.
            case_dist = _string_dist_basic(case_str1, case_str2)
            case_delta = max(0.0, base_dist - case_dist)
            if case_delta == 0.0:
                continue
            # Shift our baseline strings down (to avoid rematching the
            # same part of the string) and add a scaled distance
            # amount to the penalties.
            str1 = case_str1
            str2 = case_str2
            base_dist = case_dist
            penalty += weight * case_delta
    return base_dist + penalty
class LazyClassProperty(object):
    """A decorator implementing a read-only property that is *lazy* in
    the sense that the getter is only invoked once. Subsequent accesses
    through *any* instance use the cached result.
    """
    def __init__(self, getter):
        self.getter = getter
        # Flipped to True after the first access; `value` is only set then.
        self.computed = False

    def __get__(self, obj, owner):
        # Fast path: hand back the cached result on every later access.
        if self.computed:
            return self.value
        # First access: run the getter once (against the owner class,
        # property-style) and remember the result forever.
        self.value = self.getter(owner)
        self.computed = True
        return self.value
@total_ordering
@six.python_2_unicode_compatible
class Distance(object):
"""Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance
for each individual penalty.
"""
def __init__(self):
self._penalties = {}
@LazyClassProperty
def _weights(cls): # noqa
"""A dictionary from keys to floating-point weights.
"""
weights_view = config['match']['distance_weights']
weights = {}
for key in weights_view.keys():
weights[key] = weights_view[key].as_number()
return weights
# Access the components and their aggregates.
@property
def distance(self):
"""Return a weighted and normalized distance across all
penalties.
"""
dist_max = self.max_distance
if dist_max:
return self.raw_distance / self.max_distance
return 0.0
@property
def max_distance(self):
"""Return the maximum distance penalty (normalization factor).
"""
dist_max = 0.0
for key, penalty in self._penalties.items():
dist_max += len(penalty) * self._weights[key]
return dist_max
@property
def raw_distance(self):
"""Return the raw (denormalized) distance.
"""
dist_raw = 0.0
for key, penalty in self._penalties.items():
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self):
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
"""
list_ = []
for key in self._penalties:
dist = self[key]
if dist:
list_.append((key, dist))
# Convert distance into a negative float we can sort items in
# ascending order (for keys, when the penalty is equal) and
# still get the items with the biggest distance first.
return sorted(
list_,
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
def __hash__(self):
return id(self)
def __eq__(self, other):
return self.distance == other
# Behave like a float.
def __lt__(self, other):
return self.distance < other
def __float__(self):
return self.distance
def __sub__(self, other):
return self.distance - other
def __rsub__(self, other):
return other - self.distance
def __str__(self):
return "{0:.2f}".format(self.distance)
# Behave like a dict.
def __getitem__(self, key):
"""Returns the weighted distance for a named penalty.
"""
dist = sum(self._penalties[key]) * self._weights[key]
dist_max = self.max_distance
if dist_max:
return dist / dist_max
return 0.0
def __iter__(self):
return iter(self.items())
def __len__(self):
return len(self.items())
def keys(self):
return [key for key, _ in self.items()]
def update(self, dist):
"""Adds all the distance penalties from `dist`.
"""
if not isinstance(dist, Distance):
raise ValueError(
u'`dist` must be a Distance object, not {0}'.format(type(dist))
)
for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties)
# Adding components.
def _eq(self, value1, value2):
"""Returns True if `value1` is equal to `value2`. `value1` may
be a compiled regular expression, in which case it will be
matched against `value2`.
"""
if isinstance(value1, Pattern):
return bool(value1.match(value2))
return value1 == value2
def add(self, key, dist):
"""Adds a distance penalty. `key` must correspond with a
configured weight setting. `dist` must be a float between 0.0
and 1.0, and will be added to any existing distance penalties
for the same key.
"""
if not 0.0 <= dist <= 1.0:
raise ValueError(
u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist)
)
self._penalties.setdefault(key, []).append(dist)
def add_equality(self, key, value, options):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
expression, it will be considered equal if it matches against
`value`.
"""
if not isinstance(options, (list, tuple)):
options = [options]
for opt in options:
if self._eq(opt, value):
dist = 0.0
break
else:
dist = 1.0
self.add(key, dist)
def add_expr(self, key, expr):
"""Adds a distance penalty of 1.0 if `expr` evaluates to True,
or 0.0.
"""
if expr:
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_number(self, key, number1, number2):
"""Adds a distance penalty of 1.0 for each number of difference
between `number1` and `number2`, or 0.0 when there is no
difference. Use this when there is no upper limit on the
difference between the two numbers.
"""
diff = abs(number1 - number2)
if diff:
for i in range(diff):
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_priority(self, key, value, options):
    """Adds a distance penalty that corresponds to the position at
    which `value` appears in `options`. A distance penalty of 0.0
    for the first option, or 1.0 if there is no matching option. If
    an option is a compiled regular expression, it will be
    considered equal if it matches against `value`.
    """
    if not isinstance(options, (list, tuple)):
        options = [options]
    # Evenly spread penalties across the option positions.
    unit = 1.0 / (len(options) or 1)
    penalty = 1.0
    for position, candidate in enumerate(options):
        if self._eq(candidate, value):
            penalty = position * unit
            break
    self.add(key, penalty)
def add_ratio(self, key, number1, number2):
    """Adds a distance penalty for `number1` as a ratio of `number2`.
    `number1` is bound at 0 and `number2`.
    """
    # Clamp number1 into [0, number2] before taking the ratio.
    clamped = float(max(min(number1, number2), 0))
    if number2:
        self.add(key, clamped / number2)
    else:
        self.add(key, 0.0)
def add_string(self, key, str1, str2):
    """Adds a distance penalty based on the edit distance between
    `str1` and `str2`.
    """
    self.add(key, string_dist(str1, str2))
# Structures that compose all the information for a candidate match.
# `distance` is a Distance object and `info` the candidate metadata;
# AlbumMatch additionally carries the item->track `mapping` plus the
# unmatched `extra_items`/`extra_tracks` on either side.
AlbumMatch = namedtuple('AlbumMatch', ['distance', 'info', 'mapping',
                                       'extra_items', 'extra_tracks'])
TrackMatch = namedtuple('TrackMatch', ['distance', 'info'])
# Aggregation of sources.
def album_for_mbid(release_id):
    """Get an AlbumInfo object for a MusicBrainz release ID. Return None
    if the ID is not found.
    """
    try:
        result = mb.album_for_id(release_id)
        if result:
            # Let plugins post-process the metadata before it is used.
            plugins.send(u'albuminfo_received', info=result)
        return result
    except mb.MusicBrainzAPIError as exc:
        # Log and fall through, implicitly returning None.
        exc.log(log)
def track_for_mbid(recording_id):
    """Get a TrackInfo object for a MusicBrainz recording ID. Return None
    if the ID is not found.
    """
    try:
        result = mb.track_for_id(recording_id)
        if result:
            # Let plugins post-process the metadata before it is used.
            plugins.send(u'trackinfo_received', info=result)
        return result
    except mb.MusicBrainzAPIError as exc:
        # Log and fall through, implicitly returning None.
        exc.log(log)
def albums_for_id(album_id):
    """Get a list of albums for an ID."""
    # MusicBrainz first (already sends the plugin event itself).
    candidate = album_for_mbid(album_id)
    if candidate:
        yield candidate
    # Then whatever the plugins can resolve for this ID.
    for candidate in plugins.album_for_id(album_id):
        if candidate:
            plugins.send(u'albuminfo_received', info=candidate)
            yield candidate
def tracks_for_id(track_id):
    """Get a list of tracks for an ID."""
    # MusicBrainz first (already sends the plugin event itself).
    candidate = track_for_mbid(track_id)
    if candidate:
        yield candidate
    # Then whatever the plugins can resolve for this ID.
    for candidate in plugins.track_for_id(track_id):
        if candidate:
            plugins.send(u'trackinfo_received', info=candidate)
            yield candidate
@plugins.notify_info_yielded(u'albuminfo_received')
def album_candidates(items, artist, album, va_likely):
    """Search for album matches. ``items`` is a list of Item objects
    that make up the album. ``artist`` and ``album`` are the respective
    names (strings), which may be derived from the item list or may be
    entered by the user. ``va_likely`` is a boolean indicating whether
    the album is likely to be a "various artists" release.
    """
    # Collect the MusicBrainz artist queries to run: the named artist
    # first, then an artist-less (VA) search when appropriate.
    searches = []
    if artist and album:
        searches.append(artist)
    if va_likely and album:
        searches.append(None)
    for search_artist in searches:
        try:
            for candidate in mb.match_album(search_artist, album, len(items)):
                yield candidate
        except mb.MusicBrainzAPIError as exc:
            exc.log(log)
    # Candidates from plugins.
    for candidate in plugins.candidates(items, artist, album, va_likely):
        yield candidate
@plugins.notify_info_yielded(u'trackinfo_received')
def item_candidates(item, artist, title):
    """Search for item matches. ``item`` is the Item to be matched.
    ``artist`` and ``title`` are strings and either reflect the item or
    are specified by the user.
    """
    # MusicBrainz candidates, when we have enough metadata to search.
    if artist and title:
        try:
            for match in mb.match_track(artist, title):
                yield match
        except mb.MusicBrainzAPIError as exc:
            exc.log(log)
    # Plugin candidates.
    for match in plugins.item_candidates(item, artist, title):
        yield match
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
from ..core.models import TimeStampedModel
class Medicos(TimeStampedModel):
    """
    Medicos de la institucion (doctors of the institution).
    """
    nombre = models.CharField(max_length=100, blank=False, null=False)
    apellido = models.CharField(max_length=100, blank=False, null=False)
    # FIX: `max_length` is ignored by IntegerField (Django system check
    # fields.W122), so it was removed. If 8-digit DNIs must be enforced,
    # add Min/MaxValueValidator — note an IntegerField also drops leading
    # zeros, which may matter for DNIs.
    dni = models.IntegerField(blank=False, null=False, unique=True)
    foto = ThumbnailerImageField(blank=True, null=True, verbose_name=u"Foto del médico",
                                 upload_to="medicos_fotos")

    def get_absolute_url(self):
        """URL to redirect to after create/update: the doctors list view."""
        return reverse('medicos:list')

    def __unicode__(self):
        """Human-readable representation: "<nombre> <apellido>"."""
        return self.nombre + ' ' + self.apellido
|
from spacy.lang.en import English
from spacy.lang.de import German
from torchtext.datasets import IWSLT2017
from torchtext.vocab import build_vocab_from_iterator
# Define special symbols and indices
PAD_IDX, UNK_IDX, BOS_IDX, EOS_IDX = 0, 1, 2, 3
# Make sure the tokens are in order of their indices to properly insert them in vocab
# (passed as `specials` to build_vocab_from_iterator below).
special_symbols = ['<pad>', '<unk>', '<bos>', '<eos>']
# --- build vocabularies ---
class Tokenizer:
    """Callable wrapper turning a spaCy tokenizer into list-of-string output."""

    def __init__(self, spacy):
        # `spacy` is any callable returning token objects with a `.text` attribute.
        self.spacy = spacy

    def __call__(self, text):
        tokens = self.spacy(text)
        return [token.text for token in tokens]
def vocab():
    """Build English and German vocabularies from the IWSLT2017 train split.

    Sequences that are empty or longer than 100 tokens are skipped, tokens
    must occur at least twice, and the special symbols occupy indices 0-3.
    Out-of-vocabulary lookups default to '<unk>'.

    Returns:
        tuple: ``(vocab_en, vocab_de)`` torchtext vocab objects.
    """
    tokenizer_en = Tokenizer(English().tokenizer)
    tokenizer_de = Tokenizer(German().tokenizer)

    # FIX: parameter renamed from `iter`, which shadowed the builtin.
    def build_vocab(data_iter, tokenizer, max_len=100):
        # Drop empty or overly long token sequences before counting frequencies.
        filtered = filter(lambda t: 0 < len(t) <= max_len, map(tokenizer, data_iter))
        vocab = build_vocab_from_iterator(filtered, min_freq=2, specials=special_symbols)
        vocab.set_default_index(vocab['<unk>'])
        return vocab

    # The dataset iterator is consumed by the first pass, so re-create it
    # before building the German vocabulary.
    train_iter = IWSLT2017(split='train', language_pair=('en', 'de'))
    vocab_en = build_vocab(map(lambda p: p[0], train_iter), tokenizer_en)
    train_iter = IWSLT2017(split='train', language_pair=('en', 'de'))
    vocab_de = build_vocab(map(lambda p: p[1], train_iter), tokenizer_de)
    return vocab_en, vocab_de
# --- tokenize and encode texts ---
def indices(vocab_en, vocab_de):
    """Tokenize and numericalize the IWSLT2017 train/test splits.

    Each split becomes a list of (src_ids, tgt_ids) pairs sorted by
    (source length, target length) so that batching wastes little padding.
    """
    tokenizer_en = Tokenizer(English().tokenizer)
    tokenizer_de = Tokenizer(German().tokenizer)

    def to_indices(pair):
        src, tgt = pair
        return vocab_en(tokenizer_en(src)), vocab_de(tokenizer_de(tgt))

    by_length = lambda pair: (len(pair[0]), len(pair[1]))
    train_iter = IWSLT2017(split='train', language_pair=('en', 'de'))
    test_iter = IWSLT2017(split='test', language_pair=('en', 'de'))
    train_indices = sorted(map(to_indices, train_iter), key=by_length)
    test_indices = sorted(map(to_indices, test_iter), key=by_length)
    return train_indices, test_indices
# --- batchify ---
import torch
import random
from .batch import Batch
def pad(lists, phrase_len):
    """Right-pad each token-id list with PAD_IDX to `phrase_len` columns.

    Returns a LongTensor of shape (len(lists), phrase_len).
    """
    rows = []
    for seq in lists:
        rows.append(seq + [PAD_IDX] * (phrase_len - len(seq)))
    return torch.LongTensor(rows)
class Batchify:
    """Single-pass iterable that groups (src, tgt) index pairs into padded
    tensor batches of roughly `batch_tokens` tokens each.

    `data` must be a list of (src_indices, tgt_indices) pairs; feeding it
    length-sorted data keeps padding waste small.
    """

    def __init__(self, data, batch_tokens=1000, pad_idx=PAD_IDX, cuda=False):
        assert type(data) == list
        self.data = data
        self.batch_tokens = batch_tokens  # approximate padded-token budget per batch
        self.pad_idx = pad_idx
        self.cuda = cuda
        self.batches = None  # built lazily on the first next() call

    def next(self):
        # Lazily build the batches once; afterwards `self.batches` is a plain
        # iterator, so this object can only be consumed a single time.
        if not self.batches:
            self._prepare_batches()
            self._batches_to_tensors()
            self.batches = iter(self.batches)
        src, tgt = next(self.batches)
        if self.cuda:
            src, tgt = src.cuda(), tgt.cuda()
        return Batch(src, tgt, self.pad_idx)

    def _prepare_batches(self):
        # Greedily grow (begin, end) ranges over `self.data` until the
        # estimated padded token count would exceed `batch_tokens`.
        # NOTE(review): max_src_length/max_trg_length are never reset per
        # batch, so later batches are sized against the global maxima seen so
        # far — conservative (batches shrink over time); confirm intended.
        begin, end = 0, 0
        max_src_length = 0
        max_trg_length = 0
        indices = []
        for src, trg in self.data:
            max_src_length = max(max_src_length, len(src) + 2)  # +2 for BOS/EOS
            max_trg_length = max(max_trg_length, len(trg) + 2)
            if (end-begin+1)*(max_src_length+max_trg_length) > self.batch_tokens:
                indices.append((begin, end))
                begin = end
            end += 1
        indices.append((begin, end))  # flush the final partial batch
        random.shuffle(indices)
        self.batches = indices

    def _batches_to_tensors(self):
        # Materialize each (begin, end) range into padded LongTensor pairs,
        # wrapping every sequence in BOS/EOS markers.
        ans = []
        for begin, end in self.batches:
            src = []
            tgt = []
            for sr, tg in self.data[begin:end]:
                src.append([BOS_IDX] + sr + [EOS_IDX])
                tgt.append([BOS_IDX] + tg + [EOS_IDX])
            src = pad(src, max(map(len, src)))
            tgt = pad(tgt, max(map(len, tgt)))
            ans.append((src, tgt))
        self.batches = ans

    def __next__(self):
        return self.next()

    def __iter__(self):
        return self
# --- train model ---
from .makemodel import make_model
from .labelsmoothing import LabelSmoothing
from .noamopt import NoamOpt
from .runepoch import run_epoch
from .simplelosscompute import SimpleLossCompute
from .greedydecode import greedy_decode
class Translation:
    """Wrapper bundling a Transformer model, its label-smoothed criterion and
    Noam learning-rate schedule, plus train/translate/save/load helpers.
    """

    def __init__(self, src_vocab, tgt_vocab, padding_idx=PAD_IDX, cuda=False, model=None):
        # src_vocab/tgt_vocab are vocabulary *sizes* (ints).
        self.tgt_vocab = tgt_vocab
        self.padding_idx = padding_idx
        self.cuda = cuda
        self.train_loss_history = []
        self.test_loss_history = []
        if model:
            self.model = model
        else:
            self.model = make_model(src_vocab, tgt_vocab)
        self.criterion = LabelSmoothing(size=self.tgt_vocab, padding_idx=self.padding_idx, smoothing=.1)
        if cuda:
            self.model.cuda()
            self.criterion.cuda()
        self.model_opt = NoamOpt(self.model.src_embed[0].n_features, 1, 4000,
                                 torch.optim.Adam(self.model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))

    def train(self, train, test, nepoch=10, batch_tokens=1000, base_lr=1, warmup=4000):
        """Train for `nepoch` epochs, validating and checkpointing each epoch.

        `train`/`test` are lists of (src_indices, tgt_indices) pairs.
        """
        self.model_opt.factor = base_lr
        self.model_opt.warmup = warmup
        for epoch in range(nepoch):
            self.model.train()
            b = Batchify(train, batch_tokens=batch_tokens, cuda=self.cuda)
            loss = run_epoch(b, self.model, SimpleLossCompute(self.criterion, self.model_opt))
            self.train_loss_history.append(loss)
            self.model.eval()
            with torch.no_grad():
                # Passing opt=None disables the optimizer step during validation.
                b = Batchify(test, batch_tokens=batch_tokens, cuda=self.cuda)
                loss = run_epoch(b, self.model, SimpleLossCompute(self.criterion, None))
                print(f"Epoch {epoch} completed with validation loss per token {loss}")
                self.test_loss_history.append(loss)
            self.save("checkpoint.bin")

    def translate(self, src, start_symbol=BOS_IDX, max_len=5000):
        """Greedy-decode a single source sentence (list or tensor of token ids).

        Returns the decoded ids truncated before the first EOS.
        """
        if type(src) is list:
            src = torch.LongTensor(src)
        if self.cuda:
            # BUG FIX: Tensor.cuda() is not in-place — the result must be
            # re-assigned, otherwise the model receives a CPU tensor.
            src = src.cuda()
        while len(src.shape) != 2:
            src.unsqueeze_(0)  # add a batch dimension
        ans = greedy_decode(self.model, src, src != self.padding_idx, max_len, start_symbol)
        # NOTE(review): assumes greedy_decode returns a list-like supporting
        # .index() and that EOS is always produced — confirm.
        return ans[:ans.index(EOS_IDX)]

    def save(self, path):
        """Serialize the whole model object to `path`."""
        with open(path, "wb") as out:
            torch.save(self.model, out)

    @staticmethod
    def load(src_vocab, tgt_vocab, path, cuda=True):
        """Restore a Translation wrapper around a model saved with `save`."""
        map_location = "cuda" if cuda else "cpu"
        model = torch.load(path, map_location=map_location)
        return Translation(src_vocab, tgt_vocab, cuda=cuda, model=model)
if __name__ == "__main__":
    # Full pipeline: build vocabularies, numericalize the corpus, then train
    # a fresh model (checkpointing each epoch inside train()).
    vocab_en, vocab_de = vocab()
    train_indices, test_indices = indices(vocab_en, vocab_de)
    trans = Translation(len(vocab_en), len(vocab_de))
    trans.train(train_indices, test_indices)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Collection of serial interfaces to OLED devices.
"""
# Example usage:
#
# from luma.core.interface.serial import i2c, spi
# from luma.core.render import canvas
# from luma.oled.device import ssd1306, sh1106
# from PIL import ImageDraw
#
# serial = i2c(port=1, address=0x3C)
# device = ssd1306(serial)
#
# with canvas(device) as draw:
# draw.rectangle(device.bounding_box, outline="white", fill="black")
# draw.text(30, 40, "Hello World", fill="white")
#
# As soon as the with-block scope level is complete, the graphics primitives
# will be flushed to the device.
#
# Creating a new canvas is effectively 'carte blanche': If you want to retain
# an existing canvas, then make a reference like:
#
# c = canvas(device)
# for X in ...:
# with c as draw:
# draw.rectangle(...)
#
# As before, as soon as the with block completes, the canvas buffer is flushed
# to the device
from luma.core.device import device
import luma.core.error
import luma.core.framebuffer
import luma.oled.const
__all__ = ["ssd1306", "ssd1322", "ssd1325", "ssd1327", "ssd1331", "ssd1351", "sh1106"]
class sh1106(device):
    """
    Serial interface to a monochrome SH1106 OLED display.

    On creation, an initialization sequence is pumped to the display
    to properly configure it. Further control commands can then be called to
    affect the brightness and other settings.

    :param serial_interface: the serial interface to delegate sending data
        and commands through.
    :param width: the number of horizontal pixels (optional, defaults to 128).
    :param height: the number of vertical pixels (optional, defaults to 64).
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only.
    """

    def __init__(self, serial_interface=None, width=128, height=64, rotate=0, **kwargs):
        super(sh1106, self).__init__(luma.oled.const.sh1106, serial_interface)
        self.capabilities(width, height, rotate)
        # The controller is addressed in 8-pixel-tall horizontal "pages".
        self._pages = self._h // 8
        # Geometry-specific controller parameters; None means unsupported.
        settings = {
            (128, 128): dict(multiplex=0xFF, displayoffset=0x02),
            (128, 64): dict(multiplex=0x3F, displayoffset=0x00),
            (128, 32): dict(multiplex=0x20, displayoffset=0x0F)
        }.get((width, height))
        if settings is None:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))
        self.command(
            self._const.DISPLAYOFF,
            self._const.MEMORYMODE,
            self._const.SETHIGHCOLUMN, 0xB0, 0xC8,
            self._const.SETLOWCOLUMN, 0x10, 0x40,
            self._const.SETSEGMENTREMAP,
            self._const.NORMALDISPLAY,
            self._const.SETMULTIPLEX, settings['multiplex'],
            self._const.DISPLAYALLON_RESUME,
            self._const.SETDISPLAYOFFSET, settings['displayoffset'],
            self._const.SETDISPLAYCLOCKDIV, 0xF0,
            self._const.SETPRECHARGE, 0x22,
            self._const.SETCOMPINS, 0x12,
            self._const.SETVCOMDETECT, 0x20,
            self._const.CHARGEPUMP, 0x14)
        self.contrast(0x7F)
        self.clear()
        self.show()

    def display(self, image):
        """
        Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the SH1106
        OLED display, one 8-pixel-tall page at a time.
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        set_page_address = 0xB0  # page-0 address command; incremented per page
        image_data = image.getdata()
        pixels_per_page = self.width * 8
        buf = bytearray(self.width)
        for y in range(0, int(self._pages * pixels_per_page), pixels_per_page):
            # 0x02/0x10 set the column start — presumably skipping the two
            # off-screen RAM columns of the SH1106; TODO confirm vs datasheet.
            self.command(set_page_address, 0x02, 0x10)
            set_page_address += 1
            # Flat-buffer offsets of the 8 pixel rows making up this page.
            offsets = [y + self.width * i for i in range(8)]
            for x in range(self.width):
                # Pack 8 vertically stacked pixels into one page byte
                # (`and` yields the mask when the pixel is non-zero).
                buf[x] = \
                    (image_data[x + offsets[0]] and 0x01) | \
                    (image_data[x + offsets[1]] and 0x02) | \
                    (image_data[x + offsets[2]] and 0x04) | \
                    (image_data[x + offsets[3]] and 0x08) | \
                    (image_data[x + offsets[4]] and 0x10) | \
                    (image_data[x + offsets[5]] and 0x20) | \
                    (image_data[x + offsets[6]] and 0x40) | \
                    (image_data[x + offsets[7]] and 0x80)
            self.data(list(buf))
class ssd1306(device):
    """
    Serial interface to a monochrome SSD1306 OLED display.

    On creation, an initialization sequence is pumped to the display
    to properly configure it. Further control commands can then be called to
    affect the brightness and other settings.

    :param serial_interface: the serial interface to delegate sending data
        and commands through.
    :param width: the number of horizontal pixels (optional, defaults to 128).
    :param height: the number of vertical pixels (optional, defaults to 64).
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only.
    """

    def __init__(self, serial_interface=None, width=128, height=64, rotate=0, **kwargs):
        super(ssd1306, self).__init__(luma.oled.const.ssd1306, serial_interface)
        self.capabilities(width, height, rotate)
        # Supported modes
        settings = {
            (128, 64): dict(multiplex=0x3F, displayclockdiv=0x80, compins=0x12),
            (128, 32): dict(multiplex=0x1F, displayclockdiv=0x80, compins=0x02),
            (96, 16): dict(multiplex=0x0F, displayclockdiv=0x60, compins=0x02),
            (64, 48): dict(multiplex=0x2F, displayclockdiv=0x80, compins=0x12),
            (64, 32): dict(multiplex=0x1F, displayclockdiv=0x80, compins=0x12)
        }.get((width, height))
        if settings is None:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))
        # Precompute, per flat pixel index, the bit mask within its page byte
        # and the destination byte offset (pages are 8 vertical pixels tall),
        # so display() only does table lookups.
        self._pages = height // 8
        self._mask = [1 << (i // width) % 8 for i in range(width * height)]
        self._offsets = [(width * (i // (width * 8))) + (i % width) for i in range(width * height)]
        # Centre narrower panels within the controller's 128-column RAM.
        self._colstart = (0x80 - self._w) // 2
        self._colend = self._colstart + self._w
        self.command(
            self._const.DISPLAYOFF,
            self._const.SETDISPLAYCLOCKDIV, settings['displayclockdiv'],
            self._const.SETMULTIPLEX, settings['multiplex'],
            self._const.SETDISPLAYOFFSET, 0x00,
            self._const.SETSTARTLINE,
            self._const.CHARGEPUMP, 0x14,
            self._const.MEMORYMODE, 0x00,
            self._const.SETSEGMENTREMAP,
            self._const.COMSCANDEC,
            self._const.SETCOMPINS, settings['compins'],
            self._const.SETPRECHARGE, 0xF1,
            self._const.SETVCOMDETECT, 0x40,
            self._const.DISPLAYALLON_RESUME,
            self._const.NORMALDISPLAY)
        self.contrast(0xCF)
        self.clear()
        self.show()

    def display(self, image):
        """
        Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the SSD1306
        OLED display.
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        self.command(
            # Column start/end address
            self._const.COLUMNADDR, self._colstart, self._colend - 1,
            # Page start/end address
            self._const.PAGEADDR, 0x00, self._pages - 1)
        # buf starts zeroed, so only lit pixels need their mask bit OR-ed in.
        buf = bytearray(self._w * self._pages)
        off = self._offsets
        mask = self._mask
        idx = 0
        for pix in image.getdata():
            if pix > 0:
                buf[off[idx]] |= mask[idx]
            idx += 1
        self.data(list(buf))
class ssd1331(device):
    """
    Serial interface to a 16-bit color (5-6-5 RGB) SSD1331 OLED display.

    On creation, an initialization sequence is pumped to
    the display to properly configure it. Further control commands can then be
    called to affect the brightness and other settings.

    :param serial_interface: the serial interface (usually a
        :py:class`luma.core.interface.serial.spi` instance) to delegate sending
        data and commands through.
    :param width: the number of horizontal pixels (optional, defaults to 96).
    :type width: int
    :param height: the number of vertical pixels (optional, defaults to 64).
    :type height: int
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only, where 0 is
        no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
        represents 270° rotation.
    :type rotate: int
    :param framebuffer: Framebuffering strategy, currently values of
        ``diff_to_previous`` or ``full_frame`` are only supported.
    :type framebuffer: str
    """

    def __init__(self, serial_interface=None, width=96, height=64, rotate=0,
                 framebuffer="diff_to_previous", **kwargs):
        super(ssd1331, self).__init__(luma.oled.const.common, serial_interface)
        self.capabilities(width, height, rotate, mode="RGB")
        # `framebuffer` names a class in luma.core.framebuffer.
        self.framebuffer = getattr(luma.core.framebuffer, framebuffer)(self)
        # This controller only drives a single fixed geometry.
        if width != 96 or height != 64:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))
        self.command(
            0xAE,        # Display off
            0xA0, 0x72,  # Seg remap
            0xA1, 0x00,  # Set Display start line
            0xA2, 0x00,  # Set display offset
            0xA4,        # Normal display
            0xA8, 0x3F,  # Set multiplex
            0xAD, 0x8E,  # Master configure
            0xB0, 0x0B,  # Power save mode
            0xB1, 0x74,  # Phase12 period
            0xB3, 0xD0,  # Clock divider
            0x8A, 0x80,  # Set precharge speed A
            0x8B, 0x80,  # Set precharge speed B
            0x8C, 0x80,  # Set precharge speed C
            0xBB, 0x3E,  # Set pre-charge voltage
            0xBE, 0x3E,  # Set voltage
            0x87, 0x0F)  # Master current control
        self.contrast(0xFF)
        self.clear()
        self.show()

    def display(self, image):
        """
        Renders a 24-bit RGB image to the SSD1331 OLED display.

        :param image: the image to render.
        :type image: PIL.Image.Image
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        if self.framebuffer.redraw_required(image):
            # Only the framebuffer's dirty bounding box is transmitted.
            left, top, right, bottom = self.framebuffer.bounding_box
            width = right - left
            height = bottom - top
            self.command(
                0x15, left, right - 1,   # Set column addr
                0x75, top, bottom - 1)   # Set row addr
            i = 0
            # buf starts zeroed, so black pixels can be skipped entirely.
            buf = bytearray(width * height * 2)
            for r, g, b in self.framebuffer.getdata():
                if not(r == g == b == 0):
                    # 65K format 1
                    buf[i] = r & 0xF8 | g >> 5
                    buf[i + 1] = g << 5 & 0xE0 | b >> 3
                i += 2
            self.data(list(buf))

    def contrast(self, level):
        """
        Switches the display contrast to the desired level, in the range
        0-255. Note that setting the level to a low (or zero) value will
        not necessarily dim the display to nearly off. In other words,
        this method is **NOT** suitable for fade-in/out animation.

        :param level: Desired contrast level in the range of 0-255.
        :type level: int
        """
        assert(0 <= level <= 255)
        # The SSD1331 has an independent contrast register per colour channel.
        self.command(0x81, level,  # Set contrast A
                     0x82, level,  # Set contrast B
                     0x83, level)  # Set contrast C
class ssd1351(device):
    """
    Serial interface to the 16-bit color (5-6-5 RGB) SSD1351 OLED display.

    On creation, an initialization sequence is pumped to
    the display to properly configure it. Further control commands can then be
    called to affect the brightness and other settings.

    :param serial_interface: the serial interface (usually a
        :py:class`luma.core.interface.serial.spi` instance) to delegate sending
        data and commands through.
    :param width: the number of horizontal pixels (optional, defaults to 128).
    :type width: int
    :param height: the number of vertical pixels (optional, defaults to 128).
    :type height: int
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only, where 0 is
        no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
        represents 270° rotation.
    :type rotate: int
    :param framebuffer: Framebuffering strategy, currently values of
        ``diff_to_previous`` or ``full_frame`` are only supported.
    :type framebuffer: str
    :param bgr: Set to ``True`` if device pixels are BGR order (rather than RGB).
    :type bgr: bool
    :param h_offset: horizontal offset (in pixels) of screen to device memory
        (default: 0)
    :type h_offset: int
    :param v_offset: vertical offset (in pixels) of screen to device memory
        (default: 0)
    :type v_offset: int

    .. versionadded:: 2.3.0
    """

    def __init__(self, serial_interface=None, width=128, height=128, rotate=0,
                 framebuffer="diff_to_previous", h_offset=0, v_offset=0,
                 bgr=False, **kwargs):
        super(ssd1351, self).__init__(luma.oled.const.common, serial_interface)
        self.capabilities(width, height, rotate, mode="RGB")
        self.framebuffer = getattr(luma.core.framebuffer, framebuffer)(self)

        # Optionally translate every bounding box by the panel's offset into
        # device memory; the identity lambda keeps the common path cheap.
        if h_offset != 0 or v_offset != 0:
            def offset(bbox):
                left, top, right, bottom = bbox
                return (left + h_offset, top + v_offset, right + h_offset, bottom + v_offset)
            self.apply_offsets = offset
        else:
            self.apply_offsets = lambda bbox: bbox

        if (width, height) not in [(96, 96), (128, 128)]:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))

        # RGB or BGR order
        order = 0x02 if bgr else 0x00

        self.command(0xFD, 0x12)              # Unlock IC MCU interface
        self.command(0xFD, 0xB1)              # Command A2,B1,B3,BB,BE,C1 accessible if in unlock state
        self.command(0xAE)                    # Display off
        self.command(0xB3, 0xF1)              # Clock divider
        self.command(0xCA, 0x7F)              # Mux ratio
        self.command(0x15, 0x00, width - 1)   # Set column address
        self.command(0x75, 0x00, height - 1)  # Set row address
        self.command(0xA0, 0x74 | order)      # Segment remapping
        self.command(0xA1, 0x00)              # Set Display start line
        self.command(0xA2, 0x00)              # Set display offset
        self.command(0xB5, 0x00)              # Set GPIO
        self.command(0xAB, 0x01)              # Function select (internal - diode drop)
        self.command(0xB1, 0x32)              # Precharge
        self.command(0xB4, 0xA0, 0xB5, 0x55)  # Set segment low voltage
        self.command(0xBE, 0x05)              # Set VcomH voltage
        self.command(0xC7, 0x0F)              # Contrast master
        self.command(0xB6, 0x01)              # Precharge2
        self.command(0xA6)                    # Normal display
        self.contrast(0xFF)
        self.clear()
        self.show()

    def display(self, image):
        """
        Renders a 24-bit RGB image to the SSD1351 OLED display.

        :param image: the image to render.
        :type image: PIL.Image.Image
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        if self.framebuffer.redraw_required(image):
            # Only the (offset-adjusted) dirty bounding box is transmitted.
            left, top, right, bottom = self.apply_offsets(self.framebuffer.bounding_box)
            width = right - left
            height = bottom - top
            self.command(0x15, left, right - 1)  # Set column addr
            self.command(0x75, top, bottom - 1)  # Set row addr
            self.command(0x5C)                   # Write RAM
            i = 0
            # buf starts zeroed, so black pixels can be skipped entirely.
            buf = bytearray(width * height * 2)
            for r, g, b in self.framebuffer.getdata():
                if not(r == g == b == 0):
                    # 65K format 1
                    buf[i] = r & 0xF8 | g >> 5
                    buf[i + 1] = g << 5 & 0xE0 | b >> 3
                i += 2
            self.data(list(buf))

    def contrast(self, level):
        """
        Switches the display contrast to the desired level, in the range
        0-255. Note that setting the level to a low (or zero) value will
        not necessarily dim the display to nearly off. In other words,
        this method is **NOT** suitable for fade-in/out animation.

        :param level: Desired contrast level in the range of 0-255.
        :type level: int
        """
        assert(0 <= level <= 255)
        # Same contrast value applied to all three colour channels (cmd 0xC1).
        self.command(0xC1, level, level, level)

    def command(self, cmd, *args):
        """
        Sends a command and an (optional) sequence of arguments through to the
        delegated serial interface. Note that the arguments are passed through
        as data.
        """
        self._serial_interface.command(cmd)
        if len(args) > 0:
            self._serial_interface.data(list(args))
class ssd1322(device):
    """
    Serial interface to a 4-bit greyscale SSD1322 OLED display.

    On creation, an initialization sequence is pumped to the
    display to properly configure it. Further control commands can then be
    called to affect the brightness and other settings.

    :param serial_interface: the serial interface (usually a
        :py:class`luma.core.interface.serial.spi` instance) to delegate sending
        data and commands through.
    :param width: the number of horizontal pixels (optional, defaults to 256).
    :type width: int
    :param height: the number of vertical pixels (optional, defaults to 64).
    :type height: int
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only, where 0 is
        no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
        represents 270° rotation.
    :type rotate: int
    :param mode: Supplying "1" or "RGB" effects a different rendering
        mechanism, either to monochrome or 4-bit greyscale.
    :type mode: str
    :param framebuffer: Framebuffering strategy, currently values of
        ``diff_to_previous`` or ``full_frame`` are only supported
    :type framebuffer: str
    """

    def __init__(self, serial_interface=None, width=256, height=64, rotate=0,
                 mode="RGB", framebuffer="diff_to_previous", **kwargs):
        super(ssd1322, self).__init__(luma.oled.const.ssd1322, serial_interface)
        self.capabilities(width, height, rotate, mode)
        self.framebuffer = getattr(luma.core.framebuffer, framebuffer)(self)
        # Select the pixel-packing routine once, based on the image mode.
        self.populate = self._render_mono if mode == "1" else self._render_greyscale
        # Centre the panel within the controller's 480-column RAM.
        self.column_offset = (480 - width) // 2

        if width <= 0 or width > 256 or \
           height <= 0 or height > 64 or \
           width % 16 != 0 or height % 16 != 0:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))

        self.command(0xFD, 0x12)        # Unlock IC
        self.command(0xA4)              # Display off (all pixels off)
        self.command(0xB3, 0xF2)        # Display divide clockratio/freq
        self.command(0xCA, 0x3F)        # Set MUX ratio
        self.command(0xA2, 0x00)        # Display offset
        self.command(0xA1, 0x00)        # Display start Line
        self.command(0xA0, 0x14, 0x11)  # Set remap & dual COM Line
        self.command(0xB5, 0x00)        # Set GPIO (disabled)
        self.command(0xAB, 0x01)        # Function select (internal Vdd)
        self.command(0xB4, 0xA0, 0xFD)  # Display enhancement A (External VSL)
        self.command(0xC7, 0x0F)        # Master contrast (reset)
        self.command(0xB9)              # Set default greyscale table
        self.command(0xB1, 0xF0)        # Phase length
        self.command(0xD1, 0x82, 0x20)  # Display enhancement B (reset)
        self.command(0xBB, 0x0D)        # Pre-charge voltage
        self.command(0xB6, 0x08)        # 2nd precharge period
        self.command(0xBE, 0x00)        # Set VcomH
        self.command(0xA6)              # Normal display (reset)
        self.command(0xA9)              # Exit partial display
        self.contrast(0x7F)             # Reset
        self.clear()
        self.show()

    def _render_mono(self, buf, pixel_data):
        # Pack two 4-bit pixels per byte, high nibble first; any non-zero
        # pixel becomes full intensity (0xF).
        i = 0
        for pix in pixel_data:
            if pix > 0:
                if i % 2 == 0:
                    buf[i // 2] = 0xF0
                else:
                    buf[i // 2] |= 0x0F
            i += 1

    def _render_greyscale(self, buf, pixel_data):
        # Pack two 4-bit greyscale pixels per byte, high nibble first.
        i = 0
        for r, g, b in pixel_data:
            # RGB->Greyscale luma calculation into 4-bits
            grey = (r * 306 + g * 601 + b * 117) >> 14
            if grey > 0:
                if i % 2 == 0:
                    buf[i // 2] = (grey << 4)
                else:
                    buf[i // 2] |= grey
            i += 1

    def display(self, image):
        """
        Takes a 1-bit monochrome or 24-bit RGB image and renders it
        to the SSD1322 OLED display. RGB pixels are converted to 4-bit
        greyscale values using a simplified Luma calculation, based on
        *Y'=0.299R'+0.587G'+0.114B'*.

        :param image: the image to render
        :type image: PIL.Image.Image
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        if self.framebuffer.redraw_required(image):
            left, top, right, bottom = self.framebuffer.inflate_bbox()
            width = right - left
            height = bottom - top
            # Column addresses are in units of 4 pixels on this controller.
            pix_start = self.column_offset + left
            coladdr_start = pix_start >> 2
            coladdr_end = (pix_start + width >> 2) - 1
            self.command(0x15, coladdr_start, coladdr_end)  # set column addr
            self.command(0x75, top, bottom - 1)             # Reset row addr
            self.command(0x5C)                              # Enable MCU to write data into RAM
            # Two pixels per byte, hence the buffer is half the pixel count.
            buf = bytearray(width * height >> 1)
            self.populate(buf, self.framebuffer.getdata())
            self.data(list(buf))

    def command(self, cmd, *args):
        """
        Sends a command and an (optional) sequence of arguments through to the
        delegated serial interface. Note that the arguments are passed through
        as data.
        """
        self._serial_interface.command(cmd)
        if len(args) > 0:
            self._serial_interface.data(list(args))
class ssd1325(device):
    """
    Serial interface to a 4-bit greyscale SSD1325 OLED display.

    On creation, an initialization sequence is pumped to the
    display to properly configure it. Further control commands can then be
    called to affect the brightness and other settings.

    :param serial_interface: the serial interface to delegate sending data
        and commands through.
    :param width: the number of horizontal pixels (only 128 is supported).
    :param height: the number of vertical pixels (only 64 is supported).
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only.
    :param mode: "1" selects monochrome rendering, "RGB" 4-bit greyscale.
    """

    def __init__(self, serial_interface=None, width=128, height=64, rotate=0,
                 mode="RGB", **kwargs):
        super(ssd1325, self).__init__(luma.core.const.common, serial_interface)
        self.capabilities(width, height, rotate, mode)
        # Two 4-bit pixels are packed per transmitted byte.
        self._buffer_size = width * height // 2
        if width != 128 or height != 64:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))
        self.command(
            0xAE,              # Display off (all pixels off)
            0xB3, 0xF2,        # Display divide clockratio/freq
            0xA8, 0x3F,        # Set MUX ratio
            0xA2, 0x4C,        # Display offset
            0xA1, 0x00,        # Display start line
            0xAD, 0x02,        # Master configuration (external Vcc)
            0xA0, 0x50,        # Set remap (enable COM remap & split odd/even)
            0x86,              # Set current range (full)
            0xB8, 0x01, 0x11,  # Set greyscale table
            0x22, 0x32, 0x43,  # .. cont
            0x54, 0x65, 0x76,  # .. cont
            0xB2, 0x51,        # Set row period
            0xB1, 0x55,        # Set phase length
            0xB4, 0x03,        # Set pre-charge compensation level
            0xB0, 0x28,        # Set pre-charge compensation enable
            0xBC, 0x01,        # Pre-charge voltage
            0xBE, 0x00,        # Set VcomH
            0xBF, 0x02,        # Set VSL (not connected)
            0xA4)              # Normal display
        self.contrast(0x7F)
        self.clear()
        self.show()

    def _render_mono(self, buf, image):
        # Pack two 4-bit pixels per byte, low nibble first; any non-zero
        # pixel becomes full intensity (0xF).
        i = 0
        for pix in image.getdata():
            if pix > 0:
                if i % 2 == 0:
                    buf[i // 2] = 0x0F
                else:
                    buf[i // 2] |= 0xF0
            i += 1

    def _render_greyscale(self, buf, image):
        # Pack two 4-bit greyscale pixels per byte, low nibble first.
        i = 0
        for r, g, b in image.getdata():
            # RGB->Greyscale luma calculation into 4-bits
            grey = (r * 306 + g * 601 + b * 117) >> 14
            if grey > 0:
                if i % 2 == 0:
                    buf[i // 2] = grey
                else:
                    buf[i // 2] |= (grey << 4)
            i += 1

    def display(self, image):
        """
        Takes a 1-bit monochrome or 24-bit RGB :py:mod:`PIL.Image` and dumps it
        to the SSD1325 OLED display, converting the image pixels to 4-bit
        greyscale using a simplified Luma calculation, based on
        *Y'=0.299R'+0.587G'+0.114B'*.
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        self.command(
            0x15, 0x00, self._w - 1,  # set column addr
            0x75, 0x00, self._h - 1)  # set row addr
        buf = bytearray(self._buffer_size)
        if self.mode == "1":
            self._render_mono(buf, image)
        else:
            self._render_greyscale(buf, image)
        self.data(list(buf))
class ssd1327(device):
    """
    Serial interface to a 4-bit greyscale SSD1327 OLED display.

    On creation, an initialization sequence is pumped to the
    display to properly configure it. Further control commands can then be
    called to affect the brightness and other settings.

    :param serial_interface: the serial interface to delegate sending data
        and commands through.
    :param width: the number of horizontal pixels (only 128 is supported).
    :param height: the number of vertical pixels (only 128 is supported).
    :param rotate: an integer value of 0 (default), 1, 2 or 3 only.
    :param mode: "1" selects monochrome rendering, "RGB" 4-bit greyscale.
    """

    def __init__(self, serial_interface=None, width=128, height=128, rotate=0,
                 mode="RGB", **kwargs):
        super(ssd1327, self).__init__(luma.core.const.common, serial_interface)
        self.capabilities(width, height, rotate, mode)
        # Two 4-bit pixels are packed per transmitted byte.
        self._buffer_size = width * height // 2
        if width != 128 or height != 128:
            raise luma.core.error.DeviceDisplayModeError(
                "Unsupported display mode: {0} x {1}".format(width, height))
        self.command(
            0xAE,        # Display off (all pixels off)
            0xA0, 0x53,  # Segment remap (com split, com remap, nibble remap, column remap)
            0xA1, 0x00,  # Display start line
            0xA2, 0x00,  # Display offset
            0xA4,        # regular display
            0xA8, 0x7F)  # set multiplex ratio: 127
        self.command(
            0xB8, 0x01, 0x11,  # Set greyscale table
            0x22, 0x32, 0x43,  # .. cont
            0x54, 0x65, 0x76)  # .. cont
        self.command(
            0xB3, 0x00,  # Front clock divider: 0, Fosc: 0
            0xAB, 0x01,  # Enable Internal Vdd
            0xB1, 0xF1,  # Set phase periods - 1: 1 clk, 2: 15 clks
            0xBC, 0x08,  # Pre-charge voltage: Vcomh
            0xBE, 0x07,  # COM deselect voltage level: 0.86 x Vcc
            0xD5, 0x62,  # Enable 2nd pre-charge
            0xB6, 0x0F)  # 2nd Pre-charge period: 15 clks
        self.contrast(0x7F)
        self.clear()
        self.show()

    def _render_mono(self, buf, image):
        # Pack two 4-bit pixels per byte, low nibble first; any non-zero
        # pixel becomes full intensity (0xF).
        i = 0
        for pix in image.getdata():
            if pix > 0:
                if i % 2 == 0:
                    buf[i // 2] = 0x0F
                else:
                    buf[i // 2] |= 0xF0
            i += 1

    def _render_greyscale(self, buf, image):
        # Pack two 4-bit greyscale pixels per byte, low nibble first.
        i = 0
        for r, g, b in image.getdata():
            # RGB->Greyscale luma calculation into 4-bits
            grey = (r * 306 + g * 601 + b * 117) >> 14
            if grey > 0:
                if i % 2 == 0:
                    buf[i // 2] = grey
                else:
                    buf[i // 2] |= (grey << 4)
            i += 1

    def display(self, image):
        """
        Takes a 1-bit monochrome or 24-bit RGB :py:mod:`PIL.Image` and dumps it
        to the SSD1327 OLED display, converting the image pixels to 4-bit
        greyscale using a simplified Luma calculation, based on
        *Y'=0.299R'+0.587G'+0.114B'*.
        """
        assert(image.mode == self.mode)
        assert(image.size == self.size)
        image = self.preprocess(image)
        self.command(
            0x15, 0x00, self._w - 1,  # set column addr
            0x75, 0x00, self._h - 1)  # set row addr
        buf = bytearray(self._buffer_size)
        if self.mode == "1":
            self._render_mono(buf, image)
        else:
            self._render_greyscale(buf, image)
        self.data(list(buf))
|
# AutoEncoders
# Importing the libraries
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
# Importing the dataset
# NOTE(review): the MovieLens-1M frames below are loaded but never used in the
# rest of this script — presumably kept for exploration; confirm before removing.
movies = pd.read_csv('ml-1m/movies.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
users = pd.read_csv('ml-1m/users.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
ratings = pd.read_csv('ml-1m/ratings.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
# Preparing the training set and the test set
# MovieLens-100k u1 split; columns used below: [user id, movie id, rating, ...].
training_set = pd.read_csv('ml-100k/u1.base', delimiter = '\t')
training_set = np.array(training_set, dtype = 'int')
test_set = pd.read_csv('ml-100k/u1.test', delimiter = '\t')
test_set = np.array(test_set, dtype = 'int')
# Getting the number of users and movies
# Use the maximum id across both splits so train and test matrices share one shape.
nb_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
nb_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))
# Converting the data into an array with users in lines and movies in columns
def convert(data, n_users=None, n_movies=None):
    """
    Convert (user, movie, rating) rows into one dense ratings row per user.

    :param data: int array whose columns are [user id, movie id, rating, ...]
                 with 1-based user/movie ids.
    :param n_users: number of users; defaults to the module-level ``nb_users``.
    :param n_movies: number of movies; defaults to the module-level ``nb_movies``.
    :return: list of ``n_users`` lists, each of length ``n_movies``; unrated
             movies stay 0.
    """
    # Fall back to the dataset-wide counts computed above so existing
    # convert(data) callers keep working.
    n_users = nb_users if n_users is None else n_users
    n_movies = nb_movies if n_movies is None else n_movies
    new_data = []
    for id_user in range(1, n_users + 1):
        id_movies = data[:, 1][data[:, 0] == id_user]
        id_ratings = data[:, 2][data[:, 0] == id_user]
        # Renamed from `ratings` to avoid shadowing the module-level DataFrame.
        user_ratings = np.zeros(n_movies)
        user_ratings[id_movies - 1] = id_ratings
        new_data.append(list(user_ratings))
    return new_data
training_set = convert(training_set)
test_set = convert(test_set)
# Converting the data into Torch tensors
# Each row is one user's ratings over all movies (0 = unrated).
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
# Creating the architecture of the Neural Network
class SAE(nn.Module):
    """Stacked autoencoder: n_features -> 20 -> 10 -> 20 -> n_features.

    ``n_features`` defaults to the module-level ``nb_movies`` so the original
    ``SAE()`` call keeps working; pass it explicitly to reuse the class on a
    dataset of a different size.
    """
    def __init__(self, n_features=None):
        super(SAE, self).__init__()
        # Fall back to the dataset-wide movie count computed above.
        n_features = nb_movies if n_features is None else n_features
        self.fc1 = nn.Linear(n_features, 20)  # encoder layer 1
        self.fc2 = nn.Linear(20, 10)          # bottleneck
        self.fc3 = nn.Linear(10, 20)          # decoder layer 1
        self.fc4 = nn.Linear(20, n_features)  # reconstruction layer
        self.activation = nn.Sigmoid()

    def forward(self, x):
        """Encode then decode ``x``; the output layer has no activation."""
        x = self.activation(self.fc1(x))
        x = self.activation(self.fc2(x))
        x = self.activation(self.fc3(x))
        x = self.fc4(x)
        return x
sae = SAE()
criterion = nn.MSELoss()
optimizer = optim.RMSprop(sae.parameters(), lr = 0.01, weight_decay = 0.5)
# Training the SAE
nb_epoch = 200
for epoch in range(1, nb_epoch + 1):
    train_loss = 0
    s = 0.  # number of users who rated at least one movie (loss denominator)
    for id_user in range(nb_users):
        input = Variable(training_set[id_user]).unsqueeze(0)
        # Detach the target so no gradient flows through it.
        # BUG FIX: `target.require_grad = False` was a misspelling of
        # `requires_grad` and silently did nothing.
        target = input.clone().detach()
        if torch.sum(target.data > 0) > 0:
            # BUG FIX: gradients were never zeroed, so they accumulated
            # across every user and epoch.
            optimizer.zero_grad()
            output = sae(input)
            # Only rated movies contribute to the loss.
            output[target == 0] = 0
            loss = criterion(output, target)
            # Rescale the mean over all movies to a mean over rated movies.
            mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)
            loss.backward()
            # BUG FIX: use .item() — np.sqrt(loss.data * ...) relied on
            # deprecated 0-dim tensor behavior.
            train_loss += np.sqrt(loss.item()*mean_corrector)
            s += 1.
            optimizer.step()
    print('epoch: '+str(epoch)+' loss: '+str(train_loss/s))
# Testing the SAE
test_loss = 0
s = 0.  # number of users with at least one held-out rating
for id_user in range(nb_users):
    # Predict from the user's *training* ratings; score against held-out test
    # ratings so unseen movies are evaluated.
    input = Variable(training_set[id_user]).unsqueeze(0)
    # BUG FIX: `target.require_grad = False` was a misspelling of
    # `requires_grad` and silently did nothing; detach instead.
    target = Variable(test_set[id_user]).detach()
    if torch.sum(target.data > 0) > 0:
        output = sae(input)
        output[target == 0] = 0
        loss = criterion(output, target)
        mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)
        # BUG FIX: `loss.data[0]` raises IndexError on 0-dim tensors in
        # modern PyTorch; .item() is the supported scalar accessor.
        test_loss += np.sqrt(loss.item()*mean_corrector)
        s += 1.
print('test loss: '+str(test_loss/s))
|
#Imports
import base64
import algosdk
from algosdk.v2client import algod
from algosdk import account, mnemonic
from algosdk.future.transaction import write_to_file
from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn
from algosdk.future.transaction import PaymentTxn
# Connection
# PureStake mainnet gateway; the API key is passed via the X-API-Key header.
algod_address = "https://mainnet-algorand.api.purestake.io/ps2"
algod_token = ""  # filled in by the operator; empty placeholder by default
headers = {"X-API-Key": algod_token }
client = algod.AlgodClient(algod_token,algod_address,headers)
ADDA = ""        # sender/receiver of the 0-amount self-payment
passphrase = ""  # mnemonic for ADDA; left empty, the txn is only written to disk (see below)
ADDB = ""        # address placed in the rekey_to field
params = client.suggested_params()
amount = 0
# 0-Algo self-payment carrying rekey_to=ADDB (a rekey transaction).
txn = PaymentTxn(sender=ADDA,sp=params,receiver=ADDA,amt=amount,close_remainder_to=None,note=None,lease=None,rekey_to=ADDB)
def wait_for_confirmation(client, transaction_id, timeout):
    """
    Block until `transaction_id` is confirmed, waiting at most `timeout`
    rounds past the current one. Returns the pending-transaction info dict on
    confirmation; raises on pool errors or when the timeout expires.
    """
    first_round = client.status()["last-round"] + 1
    last_round = first_round + timeout
    round_to_wait = first_round
    while round_to_wait < last_round:
        try:
            info = client.pending_transaction_info(transaction_id)
        except Exception:
            # NOTE(review): lookup failures are silently swallowed and the
            # caller receives None — confirm this best-effort behavior is intended.
            return
        if info.get("confirmed-round", 0) > 0:
            return info
        elif info["pool-error"]:
            raise Exception(
                'pool error: {}'.format(info["pool-error"]))
        # Wait for the next block before polling again.
        client.status_after_block(round_to_wait)
        round_to_wait += 1
    raise Exception(
        'pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))
def sign_and_send(txn, passphrase, client):
    """Sign `txn` with the private key derived from `passphrase`, submit it,
    wait up to 5 rounds for confirmation and return the pending-txn info."""
    signing_key = mnemonic.to_private_key(passphrase)
    signed = txn.sign(signing_key)
    txid = signed.transaction.get_txid()
    client.send_transaction(signed)
    wait_for_confirmation(client, txid, 5)
    print('Confirmed TXID: {}'.format(txid))
    return client.pending_transaction_info(txid)
if passphrase:
    # A mnemonic is configured: sign and submit immediately.
    txinfo = sign_and_send(txn, passphrase, client)
    # NOTE(review): pending-transaction info normally keys the txn under
    # "txn", so .get("tx") may always be None — confirm the intended key.
    print("Transaction ID Confirmation: {}".format(txinfo.get("tx")))
else:
    # No key available: persist the unsigned transaction for offline signing.
    # BUG FIX: was `write_to_file([txns], ...)` — `txns` is undefined
    # (NameError); the transaction built above is `txn`.
    write_to_file([txn], "transfer.txn")
|
# Generated by Django 4.0 on 2022-01-03 22:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``time_zone`` FK (-> misc.timezone) to ObservingLocation.

    The one-off ``default=2`` back-fills existing rows during the schema
    change; ``preserve_default=False`` removes that default afterwards.
    (The previous field was renamed to ``x_time_zone`` in migration 0013.)
    """

    dependencies = [
        ('misc', '0002_alter_stateregion_options_alter_timezone_options'),
        ('observe', '0013_rename_time_zone_observinglocation_x_time_zone'),
    ]

    operations = [
        migrations.AddField(
            model_name='observinglocation',
            name='time_zone',
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to='misc.timezone'),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/python
#
# Test cases for AP VLAN
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import subprocess
import logging
logger = logging.getLogger(__name__)
try:
import netifaces
netifaces_imported = True
except ImportError:
netifaces_imported = False
import hwsim_utils
import hostapd
from utils import iface_is_in_bridge, HwsimSkip
import os
from tshark import run_tshark
def test_ap_vlan_open(dev, apdev):
    """AP VLAN with open network"""
    params = { "ssid": "test-vlan-open",
               "dynamic_vlan": "1",
               "accept_mac_file": "hostapd.accept" }
    hapd = hostapd.add_ap(apdev[0], params)
    for sta in dev[0:3]:
        sta.connect("test-vlan-open", key_mgmt="NONE", scan_freq="2412")
    # Stations 0/1 are assigned to per-VLAN bridges; station 2 stays untagged.
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    hwsim_utils.test_connectivity_iface(dev[1], hapd, "brvlan2")
    hwsim_utils.test_connectivity(dev[2], hapd)
def test_ap_vlan_file_open(dev, apdev):
    """AP VLAN with open network and vlan_file mapping"""
    params = { "ssid": "test-vlan-open",
               "dynamic_vlan": "1",
               "vlan_file": "hostapd.vlan",
               "accept_mac_file": "hostapd.accept" }
    hapd = hostapd.add_ap(apdev[0], params)
    for sta in dev[0:3]:
        sta.connect("test-vlan-open", key_mgmt="NONE", scan_freq="2412")
    # Same expectations as test_ap_vlan_open, but vlan_file drives the mapping.
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    hwsim_utils.test_connectivity_iface(dev[1], hapd, "brvlan2")
    hwsim_utils.test_connectivity(dev[2], hapd)
def test_ap_vlan_wpa2(dev, apdev):
    """AP VLAN with WPA2-PSK"""
    params = hostapd.wpa2_params(ssid="test-vlan", passphrase="12345678")
    params.update({'dynamic_vlan': "1",
                   'accept_mac_file': "hostapd.accept"})
    hapd = hostapd.add_ap(apdev[0], params)
    for sta in dev[0:3]:
        sta.connect("test-vlan", psk="12345678", scan_freq="2412")
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    hwsim_utils.test_connectivity_iface(dev[1], hapd, "brvlan2")
    hwsim_utils.test_connectivity(dev[2], hapd)
def test_ap_vlan_wpa2_radius(dev, apdev):
    """AP VLAN with WPA2-Enterprise and RADIUS attributes"""
    params = hostapd.wpa2_eap_params(ssid="test-vlan")
    params['dynamic_vlan'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    # The EAP identity selects the RADIUS-assigned VLAN (or none).
    identities = ["vlan1", "vlan2", "pax.user@example.com"]
    for sta, identity in zip(dev[0:3], identities):
        sta.connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                    identity=identity,
                    password_hex="0123456789abcdef0123456789abcdef",
                    scan_freq="2412")
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    hwsim_utils.test_connectivity_iface(dev[1], hapd, "brvlan2")
    hwsim_utils.test_connectivity(dev[2], hapd)
def test_ap_vlan_wpa2_radius_2(dev, apdev):
    """AP VLAN with WPA2-Enterprise and RADIUS EGRESS_VLANID attributes"""
    params = hostapd.wpa2_eap_params(ssid="test-vlan")
    params['dynamic_vlan'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    creds = dict(key_mgmt="WPA-EAP", eap="PAX",
                 identity="vlan1b",
                 password_hex="0123456789abcdef0123456789abcdef",
                 scan_freq="2412")
    dev[0].connect("test-vlan", **creds)
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
def test_ap_vlan_wpa2_radius_id_change(dev, apdev):
    """AP VLAN with WPA2-Enterprise and RADIUS attributes changing VLANID"""
    # Untagged variant; the shared body lives in generic_ap_vlan_wpa2_radius_id_change.
    generic_ap_vlan_wpa2_radius_id_change(dev, apdev, False)
def test_ap_vlan_tagged_wpa2_radius_id_change(dev, apdev):
    """AP tagged VLAN with WPA2-Enterprise and RADIUS attributes changing VLANID"""
    ifname1 = 'wlan0.1'
    ifname2 = 'wlan0.2'
    try:
        # Create tagged interfaces for wpa_supplicant (VLAN ids 1 and 2).
        for ifname, vlan_id in ((ifname1, '1'), (ifname2, '2')):
            subprocess.call(['ip', 'link', 'add', 'link', dev[0].ifname,
                             'name', ifname, 'type', 'vlan', 'id', vlan_id])
            subprocess.call(['ifconfig', ifname, 'up'])
        generic_ap_vlan_wpa2_radius_id_change(dev, apdev, True)
    finally:
        for ifname in (ifname1, ifname2):
            subprocess.call(['ifconfig', ifname, 'down'])
        for ifname in (ifname1, ifname2):
            subprocess.call(['ip', 'link', 'del', ifname])
def _reauth(sta):
    """Trigger EAP reauthentication on `sta` and wait for EAP success, the
    4-way handshake and the COMPLETED state."""
    sta.dump_monitor()
    sta.request("REAUTHENTICATE")
    ev = sta.wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
    if ev is None:
        raise Exception("EAP reauthentication timed out")
    ev = sta.wait_event(["WPA: Key negotiation completed"], timeout=5)
    if ev is None:
        raise Exception("4-way handshake after reauthentication timed out")
    state = sta.get_status_field('wpa_state')
    if state != "COMPLETED":
        raise Exception("Unexpected state after reauth: " + state)

def generic_ap_vlan_wpa2_radius_id_change(dev, apdev, tagged):
    """Verify that a station moves between VLANs when reauthentication
    returns a different RADIUS-assigned VLAN ID (tagged or untagged)."""
    # Local RADIUS server; its user file is swapped below to change the VLAN.
    as_params = { "ssid": "as",
                  "beacon_int": "2000",
                  "radius_server_clients": "auth_serv/radius_clients.conf",
                  "radius_server_auth_port": '18128',
                  "eap_server": "1",
                  "eap_user_file": "auth_serv/eap_user.conf",
                  "ca_cert": "auth_serv/ca.pem",
                  "server_cert": "auth_serv/server.pem",
                  "private_key": "auth_serv/server.key" }
    authserv = hostapd.add_ap(apdev[1], as_params)
    params = hostapd.wpa2_eap_params(ssid="test-vlan")
    params['dynamic_vlan'] = "1"
    params['auth_server_port'] = "18128"
    hapd = hostapd.add_ap(apdev[0], params)
    identity = "vlan1tagged" if tagged else "vlan1"
    dev[0].connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                   identity=identity,
                   password_hex="0123456789abcdef0123456789abcdef",
                   scan_freq="2412")
    if tagged:
        hwsim_utils.run_connectivity_test(dev[0], hapd, 0, ifname1="wlan0.1",
                                          ifname2="brvlan1")
    else:
        hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")

    logger.info("VLAN-ID -> 2")
    # Swap the RADIUS user file so the next authentication assigns VLAN 2.
    authserv.disable()
    authserv.set('eap_user_file', "auth_serv/eap_user_vlan.conf")
    authserv.enable()
    _reauth(dev[0])
    sta = hapd.get_sta(dev[0].own_addr())
    if 'vlan_id' not in sta:
        raise Exception("No VLAN ID in STA info")
    if (not tagged) and (sta['vlan_id'] != '2'):
        raise Exception("Unexpected VLAN ID: " + sta['vlan_id'])
    if tagged:
        hwsim_utils.run_connectivity_test(dev[0], hapd, 0, ifname1="wlan0.2",
                                          ifname2="brvlan2")
    else:
        hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan2")

    logger.info("VLAN-ID -> 1")
    time.sleep(1)
    authserv.disable()
    authserv.set('eap_user_file', "auth_serv/eap_user.conf")
    authserv.enable()
    _reauth(dev[0])
    sta = hapd.get_sta(dev[0].own_addr())
    if 'vlan_id' not in sta:
        raise Exception("No VLAN ID in STA info")
    if (not tagged) and (sta['vlan_id'] != '1'):
        raise Exception("Unexpected VLAN ID: " + sta['vlan_id'])

    time.sleep(0.2)
    try:
        if tagged:
            hwsim_utils.run_connectivity_test(dev[0], hapd, 0,
                                              ifname1="wlan0.1",
                                              ifname2="brvlan1")
        else:
            hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    except Exception:
        # BUG FIX: was `except Exception, e:` — Python 2-only syntax with an
        # unused binding; this form works on both Python 2.6+ and 3.
        # It is possible for new bridge setup to not be ready immediately, so
        # try again to avoid reporting issues related to that.
        logger.info("First VLAN-ID 1 data test failed - try again")
        if tagged:
            hwsim_utils.run_connectivity_test(dev[0], hapd, 0,
                                              ifname1="wlan0.1",
                                              ifname2="brvlan1")
        else:
            hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
def test_ap_vlan_wpa2_radius_required(dev, apdev):
    """AP VLAN with WPA2-Enterprise and RADIUS attributes required"""
    params = hostapd.wpa2_eap_params(ssid="test-vlan")
    params['dynamic_vlan'] = "2"  # tunnel attributes required per the docstring
    hostapd.add_ap(apdev[0], params)
    # This identity is assigned a VLAN and must be able to connect.
    dev[0].connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                   identity="vlan1",
                   password_hex="0123456789abcdef0123456789abcdef",
                   scan_freq="2412")
    # This identity has no tunnel parameters, so the connection must fail.
    dev[2].connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                   identity="pax.user@example.com",
                   password_hex="0123456789abcdef0123456789abcdef",
                   scan_freq="2412", wait_connect=False)
    ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
                            "CTRL-EVENT-DISCONNECTED"], timeout=20)
    if ev is None:
        raise Exception("Timeout on connection attempt")
    if "CTRL-EVENT-CONNECTED" in ev:
        raise Exception("Unexpected success without tunnel parameters")
def test_ap_vlan_tagged(dev, apdev):
    """AP VLAN with tagged interface"""
    params = { "ssid": "test-vlan-open",
               "dynamic_vlan": "1",
               "vlan_tagged_interface": "lo",
               "accept_mac_file": "hostapd.accept" }
    hapd = hostapd.add_ap(apdev[0], params)
    for sta in dev[0:3]:
        sta.connect("test-vlan-open", key_mgmt="NONE", scan_freq="2412")
    # With a tagged uplink ("lo"), bridges are named br<iface>.<vlan-id>.
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brlo.1")
    hwsim_utils.test_connectivity_iface(dev[1], hapd, "brlo.2")
    hwsim_utils.test_connectivity(dev[2], hapd)
def ap_vlan_iface_cleanup_multibss_cleanup():
    """Best-effort removal of every interface/bridge the multibss tests create.

    Errors from `ip`/`ifconfig` are expected when an interface does not exist
    and are discarded.
    """
    # BUG FIX: each call previously opened a fresh, never-closed
    # open('/dev/null', 'w') handle, leaking one file descriptor per command.
    with open(os.devnull, 'w') as devnull:
        subprocess.call(['ifconfig', 'dummy0', 'down'], stderr=devnull)
        ifnames = [ 'wlan3.1', 'wlan3.2', 'wlan3-2.1', 'wlan3-2.2', 'dummy0.2',
                    'dummy0.1', 'dummy0', 'brvlan1', 'brvlan2' ]
        for ifname in ifnames:
            subprocess.call(['ip', 'link', 'del', ifname], stderr=devnull)
def ap_vlan_iface_test_and_prepare_environ():
    """Verify the dummy/vlan kernel modules work and leave a clean dummy0
    interface up for the multibss tests (skips the test when unavailable)."""
    for name in ("dummy0", "dummy0.1"):
        if name in netifaces.interfaces():
            raise Exception("%s already exists before" % name)
    subprocess.call(['ip', 'link', 'add', 'dummy0', 'type', 'dummy'])
    subprocess.call(['ifconfig', 'dummy0', 'up'])
    if "dummy0" not in netifaces.interfaces():
        raise HwsimSkip("failed to add dummy0 - missing kernel config DUMMY ?")
    # Probe VLAN support with a temporary tagged interface on dummy0.
    subprocess.call(['ip', 'link', 'add', 'link', 'dummy0', 'name', 'dummy0.1',
                     'type', 'vlan', 'id', '1'])
    if "dummy0.1" not in netifaces.interfaces():
        raise HwsimSkip("failed to add dummy0.1 - missing kernel config VLAN_8021Q ?")
    subprocess.call(['ip', 'link', 'del', 'dummy0.1'])
    if "dummy0.1" in netifaces.interfaces():
        raise Exception("dummy0.1 was not removed before testing")
def test_ap_vlan_iface_cleanup_multibss(dev, apdev):
    """AP VLAN operation in multi-BSS multi-VLAN case"""
    # Shared body; the per_sta_vif variant passes a different config file.
    ap_vlan_iface_cleanup_multibss(dev, apdev, 'multi-bss-iface.conf')
def ap_vlan_iface_cleanup_multibss(dev, apdev, cfgfile):
    """Shared body for the multi-BSS multi-VLAN bridge/interface cleanup tests."""
    # AP VLAN with WPA2-Enterprise and RADIUS attributes changing VLANID
    # check that multiple bss do not interfere with each other with respect
    # to deletion of bridge and tagged interface.
    if not netifaces_imported:
        raise HwsimSkip("python module netifaces not available")
    try:
        ap_vlan_iface_cleanup_multibss_cleanup()
        ap_vlan_iface_test_and_prepare_environ()
        # Local RADIUS server; its user file is swapped later to move
        # stations to a different VLAN.
        as_params = { "ssid": "as",
                      "beacon_int": "2000",
                      "radius_server_clients": "auth_serv/radius_clients.conf",
                      "radius_server_auth_port": '18128',
                      "eap_server": "1",
                      "eap_user_file": "auth_serv/eap_user.conf",
                      "ca_cert": "auth_serv/ca.pem",
                      "server_cert": "auth_serv/server.pem",
                      "private_key": "auth_serv/server.key",
                      "vlan_naming": "1" }
        authserv = hostapd.add_ap(apdev[1], as_params)
        ifname = apdev[0]['ifname']
        # start the actual test
        hostapd.add_iface(ifname, cfgfile)
        hapd = hostapd.Hostapd(ifname)
        hapd1 = hostapd.Hostapd("wlan3-2", 1)
        hapd1.enable()
        ifaces = netifaces.interfaces()
        if "brvlan1" in ifaces:
            raise Exception("bridge brvlan1 already exists before")
        if "brvlan2" in ifaces:
            raise Exception("bridge brvlan2 already exists before")
        # Both stations start out in VLAN 1 (identity "vlan1"), one per BSS.
        dev[0].connect("bss-1", key_mgmt="WPA-EAP", eap="PAX",
                       identity="vlan1",
                       password_hex="0123456789abcdef0123456789abcdef",
                       scan_freq="2412")
        ifaces = netifaces.interfaces()
        if not("brvlan1" in ifaces):
            raise Exception("bridge brvlan1 was not created")
        hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
        if not iface_is_in_bridge("brvlan1", "dummy0.1"):
            raise Exception("dummy0.1 not in brvlan1")
        dev[1].connect("bss-2", key_mgmt="WPA-EAP", eap="PAX",
                       identity="vlan1",
                       password_hex="0123456789abcdef0123456789abcdef",
                       scan_freq="2412")
        hwsim_utils.test_connectivity_iface(dev[1], hapd1, "brvlan1")
        if not iface_is_in_bridge("brvlan1", "dummy0.1"):
            raise Exception("dummy0.1 not in brvlan1")
        # Swap the RADIUS user file so reauthentication assigns VLAN 2.
        authserv.disable()
        authserv.set('eap_user_file', "auth_serv/eap_user_vlan.conf")
        authserv.enable()
        logger.info("wlan0 -> VLAN 2")
        dev[0].dump_monitor()
        dev[0].request("REAUTHENTICATE")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("EAP reauthentication timed out")
        ev = dev[0].wait_event(["WPA: Key negotiation completed"], timeout=5)
        if ev is None:
            raise Exception("4-way handshake after reauthentication timed out")
        state = dev[0].get_status_field('wpa_state')
        if state != "COMPLETED":
            raise Exception("Unexpected state after reauth: " + state)
        # dev[1] (other BSS) is still in VLAN 1, so brvlan1 must survive.
        ifaces = netifaces.interfaces()
        if not ("brvlan1" in ifaces):
            raise Exception("bridge brvlan1 has been removed too early")
        hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan2",
                                            max_tries=5)
        if not iface_is_in_bridge("brvlan2", "dummy0.2"):
            raise Exception("dummy0.2 not in brvlan2")
        logger.info("test wlan1 == VLAN 1")
        hwsim_utils.test_connectivity_iface(dev[1], hapd1, "brvlan1")
        if not iface_is_in_bridge("brvlan1", "dummy0.1"):
            raise Exception("dummy0.1 not in brvlan1")
        logger.info("wlan1 -> VLAN 2")
        dev[1].dump_monitor()
        dev[1].request("REAUTHENTICATE")
        ev = dev[1].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("EAP reauthentication timed out")
        ev = dev[1].wait_event(["WPA: Key negotiation completed"], timeout=5)
        if ev is None:
            raise Exception("4-way handshake after reauthentication timed out")
        state = dev[1].get_status_field('wpa_state')
        if state != "COMPLETED":
            raise Exception("Unexpected state after reauth: " + state)
        # it can take some time for data connectivity to be updated
        hwsim_utils.test_connectivity_iface(dev[1], hapd1, "brvlan2",
                                            max_tries=5)
        logger.info("test wlan0 == VLAN 2")
        hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan2")
        if not iface_is_in_bridge("brvlan2", "dummy0.2"):
            raise Exception("dummy0.2 not in brvlan2")
        # With no station left in VLAN 1, its bridge must be gone by now.
        ifaces = netifaces.interfaces()
        if "brvlan1" in ifaces:
            raise Exception("bridge brvlan1 has not been cleaned up")
        # disconnect dev0 first to test a corner case
        dev[0].request("DISCONNECT")
        dev[0].wait_disconnected()
        dev[1].request("DISCONNECT")
        dev[1].wait_disconnected()
        # station removal needs some time
        for i in range(5):
            time.sleep(1)
            ifaces = netifaces.interfaces()
            if "brvlan2" not in ifaces:
                break
        ifaces = netifaces.interfaces()
        if "brvlan2" in ifaces:
            raise Exception("bridge brvlan2 has not been cleaned up")
        hapd.request("DISABLE")
    finally:
        ap_vlan_iface_cleanup_multibss_cleanup()
def test_ap_vlan_iface_cleanup_multibss_per_sta_vif(dev, apdev):
    """AP VLAN operation in multi-BSS multi-VLAN case with per-sta-vif set"""
    # AP VLAN with WPA2-Enterprise and RADIUS attributes changing VLANID
    # check that multiple bss do not interfere with each other with respect
    # to deletion of bridge and tagged interface. per_sta_vif is enabled.
    # Same shared body as the non-per_sta_vif test; only the config differs.
    ap_vlan_iface_cleanup_multibss(dev, apdev,
                                   'multi-bss-iface-per_sta_vif.conf')
def _check_bcast_protected(pcap, da, frame_desc, err_desc):
    """Verify via tshark that every broadcast frame sent to `da` had the
    Protected (encrypted) bit set."""
    out = run_tshark(pcap, "wlan.da == " + da, ["wlan.fc.protected"])
    if out is None:
        raise Exception(frame_desc + " not observed")
    lines = out.splitlines()
    if len(lines) < 1:
        raise Exception(frame_desc + " not observed")
    for l in lines:
        if int(l, 16) != 1:
            raise Exception("Broadcast packets were not encrypted " + err_desc)

def test_ap_vlan_without_station(dev, apdev, p):
    """AP VLAN with WPA2-PSK and no station"""
    # Refactored: the two previously duplicated tshark verification chunks
    # are shared via _check_bcast_protected.
    try:
        # Bring the VLAN bridge up manually so the AP can send into it even
        # before any station exists.
        subprocess.call(['brctl', 'addbr', 'brvlan1'])
        subprocess.call(['brctl', 'setfd', 'brvlan1', '0'])
        subprocess.call(['ifconfig', 'brvlan1', 'up'])
        # use a passphrase wlantest does not know, so it cannot
        # inject decrypted frames into pcap
        params = hostapd.wpa2_params(ssid="test-vlan",
                                     passphrase="12345678x")
        params['dynamic_vlan'] = "1"
        params['vlan_file'] = 'hostapd.wlan3.vlan'
        params['accept_mac_file'] = "hostapd.accept"
        hapd = hostapd.add_ap(apdev[0], params)

        # inject some traffic while no station is connected
        sa = hapd.own_addr()
        da = "ff:ff:ff:ff:ff:00"
        hapd.request('DATA_TEST_CONFIG 1 ifname=brvlan1')
        hapd.request('DATA_TEST_TX {} {} 0'.format(da, sa))
        hapd.request('DATA_TEST_CONFIG 0')
        time.sleep(.1)

        dev[0].connect("test-vlan", psk="12345678x", scan_freq="2412")

        # inject some traffic with a station connected
        sa = hapd.own_addr()
        da = "ff:ff:ff:ff:ff:01"
        hapd.request('DATA_TEST_CONFIG 1 ifname=brvlan1')
        hapd.request('DATA_TEST_TX {} {} 0'.format(da, sa))
        hapd.request('DATA_TEST_CONFIG 0')

        # let the AP send couple of Beacon frames
        time.sleep(1)
        pcap = os.path.join(p['logdir'], "hwsim0.pcapng")
        _check_bcast_protected(pcap, "ff:ff:ff:ff:ff:00", "first frame",
                               "when no station was connected")
        _check_bcast_protected(pcap, "ff:ff:ff:ff:ff:01", "second frame",
                               "when station was connected")

        dev[0].request("DISCONNECT")
        dev[0].wait_disconnected()
    finally:
        subprocess.call(['ip', 'link', 'set', 'dev', 'brvlan1', 'down'])
        subprocess.call(['ip', 'link', 'set', 'dev', 'wlan3.1', 'down'],
                        stderr=open('/dev/null', 'w'))
        subprocess.call(['brctl', 'delif', 'brvlan1', 'wlan3.1'],
                        stderr=open('/dev/null', 'w'))
        subprocess.call(['brctl', 'delbr', 'brvlan1'])
def test_ap_open_per_sta_vif(dev, apdev):
    """AP VLAN with open network"""
    hapd = hostapd.add_ap(apdev[0], { "ssid": "test-vlan-open",
                                      "per_sta_vif": "1" })
    dev[0].connect("test-vlan-open", key_mgmt="NONE", scan_freq="2412")
    # The station is reachable via the per-station vif "<apifname>.4096".
    per_sta_ifname = apdev[0]['ifname'] + ".4096"
    hwsim_utils.test_connectivity_iface(dev[0], hapd, per_sta_ifname)
def test_ap_vlan_open_per_sta_vif(dev, apdev):
    """AP VLAN (dynamic) with open network"""
    hapd = hostapd.add_ap(apdev[0], { "ssid": "test-vlan-open",
                                      "per_sta_vif": "1",
                                      "dynamic_vlan": "1" })
    dev[0].connect("test-vlan-open", key_mgmt="NONE", scan_freq="2412")
    # As in test_ap_open_per_sta_vif, but with dynamic VLAN support enabled.
    per_sta_ifname = apdev[0]['ifname'] + ".4096"
    hwsim_utils.test_connectivity_iface(dev[0], hapd, per_sta_ifname)
def test_ap_vlan_wpa2_radius_tagged(dev, apdev):
    """AP VLAN with WPA2-Enterprise and RADIUS EGRESS_VLANID attributes"""
    ifname = 'wlan0.1'
    try:
        params = hostapd.wpa2_eap_params(ssid="test-vlan")
        params['dynamic_vlan'] = "1"
        params["vlan_naming"] = "1"
        hapd = hostapd.add_ap(apdev[0], params)
        # "vlan1tagged" is assigned VLAN 1 as a tagged VLAN by the RADIUS server.
        dev[0].connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                       identity="vlan1tagged",
                       password_hex="0123456789abcdef0123456789abcdef",
                       scan_freq="2412")
        # Create tagged interface for wpa_supplicant
        subprocess.call(['ip', 'link', 'add', 'link', dev[0].ifname,
                         'name', ifname, 'type', 'vlan', 'id', '1'])
        subprocess.call(['ifconfig', ifname, 'up'])
        # Traffic must flow between the station's tagged vif and brvlan1.
        hwsim_utils.run_connectivity_test(dev[0], hapd, 0, ifname1=ifname,
                                          ifname2="brvlan1")
    finally:
        subprocess.call(['ifconfig', ifname, 'down'])
        subprocess.call(['ip', 'link', 'del', ifname])
def test_ap_vlan_wpa2_radius_mixed(dev, apdev):
    """AP VLAN with WPA2-Enterprise and tagged+untagged VLANs"""
    ifname = 'wlan0.1'
    try:
        params = hostapd.wpa2_eap_params(ssid="test-vlan")
        params['dynamic_vlan'] = "1"
        params["vlan_naming"] = "1"
        hapd = hostapd.add_ap(apdev[0], params)
        # "vlan12mixed" gets untagged VLAN 2 plus tagged VLAN 1 simultaneously.
        dev[0].connect("test-vlan", key_mgmt="WPA-EAP", eap="PAX",
                       identity="vlan12mixed",
                       password_hex="0123456789abcdef0123456789abcdef",
                       scan_freq="2412")
        # Add tagged VLAN interface to wpa_supplicant interface for testing
        subprocess.call(['ip', 'link', 'add', 'link', dev[0].ifname,
                         'name', ifname, 'type', 'vlan', 'id', '1'])
        subprocess.call(['ifconfig', ifname, 'up'])
        logger.info("Test connectivity in untagged VLAN 2")
        hwsim_utils.run_connectivity_test(dev[0], hapd, 0,
                                          ifname1=dev[0].ifname,
                                          ifname2="brvlan2")
        logger.info("Test connectivity in tagged VLAN 1")
        hwsim_utils.run_connectivity_test(dev[0], hapd, 0, ifname1=ifname,
                                          ifname2="brvlan1")
    finally:
        subprocess.call(['ifconfig', ifname, 'down'])
        subprocess.call(['ip', 'link', 'del', ifname])
def test_ap_vlan_reconnect(dev, apdev):
    """AP VLAN with WPA2-PSK connect, disconnect, connect"""
    params = hostapd.wpa2_params(ssid="test-vlan", passphrase="12345678")
    params.update({'dynamic_vlan': "1",
                   'accept_mac_file': "hostapd.accept"})
    hapd = hostapd.add_ap(apdev[0], params)
    logger.info("connect sta")
    dev[0].connect("test-vlan", psk="12345678", scan_freq="2412")
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
    logger.info("disconnect sta")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected(timeout=10)
    time.sleep(1)
    # Reconnecting must land the station back in the same VLAN bridge.
    logger.info("reconnect sta")
    dev[0].connect("test-vlan", psk="12345678", scan_freq="2412")
    hwsim_utils.test_connectivity_iface(dev[0], hapd, "brvlan1")
|
import hashlib
import itertools
import logging
import random
import re
import os
import datetime
from itertools import count, islice
from datetime import timedelta
import bleach
from taggit.models import Tag
from django import template, forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.timezone import utc
from django.core.paginator import Paginator
from django.core.cache import cache
from biostar.forum import markdown
from biostar.accounts.models import Profile, Message
from biostar.forum import const, auth
from biostar.forum.models import Post, Vote, Award, Subscription
User = get_user_model()
logger = logging.getLogger("engine")
register = template.Library()
# Maps sort/filter keys used in the templates to Semantic-UI icon class names.
ICON_MAP = dict(
    rank="list ol icon",
    views="eye icon",
    replies="comments icon",
    votes="thumbs up icon",
    all='calendar plus icon',
    today='clock icon',
    week='calendar minus outline icon',
    month='calendar alternate icon',
    year='calendar icon',
    visit='sort numeric down icon',
    reputation='star icon',
    joined='sign in icon',
    activity='comment icon',
    rsent="sort numeric down icon",
    sent="sort numeric up icon",
    rep="user outline icon",
    tagged="tags icon",
)
def get_count(request, key, default=0):
    """
    Returns a count stored in the session.
    """
    session_counts = request.session.get(const.COUNT_DATA_KEY, {})
    return session_counts.get(key, default)
@register.simple_tag(takes_context=True)
def activate(context, state, target):
    """Return "active" when `state` is one of the comma-separated `target` names."""
    return "active" if state in target.split(',') else ""
@register.filter
def bignum(number):
    "Reformats numbers with qualifiers as K"
    try:
        thousands = float(number) / 1000.0
    except ValueError:
        # Non-numeric input falls through unchanged.
        return str(number)
    if thousands > 10:
        return "%0.fk" % thousands
    if thousands > 1:
        return "%0.1fk" % thousands
    return str(number)
@register.simple_tag(takes_context=True)
def counts(context):
    """Vote and message counts stored in the current session."""
    request = context['request']
    return dict(
        votes=dict(count=get_count(request, 'vote_count') or 0),
        messages=dict(count=get_count(request, 'message_count') or 0),
    )
@register.inclusion_tag('search/search_pages.html', takes_context=True)
def pages_search(context, results):
    """Pagination context for a page of search results."""
    query = context['request'].GET.get('query', '')
    prev_num = results.pagenum - 1
    if results.is_last_page():
        next_num = results.pagenum
    else:
        next_num = results.pagenum + 1
    return dict(results=results, previous_page=prev_num, query=query,
                next_page=next_num)
@register.inclusion_tag('widgets/post_details.html')
def post_details(post, user, avatar=True):
    """Context for the post-details widget."""
    return {"post": post, "user": user, "avatar": avatar}
@register.simple_tag
def post_type_display(post_type):
    """Human-readable label for a Post type code (None when unknown)."""
    return dict(Post.TYPE_CHOICES).get(post_type)
def now():
    """Current UTC time as a timezone-aware datetime."""
    return datetime.datetime.utcnow().replace(tzinfo=utc)
@register.simple_tag
def gravatar(user=None, user_uid=None, size=80):
    """Return the gravatar for `user`, optionally resolving it by `user_uid`."""
    # BUG FIX: a bare `hasattr(user, 'profile')` statement here discarded its
    # result and had no effect; removed.
    # NOTE(review): the uid lookup only runs when `user` already has a
    # `profile` attribute — confirm this guard is intended rather than a
    # plain `if user_uid:`.
    if user_uid and hasattr(user, 'profile'):
        user = User.objects.filter(profile__uid=user_uid).first()
    return auth.gravatar(user=user, size=size)
@register.inclusion_tag('widgets/filter_dropdown.html', takes_context=True)
def filter_dropdown(context):
    """Render the filter dropdown with the current template context."""
    return context
@register.inclusion_tag('widgets/user_icon.html', takes_context=True)
def user_icon(context, user=None, is_moderator=False, is_spammer=False, score=0):
    """Context for the user-icon widget; profile values override the defaults."""
    try:
        if user:
            is_moderator = user.profile.is_moderator
            score = user.profile.get_score()
            is_spammer = user.profile.is_spammer
        else:
            score = score * 10
    except Exception as exc:
        # Profile lookups are best-effort; fall back to the passed-in values.
        logger.info(exc)
    context.update(dict(is_moderator=is_moderator, is_spammer=is_spammer, score=score))
    return context
@register.simple_tag()
def user_icon_css(user=None):
    """Icon CSS class reflecting the user's role and score."""
    if not (user and user.is_authenticated):
        return ''
    if user.profile.is_moderator:
        return "bolt icon"
    if user.profile.score > 1000:
        return "user icon"
    return "user outline icon"
@register.inclusion_tag('widgets/post_user_line.html', takes_context=True)
def post_user_line(context, post, avatar=False, user_info=True):
    """Render the author line for `post` (optionally with avatar)."""
    context.update(dict(post=post, avatar=avatar, user_info=user_info))
    return context
@register.inclusion_tag('widgets/post_user_line.html', takes_context=True)
def postuid_user_line(context, uid, avatar=True, user_info=True):
    """Like post_user_line, but resolves the post from its uid first."""
    target_post = Post.objects.filter(uid=uid).first()
    context.update(dict(post=target_post, avatar=avatar, user_info=user_info))
    return context
@register.inclusion_tag('widgets/user_card.html', takes_context=True)
def user_card(context, target):
    """Render the profile card for the `target` user."""
    context.update(dict(target=target))
    return context
@register.inclusion_tag('widgets/post_user_box.html', takes_context=True)
def post_user_box(context, target_user, post):
    """Render the user box shown next to `post` for `target_user`."""
    context.update(dict(target_user=target_user, post=post))
    return context
@register.inclusion_tag('widgets/post_actions.html', takes_context=True)
def post_actions(context, post, label="ADD COMMENT", author=None, lastedit_user=None, avatar=False):
    """Context for the post action bar (comment button etc.)."""
    request = context["request"]
    return {
        "post": post, "user": request.user, "author": author,
        "lastedit_user": lastedit_user, "label": label,
        "request": request, "avatar": avatar,
    }
@register.inclusion_tag('widgets/post_tags.html')
def post_tags(post=None, post_uid=None, show_views=False, tags_str='', spaced=True):
    """Render the tag list for a post (looked up by uid if given), or a raw tag string."""
    if post_uid:
        post = Post.objects.filter(uid=post_uid).first()
    tags = tags_str.split(",") if tags_str else ''
    # The post's own tag_val wins over the tags_str fallback.
    tags = post.tag_val.split(",") if post else tags
    return dict(post=post, tags=tags, show_views=show_views, spaced=spaced)
@register.inclusion_tag('widgets/pages.html', takes_context=True)
def pages(context, objs, show_step=True):
    """Render pagination controls for a page of objects, keeping the current path."""
    request = context["request"]
    url = request.path
    return dict(objs=objs, url=url, show_step=show_step, request=request)
@register.simple_tag
def randparam():
    "Append to URL to bypass server caching of CSS or JS files"
    if not settings.DEBUG:
        return ""
    return "?randval=%d" % random.randint(1, 10000000)
@register.inclusion_tag('widgets/show_messages.html')
def show_messages(messages):
    """
    Renders the given messages in the messages widget.
    """
    return dict(messages=messages)
@register.filter
def unread(message, user):
    """CSS class marking a message the given recipient has not read yet."""
    flag = message.recipient == user and message.unread
    return "unread-message" if flag else ""
@register.simple_tag
def toggle_unread(user):
    """Mark all of the user's unread messages as read; renders nothing."""
    Message.objects.filter(recipient=user, unread=True).update(unread=False)
    return ''
@register.simple_tag(takes_context=True)
def digest_label(context, post):
    """Label describing the current user's email digest preference."""
    user = context['request'].user
    no_digest = 'No digest'
    # Map each digest preference constant to its display label.
    label_map = {
        Profile.WEEKLY_DIGEST: "Weekly digest",
        Profile.MONTHLY_DIGEST: "Monthly digest",
        Profile.DAILY_DIGEST: 'Daily digest',
        Profile.NO_DIGEST: no_digest
    }
    if user.is_anonymous:
        return no_digest
    label = label_map.get(user.profile.digest_prefs, no_digest)
    return label
@register.simple_tag(takes_context=True)
def follow_label(context, post):
    """Label describing the current user's subscription to this thread."""
    user = context["request"].user
    not_following = "not following"
    label_map = {
        Subscription.LOCAL_MESSAGE: "following with messages",
        Subscription.EMAIL_MESSAGE: "following via email",
        Subscription.NO_MESSAGES: not_following,
    }
    if user.is_anonymous:
        return not_following
    # Get the current subscription
    sub = Subscription.objects.filter(post=post.root, user=user).first()
    # Default to an unsaved "no messages" subscription when none exists.
    sub = sub or Subscription(post=post, user=user, type=Subscription.NO_MESSAGES)
    label = label_map.get(sub.type, not_following)
    return label
@register.simple_tag
def inplace_type_field(post=None, field_id='type'):
    """Render a post-type <select> widget for inline (in-place) editing."""
    choices = [opt for opt in Post.TYPE_CHOICES]
    # Restrict to the site's allowed post types; otherwise to top-level types.
    choices = filter(lambda opt: (opt[1] in settings.ALLOWED_POST_TYPES) if settings.ALLOWED_POST_TYPES else
                     (opt[0] in Post.TOP_LEVEL), choices)
    post_type = forms.IntegerField(label="Post Type",
                                   widget=forms.Select(choices=choices, attrs={'class': "ui fluid dropdown",
                                                                               'id': field_id}),
                                   help_text="Select a post type.")
    value = post.type if post else Post.QUESTION
    post_type = post_type.widget.render('post_type', value)
    return mark_safe(post_type)
def get_tags_file():
    """
    Return the configured tags-options file path, or None when unset.
    """
    return getattr(settings, "TAGS_OPTIONS_FILE", None)
def read_tags(exclude=[], limit=5000):
    """Read tags from a file. Each line is considered a tag.

    Returns a set of (tag, False) pairs, skipping tags listed in `exclude`
    and reading at most `limit` lines. `exclude` is only read, never
    mutated, so the mutable default is safe.
    """
    # Get tags from a file
    tags_file = get_tags_file() or ''
    tags_opts = set()
    if not os.path.exists(tags_file):
        return tags_opts
    # Bug fix: open inside a context manager so the file handle is always
    # closed (the original leaked an open file object).
    with open(tags_file, 'r') as stream:
        for line in islice(stream, limit):
            line = line.strip()
            if line in exclude:
                continue
            tags_opts.add((line, False))
    return tags_opts
def get_dropdown_options(selected_list):
    """
    Present tags in a multi-select dropdown format.
    Returns an iterable of (name, is_selected) pairs.
    """
    limit = 50
    # Gather already selected tags
    selected = {(val, True) for val in selected_list}
    # Read tags from file.
    try:
        opts = read_tags(exclude=selected_list)
    except Exception as exc:
        logger.error(f"Error reading tags from file.:{exc}")
        opts = []
    # Read tags from database if none found in file.
    if not opts:
        query = Tag.objects.exclude(name__in=selected_list)[:limit].values_list("name", flat=True)
        opts = {(name.strip(), False) for name in query}
    # Chain the selected and rest of the options
    opts = itertools.chain(selected, opts)
    return opts
@register.inclusion_tag('forms/field_tags.html', takes_context=True)
def tags_field(context, form_field, initial=''):
    """Render multi-select dropdown options for tags. """
    # Get currently selected tags from the post or request
    selected = initial.split(",") if initial else []
    options = get_dropdown_options(selected_list=selected)
    context = dict(initial=initial, form_field=form_field, dropdown_options=options)
    return context
@register.inclusion_tag('forms/form_errors.html')
def form_errors(form, wmd_prefix='', override_content=False):
    """
    Turns form errors into a data structure: ('', msg) pairs for
    non-field errors, (label, msg, field_id) triples for field errors.
    """
    try:
        errorlist = [('', message) for message in form.non_field_errors()]
        for field in form:
            for error in field.errors:
                # wmd_prefix is required when dealing with 'content' field.
                # Bug fix: compare strings with '==', not identity ('is') —
                # identity comparison to a literal is implementation-defined.
                field_id = wmd_prefix if (override_content and field.name == 'content') else field.id_for_label
                errorlist.append((f'{field.name}:', error, field_id))
    except Exception as exc:
        errorlist = []
        # Consistency fix: use the module logger like the rest of the file.
        logger.error(exc)
    context = dict(errorlist=errorlist)
    return context
@register.inclusion_tag('widgets/post_body.html', takes_context=True)
def post_body(context, post, user, tree):
    "Renders the post body"
    request = context['request']
    return dict(post=post, user=user, tree=tree, request=request)
@register.filter
def get_award_context(award):
    """Link back to the post an award was earned on (empty when detached)."""
    post = award.post
    # NOTE(review): the HTML is not mark_safe'd here, so template
    # autoescaping decides whether the <a> renders — confirm intended.
    context = f"For : <a href={post.get_absolute_url()}>{post.title}</a>" if post else ""
    return context
@register.filter
def get_user_location(user):
    """Profile location string, with a placeholder when unset."""
    location = user.profile.location
    return location if location else "location unknown"
@register.filter
def get_last_login(user):
    """Human-readable time since last login (join date when never logged in)."""
    profile = user.profile
    when = profile.last_login if profile.last_login else profile.date_joined
    return f"{time_ago(when)}"
@register.filter
def highlight(hit, field):
    """Return search-hit highlights for a field, or the raw value when none."""
    lit = hit.highlights(field, top=5)
    return mark_safe(lit) if len(lit) else hit[field]
@register.inclusion_tag('widgets/feed_custom.html')
def custom_feed(objs, feed_type='', title=''):
    """Build a sidebar feed of the users attached to the given objects."""
    users = ()
    if feed_type == 'messages':
        users = {m.sender for m in objs}
    if feed_type in ('following', 'bookmarks', 'votes'):
        users = {o.author for o in objs}
    return dict(users=users, title=title)
@register.inclusion_tag(takes_context=True, filename='search/search_bar.html')
def search_bar(context, tags=False, users=False):
    """Render the search bar, targeting tags, users, or posts."""
    search_url = reverse('tags_list') if tags else reverse('community_list') if users else reverse('post_search')
    request = context['request']
    value = request.GET.get('query', '')
    context = dict(search_url=search_url, value=value)
    return context
@register.simple_tag
def get_post_list(target, request, show=None):
    """
    Return a paginated post list authored by the target user,
    optionally restricted to one post type via `show`.
    """
    user = request.user
    page = request.GET.get("page", 1)
    posts = Post.objects.valid_posts(u=user, author=target)
    # Show a specific post listing.
    show_map = dict(questions=Post.QUESTION, tools=Post.TOOL, news=Post.NEWS,
                    blogs=Post.BLOG, tutorials=Post.TUTORIAL, answers=Post.ANSWER,
                    comments=Post.COMMENT)
    type_filter = show_map.get(show)
    posts = posts.filter(type=type_filter) if type_filter is not None else posts
    posts = posts.select_related("root").select_related( "author__profile", "lastedit_user__profile")
    posts = posts.order_by("-rank")
    # Cache the users posts add pagination to posts.
    paginator = Paginator(object_list=posts, per_page=settings.POSTS_PER_PAGE)
    posts = paginator.get_page(page)
    return posts
@register.inclusion_tag('widgets/feed_default.html')
def default_feed(user):
    """Build the default sidebar feed: recent votes, locations, awards, replies."""
    recent_votes = Vote.objects.filter(post__status=Post.OPEN,
                                       post__root__status=Post.OPEN).prefetch_related("post")
    recent_votes = recent_votes.order_by("-pk")[:settings.VOTE_FEED_COUNT]
    # Get valid users that have a location set in profile.
    recent_locations = Profile.objects.valid_users().exclude(location="").prefetch_related("user")
    recent_locations = recent_locations.order_by('-last_login')[:settings.LOCATION_FEED_COUNT]
    # Get valid results
    recent_awards = Award.objects.valid_awards().select_related("badge", "user", "user__profile")
    recent_awards = recent_awards.order_by("-pk")[:settings.AWARDS_FEED_COUNT]
    # Get valid posts
    recent_replies = Post.objects.valid_posts(is_toplevel=False).select_related("author__profile", "author")
    recent_replies = recent_replies.order_by("-pk")[:settings.REPLIES_FEED_COUNT]
    context = dict(recent_votes=recent_votes, recent_awards=recent_awards, users=[],
                   recent_locations=recent_locations, recent_replies=recent_replies,
                   user=user)
    return context
@register.simple_tag
def planet_gravatar(planet_author):
    """Generate a deterministic 'retro' gravatar URL for a planet feed author."""
    handle = planet_author.replace(' ', '')
    email = f"{handle}@planet.org".encode('utf-8')
    return auth.gravatar_url(email=email, style='retro')
@register.simple_tag
def get_icon(string, default=""):
    """Look up an icon class by name, falling back to the default's icon."""
    return ICON_MAP.get(string) or ICON_MAP.get(default)
@register.simple_tag
def get_digest_icon(user):
    """Icon class matching the user's digest preference."""
    no_digest = 'bell slash icon'
    icon_map = {Profile.WEEKLY_DIGEST: 'hourglass icon', Profile.MONTHLY_DIGEST: 'calendar icon',
                Profile.DAILY_DIGEST: 'clock icon', Profile.NO_DIGEST: no_digest}
    icon = icon_map.get(user.profile.digest_prefs) or no_digest
    return icon
@register.inclusion_tag('widgets/list_awards.html', takes_context=True)
def list_awards(context, target):
    """Render the target user's awards, newest first, paginated 20 per page."""
    request = context['request']
    awards = Award.objects.filter(user=target).select_related('post', 'post__root', 'user', 'user__profile',
                                                              'badge').order_by("-date")
    page = request.GET.get('page', 1)
    # Create the paginator
    paginator = Paginator(object_list=awards, per_page=20)
    # Apply the votes paging.
    awards = paginator.get_page(page)
    context = dict(awards=awards, request=request)
    return context
@register.simple_tag
def get_wording(filtered, prefix="Sort by:", default=""):
    """
    Get the naming and icons for limits and ordering.
    Falls back to `default` itself when neither key is known.
    """
    display = dict(all="all time", week="this week", month="this month",
                   year="this year", rank="Rank", views="Views", today="today",
                   replies="replies", votes="Votes", visit="recent visit",
                   reputation="reputation", joined="date joined", activity="activity level",
                   rsent="oldest to newest ", sent="newest to oldest",
                   rep="sender reputation", tagged="tagged")
    # Bug fix: display[default] raised KeyError whenever `default` was not a
    # known key (including the default empty string). Fall back gracefully.
    displayed = display.get(filtered) or display.get(default, default)
    wording = f"{prefix} {displayed}"
    return wording
@register.simple_tag
def activate_check_mark(filter, active):
    """Return the checkmark icon class when this filter is the active one."""
    return 'check icon' if filter == active else ''
@register.simple_tag
def relative_url(value, field_name, urlencode=None):
    """
    Updates field_name parameters in url with new value
    """
    # Start the query string with the refreshed field_name/value pair.
    url = f'?{field_name}={value}'
    if urlencode:
        # Keep every existing pair except any stale field_name entry.
        kept = [pair for pair in urlencode.split('&')
                if pair.split('=')[0] != field_name]
        url = f"{url}&{'&'.join(kept)}"
    return url
@register.simple_tag
def get_thread_users(users, post, limit=2):
    """Pick up to `limit` distinct users to display for a thread."""
    shown = {post.author, post.lastedit_user or post.author}
    for candidate in users:
        if len(shown) >= limit:
            break
        if candidate not in shown:
            shown.add(candidate)
    return shown
@register.filter
def show_nonzero(value):
    "The purpose of this is to return value or empty"
    return value or ''
@register.simple_tag
def object_count(request, otype):
    """Count objects of the given type for the signed-in user (0 otherwise)."""
    user = request.user
    if not user.is_authenticated:
        return 0
    if otype == "message":
        return user.profile.new_messages
    return 0
def pluralize(value, word):
    """Format a count with its unit, appending an 's' for values above one."""
    suffix = "s" if value > 1 else ""
    return "%d %s%s" % (value, word, suffix)
@register.filter
def time_ago(date):
    """Human-readable elapsed time: 'just now', '5 minutes ago', '1.2 years ago'."""
    if not date:
        return ''
    delta = now() - date
    if delta < timedelta(minutes=1):
        return 'just now'
    elif delta < timedelta(hours=1):
        unit = pluralize(delta.seconds // 60, "minute")
    elif delta < timedelta(days=1):
        unit = pluralize(delta.seconds // 3600, "hour")
    elif delta < timedelta(days=30):
        unit = pluralize(delta.days, "day")
    elif delta < timedelta(days=90):
        unit = pluralize(int(delta.days / 7), "week")
    elif delta < timedelta(days=730):
        unit = pluralize(int(delta.days / 30), "month")
    else:
        # Beyond ~2 years, switch to fractional years.
        diff = delta.days / 365.0
        unit = '%0.1f years' % diff
    return "%s ago" % unit
@register.simple_tag
def subscription_label(sub_count):
    """Badge showing the follower count once it passes a small threshold."""
    if sub_count <= 5:
        return ""
    return mark_safe(f'<div class="subs">{sub_count} follow</div>')
@register.filter
def bignum(number):
    "Reformats numbers with qualifiers as K"
    try:
        value = float(number) / 1000.0
        if value > 10:
            return "%0.fk" % value
        elif value > 1:
            return "%0.1fk" % value
    except (ValueError, TypeError):
        # Bug fix: float(None) raises TypeError, which the original
        # `except ValueError` did not catch. Non-numeric input falls
        # through to plain str().
        pass
    return str(number)
def post_boxclass(root_type, answer_count, root_has_accepted):
    """Compute the CSS classes for a post listing row."""
    # Map each post type to its row style; anything else renders as a question.
    styles = {
        Post.JOB: "job",
        Post.TUTORIAL: "tutorial",
        Post.TOOL: "tool",
        Post.FORUM: "forum",
        Post.NEWS: "news",
    }
    style = styles.get(root_type, "question")
    if isinstance(answer_count, int) and answer_count >= 1:
        style += " has_answers"
    if root_has_accepted == True:
        modifier = "accepted answer" if root_type == Post.QUESTION else "accepted"
    else:
        modifier = "open"
    return f"{style} {modifier}"
@register.simple_tag
def search_boxclass(root_type, answer_count, root_has_accepted):
    """Template-tag wrapper around post_boxclass for search results."""
    return post_boxclass(root_type=root_type,
                         answer_count=answer_count,
                         root_has_accepted=root_has_accepted)
@register.simple_tag
def boxclass(post=None, uid=None):
    """CSS classes for a post row, derived from its root thread.

    NOTE(review): `uid` is accepted but never used, and `post=None` would
    raise AttributeError — confirm whether a uid lookup was intended.
    """
    return post_boxclass(root_type=post.root.type,
                         answer_count=post.root.answer_count,
                         root_has_accepted=post.root.has_accepted)
@register.simple_tag(takes_context=True)
def render_comments(context, tree, post, template_name='widgets/comment_body.html'):
    """Render the comment subtree rooted at this post as safe HTML."""
    request = context["request"]
    text = ''
    if post.id in tree:
        text = traverse_comments(request=request, post=post, tree=tree, template_name=template_name)
    return mark_safe(text)
def traverse_comments(request, post, tree, template_name):
    "Traverses the tree and generates the page"
    body = template.loader.get_template(template_name)
    # Track visited nodes to detect cycles in the comment tree.
    seen = set()
    def traverse(node, collect=[]):
        # Render this comment, then recurse into its children; each subtree
        # is wrapped in an .indent container.
        cont = {"post": node, 'user': request.user, 'request': request}
        html = body.render(cont)
        collect.append(f'<div class="indent" ><div>{html}</div>')
        for child in tree.get(node.id, []):
            if child in seen:
                raise Exception(f"circular tree {child.pk} {child.title}")
            seen.add(child)
            traverse(child, collect=collect)
        collect.append(f"</div>")
    # this collects the comments for the post
    collect = ['<div class="comment-list">']
    for node in tree[post.id]:
        traverse(node, collect=collect)
    collect.append("</div>")
    html = '\n'.join(collect)
    return html
def top_level_only(attrs, new=False):
    '''
    Helper function used when linkifying with bleach: keep existing links,
    but only create new ones for absolute http(s) URLs.
    '''
    if not new:
        return attrs
    if attrs['_text'].startswith(('http:', 'https:')):
        return attrs
    return None
@register.simple_tag
def markdown_file(pattern):
    """
    Returns the rendered content of a file matched by the pattern.
    Returns an error message if the pattern cannot be found.
    """
    path = os.path.abspath(pattern)
    if os.path.isfile(path):
        # Bug fix: read inside a context manager so the file handle is
        # closed (the original leaked an open file object).
        with open(path) as fp:
            text = fp.read()
    else:
        text = f" file '{pattern}': '{path}' not found"
    try:
        html = markdown.parse(text, clean=False, escape=False, allow_rewrite=True)
        html = bleach.linkify(html, callbacks=[top_level_only], skip_tags=['pre'])
        html = mark_safe(html)
    except Exception as e:
        html = f"Markdown rendering exception"
        logger.error(e)
    return html
class MarkDownNode(template.Node):
    """Template node that renders its enclosed content through the markdown parser."""
    #CALLBACKS = [top_level_only]
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        # Render the inner template first, then convert the result to HTML.
        text = self.nodelist.render(context)
        text = markdown.parse(text, clean=False, escape=False, allow_rewrite=True)
        #text = bleach.linkify(text, callbacks=self.CALLBACKS, skip_tags=['pre'])
        return text
@register.tag('markdown')
def markdown_tag(parser, token):
    """
    Enables a block of markdown text to be used in a template.
    Syntax::
        {% markdown %}
        ## Markdown
        Now you can write markdown in your templates. This is good because:
        * markdown is awesome
        * markdown is less verbose than writing html by hand
        {% endmarkdown %}
    """
    nodelist = parser.parse(('endmarkdown',))
    # Consume the {% endmarkdown %} token so it is not rendered as output.
    parser.delete_first_token()
    return MarkDownNode(nodelist)
|
from rest_framework import permissions
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
)
from rest_framework.response import Response
from .models import Ingredients
@api_view(["GET", "HEAD"])
@permission_classes([permissions.AllowAny])
@authentication_classes([])
def healthz(request, *args, **kwargs):
    """Unauthenticated health-check endpoint: always responds HTTP 200."""
    return Response(status=200)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from graphscope.client.session import get_default_session
from graphscope.dataset.io_utils import DATA_SITE
from graphscope.dataset.io_utils import download_file
def load_p2p_network(sess=None, prefix=None, directed=False):
    """Load p2p graph.
    A peer-to-peer dataset derived from Gnutella peer-to-peer network, August 31 2002,
    with generated data on vertices and edges. See more details here:
    http://snap.stanford.edu/data/p2p-Gnutella31.html
    Args:
        sess (:class:`graphscope.Session`): Load graph within the session.
            Default session will be used when setting to None. Defaults to None.
        prefix: `PathLike` object that represents a path.
            With standalone mode, set prefix None will try to download from
            source URL. Defaults to None.
        directed (bool, optional): Determine to load a directed or undirected graph.
            Defaults to False.
    Returns:
        :class:`graphscope.framework.graph.GraphDAGNode`:
            A Graph node which graph type is ArrowProperty, evaluated in eager mode.
    Examples:
        .. code:: python
        >>> # lazy mode
        >>> import graphscope
        >>> from graphscope.dataset import load_p2p_network
        >>> sess = graphscope.session(mode="lazy")
        >>> g = load_p2p_network(sess, "/path/to/dataset")
        >>> g1 = sess.run(g)
        >>> # eager mode
        >>> import graphscope
        >>> from graphscope.dataset import load_p2p_network
        >>> sess = graphscope.session(mode="eager")
        >>> g = load_p2p_network(sess, "/path/to/dataset")
    """
    if prefix is not None:
        prefix = os.path.expandvars(prefix)
    else:
        # No local prefix: fetch and verify the archive from the data site.
        fname = "p2p_network.tar.gz"
        origin = f"{DATA_SITE}/p2p_network.tar.gz"
        fpath = download_file(
            fname,
            origin=origin,
            extract=True,
            file_hash="117131735186caff23ea127beec61b5396662c0815fc7918186451fe957e8c2f",
        )
        # assumed dirname is p2p_network after extracting from p2p_network.tar.gz
        prefix = fpath[0:-7]
    if sess is None:
        sess = get_default_session()
    graph = sess.g(directed=directed)
    graph = graph.add_vertices(
        os.path.join(prefix, "p2p-31_property_v_0"), "host"
    ).add_edges(
        os.path.join(prefix, "p2p-31_property_e_0"),
        "connect",
        src_label="host",
        dst_label="host",
    )
    return graph
|
import json
import matplotlib.pyplot as plt
from collections import defaultdict
def main():
    """Plot the distribution of original languages over the filtered movies.

    Reads the movie index from ``movies.json``, loads each movie's detail
    record, counts languages for movies passing filter(), and draws a
    broken-axis bar chart saved to ``languages.png``.
    """
    language = defaultdict(int)
    # Bug fix: open files in context managers so handles are closed.
    with open("movies.json", "r") as fp:
        index = json.load(fp)
    for entry in index:
        with open("movies/" + entry['id'] + ".json", "r") as fp:
            movie = json.load(fp)
        if filter(movie):
            language[movie['original_language']] += 1
    plt.figure(figsize=(24, 13.5), dpi=80)
    # Broken y-axis: top panel shows the dominant language, bottom the rest.
    f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
    ax.set_title("Language Distribution")
    ax.bar(range(len(language)), list(language.values()), tick_label=list(language.keys()))
    ax2.bar(range(len(language)), list(language.values()), tick_label=list(language.keys()))
    ax.set_ylim(9900, 10000)  # outliers only
    ax2.set_ylim(0, 700)  # most of the data
    ax.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    # Bug fix: tick_params expects booleans; the strings 'off' are truthy
    # and were interpreted as True (ticks shown) on matplotlib >= 2.0.
    ax.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False
    )
    ax2.xaxis.tick_bottom()
    d = .005  # how big to make the diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
    ax.plot((-d, +d), (-d, +d), **kwargs)  # top-left diagonal
    ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
    kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
    ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)
    plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
    plt.savefig('languages.png')
    plt.show()
# All movies with meta_score_count > 0 also have imdb_score_count > 0
# All movies with meta_score_count > 0 also have imdb_score_count > 0
# NOTE(review): shadows the built-in filter(); kept because main() calls it
# by this name.
def filter(movie):
    """Keep only movies that have at least one Metacritic score."""
    return movie['meta_score_count'] > 0
if __name__ == "__main__":
main()
|
# ABC015d
# Bounded knapsack: choose at most K of N items with total width <= W,
# maximizing total value. dp[i][j][k] = best value using the first i items
# with exactly-at-most j picks and width budget k.
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
W = int(input())
N, K = map(int, input().split())
# a[i] = [width, value] for item i.
a = [list(map(int, input().split())) for _ in range(N)]
dp = [[[0]*(W+1) for i in range(K+1)] for _ in range(N+1)]
for i in range(1, N+1):
    for j in range(1, K + 1):
        for k in range(W + 1):
            if k-a[i-1][0] >= 0:
                # Take item i (costs one pick and a[i-1][0] width) or skip it.
                dp[i][j][k] = max(dp[i-1][j - 1]
                                  [k - a[i-1][0]] + a[i-1][1], dp[i-1][j][k])
            else:
                # Item does not fit: carry forward the best without it.
                dp[i][j][k] = max(dp[i][j][k], dp[i-1][j][k])
#print(N, K, W, len(dp))
# print(dp[N][K])
print(dp[N][K][W])
|
"""
class Conta:                 # class
    def __init__(self):      # __init__ method (constructor)
        self.objeto = objeto # attributes
    def funcao():            # method
"""
import datetime
class Historico:
    """Transaction history for a bank account."""

    def __init__(self) -> None:
        self.data_abertura = datetime.datetime.today()
        self.transacoes = []

    def imprime(self) -> None:
        """Print the opening date followed by every recorded transaction."""
        print(f'data abertura: {self.data_abertura}')
        print('transações: ')
        for transacao in self.transacoes:
            print('-', transacao)
class Cliente:
    """A bank customer identified by name and CPF."""

    # Bug fix: __init__ was annotated as returning str; constructors
    # always return None.
    def __init__(self, nome: str, sobrenome: str, cpf: str) -> None:
        self.nome: str = nome
        self.sobrenome: str = sobrenome
        self.cpf: str = cpf
class Conta:
    """A bank account with a balance, a limit, and a transaction history."""

    def __init__(
        self, titular: str, numero: str, saldo: float, limite: float = 1000.0
    ) -> None:
        print('inicializando uma conta')
        self.titular: str = titular
        self.numero: str = numero
        self.saldo: float = saldo
        self.limite: float = limite
        self.historico = Historico()

    def deposita(self, valor: float) -> None:
        """Add `valor` to the balance and record the deposit."""
        self.saldo += valor
        self.historico.transacoes.append(f'depósito de {valor}')

    def saca(self, valor: float) -> bool:
        """Withdraw `valor`; return True on success, False on insufficient funds."""
        if self.saldo < valor:
            return False
        self.saldo -= valor
        self.historico.transacoes.append(f'saque de {valor}')
        # Bug fix: report success explicitly (the original fell through and
        # returned None), matching the `is False` check in transfere_para.
        return True

    def extrato(self) -> None:
        """Print the account number and balance, logging the inquiry."""
        print(f'numero: {self.numero} \nsaldo: {self.saldo}')
        self.historico.transacoes.append(
            f'tirou extrato - saldo de {self.saldo}'
        )

    def transfere_para(self, destino: 'Conta', valor: float) -> bool:
        """Move `valor` to `destino`; return False if the withdrawal failed."""
        retirou = self.saca(valor)
        if retirou is False:
            return False
        destino.deposita(valor)
        self.historico.transacoes.append(
            f'transferencia de {valor} para conta {destino.numero}'
        )
        return True

    def pega_saldo(self) -> float:
        # Bug fix: the attribute is 'saldo'; 'self._saldo' does not exist
        # and raised AttributeError.
        return self.saldo
# Demo usage.
# NOTE(review): the positional args don't match Conta.__init__(titular,
# numero, saldo) — '123-4' lands in titular and the Cliente object in
# numero. Confirm the intended argument order.
cliente1 = Cliente('João', 'Oliveira', '111.111.111-11')
cliente2 = Cliente('José', 'Azevedo', '222.222.222-22')
conta1 = Conta('123-4', cliente1, 1000.0)
conta2 = Conta('123-5', cliente2, 1000.0)
# 40000 exceeds the 1000.0 balance, so this withdrawal fails silently.
conta1.saca(40000)
conta1.extrato()
|
# Test-generation descriptor for the nt2 crlibm asinpi_ru functor: one
# real_-typed unary functor, verified against nt2::asinpi within 1 ulp
# over the range [-100, 100].
[ ## this file was manually modified by jt
    {
     'functor' : {
         'arity' : '1',
         'call_types' : [],
         'ret_arity' : '0',
         'rturn' : {
             'default' : 'T',
            },
         'simd_types' : [],
         'special' : ['crlibm'],
         'type_defs' : [],
         'types' : ['real_'],
        },
     'info' : 'manually modified',
     'unit' : {
         'global_header' : {
             'first_stamp' : 'created by jt the 02/03/2011',
             'included' : ['#include <nt2/include/functions/asinpi.hpp>'],
             'notes' : [],
             'stamp' : 'modified by jt the 02/03/2011',
            },
         'ranges' : {
             'default' : [['T(-100)', 'T(100)']],
            },
         'specific_values' : {
            },
         'verif_test' : {
             'property_call' : {
                 'default' : ['nt2::crlibm::asinpi_ru(a0)'],
                },
             'property_value' : {
                 'default' : ['nt2::asinpi(a0)'],
                },
             'simd' : {
                },
             'ulp_thresh' : {
                 'default' : ['1'],
                },
            },
        },
    },
]
|
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from pandas.tseries.offsets import CustomBusinessDay
import numpy as np
import datetime
def make_baseline(x_days, pivot, name="Temperature", freq="15min"):
    """Average the pivot rows for the selected days into a one-column frame."""
    selected = pivot[pivot.index.isin(x_days)]
    return selected.mean(axis=0).to_frame(name)
def create_timeseries(df, event_index):
    """Convert a fractional-hour-indexed frame into a Timestamp-indexed Series.

    df is indexed by fractional hours (e.g. 13.25 = 13:15); event_index is a
    'YYYY-MM-DD' date string. Returns the single 'demand' column as a Series
    indexed by full timestamps on that date.
    """
    col=[]
    df.columns=['demand']
    for i in df.index:
        # Split the fractional hour into whole hours and minutes.
        hours=int(i)//1
        minutes=(i%1)*60
        #col.append(event_day+pd.Timedelta(hours=hours, minutes=minutes))
        col.append(pd.Timestamp(event_index+' 00:00:00')+pd.Timedelta(hours=hours, minutes=minutes))
    df["Time"]=col
    adj_df=df.set_index(["Time"])
    df=adj_df[adj_df.columns[0]]
    return df
def select_demand(data):  # removed _
    """Return only the columns whose name contains 'demand'."""
    return data.filter(regex="demand")
def create_pivot(data, freq="15min"):  # removed _
    """Pivot a time-indexed frame into one row per date.

    Columns are the intra-day slots: fractional hours for 15-minute data,
    integer hours for hourly data.

    Raises:
        ValueError: if freq is not "15min" or "1h".
    """
    if freq=="15min": # we are using 15 minute intervals so we can accurately calculate cost
        data["date"] = data.index.date
        data["combined"]=data.index.hour+(data.index.minute*(1.0/60.0))
        data_multi=data.set_index(["date","combined"])
        data_multi=data_multi[~data_multi.index.duplicated(keep='last')]
        data_pivot = data_multi.unstack()
        # remove double index
        data_pivot.columns = data_pivot.columns.droplevel(0)
    elif freq=="1h":
        # add date and hour for new index
        data["date"] = data.index.date
        data["hour"] = data.index.hour
        data_multi=data.set_index(["date","hour"])
        data_multi=data_multi[~data_multi.index.duplicated(keep='last')]
        # create pivot
        data_pivot = data_multi.unstack()
        # remove double index
        data_pivot.columns = data_pivot.columns.droplevel(0)
    else:
        # Bug fix: any other freq previously fell through and raised
        # UnboundLocalError on `data_pivot`; fail with a clear message.
        raise ValueError(f"unsupported freq: {freq!r}")
    return data_pivot
def _remove_event_day(data, event_day, PDP_dates): #removes all event days specified in the _PDP list above
    """Drop the event day and every PDP date from a date-indexed frame.

    Best effort: on failure the unfiltered data is returned after logging.
    """
    try:
        #data = data[~(data.index.date == event_index.date())]
        data = data[~(data.index.date == event_day)]
        for i in PDP_dates:
            data=data[~(data.index.date == i)]
        return data
    except Exception as e:
        # Deliberate best-effort fallback to the unfiltered data.
        print(e)
        print("error in _remove_event_day")
        return data
def _remove_WE_holidays_NaN(data):
    """Drop weekends, US federal holidays, and all-NaN rows from the frame."""
    no_WE = ~((data.index.weekday == 5) | (data.index.weekday == 6)) # remove if WE
    cal = calendar()
    start = datetime.datetime.strftime(data.index.min(),"%Y-%m-%d")
    end =datetime.datetime.strftime(data.index.max(),"%Y-%m-%d")
    hol_cal = cal.holidays(start=start, end=end)
    no_hol = ~data.index.isin(hol_cal) # remove if it is a national holiday
    no_NaN = ~data.isna().all(axis=1) # remove only rows that are NaN for every hour
    return data[no_WE & no_hol & no_NaN]
def _get_last_Y_days(data, event_index, Y):
    """Return the Y most recent rows at or before event_index.

    NOTE(review): the assert is stripped under `python -O`; a raise would
    be more robust for input validation.
    """
    assert data.shape[0] >= Y, "not enough data for {} days".format(Y)
    try:
        start=data.index[0]
        data=data[start:event_index] #test this
        data = data.sort_index(ascending=False).iloc[0:Y,:]
        return data
    except Exception as e:
        # Best effort: report and return whatever data survived.
        print(e)
        print("data available only for {} days".format(data.shape[0]))
        return data
def _get_X_in_Y(data, power_data, X=None, event_start_h=14, event_end_h=18, weather_event_data=None, include_last=False, weather_mapping=False, weather_data=None, method='max', ):
#choses the highest X days out of Y days (if weather_mapping is true, it choses the days with the highest OAT values)
if not X:
X=power_data.shape[0]
cols = np.arange(event_start_h, event_end_h+include_last*1)
if weather_mapping==True:
if method=='proximity': #chooses x days based on how close the weather is
rows=np.shape(weather_data)[0]
weather_event_day=weather_event_data
for i in range(rows-1):
weather_event_data=weather_event_data.append(weather_event_day, ignore_index=True)
weather_event_data=weather_event_data[cols]
weather_event_data.index=weather_data[cols].index
x_days=abs(weather_event_data-weather_data[cols]).sum(axis=1).sort_values(ascending=True)[0:X].index
else:
x_days=weather_data[cols].sum(axis=1).sort_values(ascending=False)[0:X].index
else:
x_days = power_data[cols].sum(axis=1).sort_values(ascending=False)[0:X].index
return data[data.index.isin(x_days)], x_days
def _get_adj_ratio(data,
event_data,
event_start_h=14,
min_ratio=1.0,
max_ratio=1.3):
# this is hardcoded, we may want to do it in a more flexible way
# strategy: 4 hours before the event, take the first 3 and average them
pre_event_period_start = event_start_h - 4
try:
ratio = event_data.iloc[:,(pre_event_period_start*4):(event_start_h-1)*4].mean().mean()/data.iloc[:,(pre_event_period_start*4):(event_start_h-1)*4].mean().mean()
# print(ratio)
except:
ratio=1
print('Error in calculating ratios')
#If you want to implement maximum and minimum restrictions uncomment lines below!
if ratio < min_ratio:
ratio=min_ratio
if ratio > max_ratio:
ratio=max_ratio
if np.isnan(ratio):
ratio=1
return ratio
"""
If method=='proximity' (and weather_mapping=True), the baseline chooses the X days whose weather is closest to the event day's;
if method=='max' it chooses the hottest X days out of the last Y days.
"""
def get_X_in_Y_baseline(data, weather_pivot, event_day,PDP_dates,
                        event_index,
                        X=3,
                        Y=10,
                        event_start_h=12,
                        event_end_h=18,
                        include_last=False,
                        adj_ratio=True,
                        min_ratio=1.0,
                        max_ratio=1.3,
                        sampling="quarterly", weather_mapping=False, method='max'):
    """Compute an X-in-Y baseline: average of the X highest of the last Y
    valid days (weekdays, non-holiday, non-event), optionally scaled by a
    same-day adjustment ratio.

    Returns (baseline frame, candidate days, event-day actuals (transposed),
    selected days, adjustment ratio).
    """
    event_data= data[data.index.date == event_day]
    # Exclude event/PDP days, weekends, holidays and empty rows from candidates.
    data = _remove_event_day(data, event_index,PDP_dates)
    data = _remove_WE_holidays_NaN(data)
    weather_event_data=weather_pivot[weather_pivot.index.date == event_day]
    weather_data=_remove_event_day(weather_pivot, event_index, PDP_dates)
    weather_data = _remove_WE_holidays_NaN(weather_data)
    data_y =_get_last_Y_days(data, event_index, Y)
    days=data_y.index
    weather_data=_get_last_Y_days(weather_data, event_index, Y)
    data_x, x_days = _get_X_in_Y(data, power_data=data_y,
                                 X=X,
                                 event_start_h=event_start_h,
                                 event_end_h=event_end_h,
                                 weather_event_data=weather_event_data,
                                 include_last=include_last, weather_mapping=weather_mapping, weather_data=weather_data, method=method)
    if adj_ratio:
        ratio = _get_adj_ratio(data_x, event_data,
                               event_start_h=event_start_h,
                               min_ratio=min_ratio,
                               max_ratio=max_ratio)
    else:
        ratio = 1
    data_x = (data_x.mean()*ratio).to_frame() # baseline is the average of the days selected
    data_x.columns = ["baseline"]
    return data_x, days, event_data.T, x_days, ratio
def parse_date(date):
    """Split an ISO-like 'YYYY-MM-DD...' value into (year, month, day) ints."""
    text = str(date)
    return (int(text[0:4]), int(text[5:7]), int(text[8:10]))
def calculate_rmse(demand_baseline, event_index):
    """Root-mean-squared error between a baseline and the actual event-day demand.

    Parameters
    ----------
    demand_baseline : array-like / DataFrame
        Baseline demand profile for the event day.
    event_index : index value
        Index label of the event day row in the module-level ``demand_pivot``.

    Returns
    -------
    float
        RMSE between the baseline and the actual profile.

    NOTE(review): this relies on (and mutates) the module-level ``demand_pivot``
    rather than taking it as a parameter -- presumably defined by the caller's
    module; confirm before reuse.
    """
    # Backfill missing readings in place.  ``bfill`` replaces the deprecated
    # ``fillna(method='bfill')`` form (removed in pandas 3.0).
    demand_pivot.bfill(inplace=True)  # TODO find a better solution
    RMSE = np.sqrt(mean_squared_error(demand_baseline, demand_pivot[demand_pivot.index == event_index].T))
    return RMSE
def mape_vectorized_v2(a, b):
    """Mean absolute percentage error of predictions *b* against actuals *a*.

    Entries where ``a == 0`` are excluded from the mean (MAPE is undefined
    there).  Note the result is a fraction (e.g. 0.25), not a percentage.

    Parameters
    ----------
    a, b : numpy arrays (or pandas Series) of the same shape.

    Returns
    -------
    float
    """
    mask = a != 0
    # Index *before* dividing so zero denominators are never touched.  The
    # original divided the full arrays first and only then discarded the
    # inf/nan entries, emitting spurious divide-by-zero runtime warnings.
    return (np.fabs(a[mask] - b[mask]) / a[mask]).mean()
|
from model.group import Group
# Fixture data: two fully populated Group records consumed by the
# parametrized group-creation tests.
testdata = [
    Group(name="name1", header='header1', footer='footer1'),
    Group(name="name2", header='header2', footer='footer2')
]
|
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestTestbook():
    """Selenium IDE export: log into the addressbook app and create a group."""

    def setup_method(self, method):
        # Fresh browser session per test.
        self.driver = webdriver.Firefox()
        self.vars = {}

    def teardown_method(self, method):
        self.driver.quit()

    def test_testbook(self):
        drv = self.driver
        drv.get("http://localhost/addressbook/")
        drv.set_window_size(550, 814)
        # Log in as the admin user.
        drv.find_element(By.NAME, "user").send_keys("admin")
        drv.find_element(By.ID, "content").click()
        drv.find_element(By.NAME, "pass").click()
        drv.find_element(By.NAME, "pass").send_keys("secret")
        drv.find_element(By.NAME, "pass").send_keys(Keys.ENTER)
        # Open the group-creation form.
        drv.find_element(By.LINK_TEXT, "groups").click()
        drv.find_element(By.NAME, "new").click()
        # Fill in the three group fields (click, then type -- same order as recorded).
        for field, value in (("group_name", "qwert"),
                             ("group_header", "ertert"),
                             ("group_footer", "ertertert")):
            drv.find_element(By.NAME, field).click()
            drv.find_element(By.NAME, field).send_keys(value)
        drv.find_element(By.NAME, "submit").click()
|
"""Core app"""
from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django AppConfig for the project's core app.

    Registered under the dotted path 'raccoons.core'; 'Core' is the
    human-readable name shown in the Django admin.
    """
    name = 'raccoons.core'
    verbose_name = 'Core'
|
import lib.commandable_state_machine as cmd_state_machine
import lib.state_machine as state_machine
class PausedState(cmd_state_machine.CommandableStateMachine):
    """State representing paused playback.

    While active it exposes a 'p' command that resumes playback and installs
    its own key handler on the parent state machine for frame stepping.
    """

    def __init__(self):
        super().__init__()
        # Back-reference to the owning state machine; assigned in enter().
        self.state_machine: state_machine.StateMachine

    def enter(self, parent_state: 'state_machine.StateMachine'):
        """Mark playback as paused and wire up the resume command / key handler."""
        # TODO Stop playback, activate forward (and backward?) frame
        # skipping
        self._playback_paused = True
        self.state_machine = parent_state
        self._register_command(
            key='p',
            description='Resume playback.',
            action=self.resume
        )
        # NOTE(review): reaches into the parent's private _key_handler --
        # presumably the parent dispatches key presses through it; confirm.
        parent_state._key_handler = self.pause_key_handler

    def resume(self):
        """Unpause the owning state machine and transition back to InitialState."""
        # Imported locally to avoid a circular import with the state implementations.
        from ..state_implementations.initial_state import InitialState
        self.state_machine._playback_paused = False
        return InitialState

    def pause_key_handler(self, key: str):
        """Handle frame-stepping keys while paused (not yet implemented)."""
        if key == 'j':
            # TODO Previous frame?
            pass
        elif key == 'l':
            # TODO Next frame
            pass
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 52
# Methods Covered : 52
# Examples Total : 55
# Examples Tested : 55
# Coverage % : 100
# ----------------------
# current methods coverage:
# blob_containers: 13/13
# blob_services: 3/3
# encryption_scopes: 4/4
# file_services: 3/3
# file_shares: 5/5
# management_policies: 3/3
# operations: 1/1
# object_replication_policies_operations: 0/4
# private_endpoint_connections: 3/3
# private_link_resources: 1/1
# skus: 1/1
# storage_accounts: 10/14
# usages: 1/1
import datetime as dt
import unittest
import azure.mgmt.storage as az_storage
from devtools_testutils import AzureMgmtTestCase, RandomNameResourceGroupPreparer
# Region used for every resource this test creates.
AZURE_LOCATION = 'westeurope'
# Zero offset shared by the UTC tzinfo class defined below.
ZERO = dt.timedelta(0)
class UTC(dt.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class MgmtStorageTest(AzureMgmtTestCase):
    """End-to-end scenario test for the Azure storage management client.

    Exercises storage accounts, blob/file services, containers, shares,
    encryption scopes, management policies, legal holds, leases,
    immutability policies and private endpoint connections, mirroring the
    documented REST examples.
    """

    def setUp(self):
        # Storage client always; network client only for live runs (it is
        # needed solely to provision the private endpoint).
        super(MgmtStorageTest, self).setUp()
        self.mgmt_client = self.create_mgmt_client(
            az_storage.StorageManagementClient
        )
        if self.is_live:
            import azure.mgmt.network as az_network
            self.network_client = self.create_mgmt_client(
                az_network.NetworkManagementClient
            )

    def create_endpoint(self, group_name, location, vnet_name, sub_net, endpoint_name, resource_id):
        """Create a VNet, subnet and private endpoint targeting *resource_id*.

        Returns the endpoint's resource id.  In playback (non-live) mode no
        resources are created and a deterministic fake id is returned so the
        recorded test stays reproducible.
        """
        if self.is_live:
            # Create VNet
            async_vnet_creation = self.network_client.virtual_networks.begin_create_or_update(
                group_name,
                vnet_name,
                {
                    'location': location,
                    'address_space': {
                        'address_prefixes': ['10.0.0.0/16']
                    }
                }
            )
            async_vnet_creation.result()
            # Create Subnet (network policies disabled so a private endpoint may attach)
            async_subnet_creation = self.network_client.subnets.begin_create_or_update(
                group_name,
                vnet_name,
                sub_net,
                {
                    'address_prefix': '10.0.0.0/24',
                    'private_link_service_network_policies': 'disabled',
                    'private_endpoint_network_policies': 'disabled'
                }
            )
            subnet_info = async_subnet_creation.result()
            # Create private endpoint
            BODY = {
                "location": location,
                "properties": {
                    "privateLinkServiceConnections": [
                        {
                            "name": "myconnection",
                            # "private_link_service_id": "/subscriptions/" + self.settings.SUBSCRIPTION_ID + "/resourceGroups/" + group_name + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME + ""
                            "private_link_service_id": resource_id,
                            "group_ids": ["blob"]
                        }
                    ],
                    "subnet": {
                        "id": "/subscriptions/" + self.settings.SUBSCRIPTION_ID + "/resourceGroups/" + group_name + "/providers/Microsoft.Network/virtualNetworks/" + vnet_name + "/subnets/" + sub_net
                    }
                }
            }
            result = self.network_client.private_endpoints.begin_create_or_update(group_name, endpoint_name, BODY)
            return result.result().id
        else:
            return "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/" + group_name + "/providers/Microsoft.Network/privateEndpoints/" + endpoint_name

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    def test_storage(self, resource_group):
        """Walk through the documented storage-account scenarios end to end.

        The statement order matters: resources created early (account,
        container, share, immutability policy ETags, lease ids) are consumed
        by later steps, and everything is torn down at the end.
        """
        SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
        RESOURCE_GROUP = resource_group.name
        STORAGE_ACCOUNT_NAME = "storageaccountxxyyzzn" # TODO: need change a random name, if need run live test again.
        DEST_STORAGE_ACCOUNT_NAME = "storageaccountxxyyzznnx"
        FILE_SERVICE_NAME = "fileservicexxyyzz"
        SHARE_NAME = "filesharenamexxyyzz"
        BLOB_SERVICE_NAME = "blobservicexxyyzz"
        CONTAINER_NAME = "containernamexxyyzz"
        ENCRYPTION_SCOPE_NAME = "encryptionscopexxyyzz"
        IMMUTABILITY_POLICY_NAME = "immutabilitypolicynamexxyyzz"
        VNET_NAME = "virualnetwork111"
        SUB_NET = "subnet111"
        LOAD_BALANCER = "loaderbalancer"
        FIPCONFIG = "fipconfig123"
        BAPOOL = "bapool123"
        PROBES = "probe123"
        PRIVATE_ENDPOINT = "endpoint123xxx"
        PRIVATE_ENDPOINT_CONNECTION_NAME = "privateEndpointConnection"
        OBJECT_REPLICATION_POLICY_NAME = "default"
        # StorageAccountCreate[put]
        BODY = {
            "sku": {
                "name": "Standard_GRS"
            },
            "kind": "StorageV2", # Storage v2 support policy
            "location": "westeurope",
            # TODO: The value 'True' is not allowed for property isHnsEnabled
            # "is_hns_enabled": True,
            # TODO:Unsupport
            # "routing_preference": {
            #   "routing_choice": "MicrosoftRouting",
            #   "publish_microsoft_endpoints": True,
            #   "publish_internet_endpoints": True
            # },
            "encryption": {
                "services": {
                    "file": {
                        "key_type": "Account",
                        "enabled": True
                    },
                    "blob": {
                        "key_type": "Account",
                        "enabled": True
                    }
                },
                "key_source": "Microsoft.Storage"
            },
            "tags": {
                "key1": "value1",
                "key2": "value2"
            }
        }
        result = self.mgmt_client.storage_accounts.begin_create(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        storageaccount = result.result()
        # Create destination storage account
        # result = self.mgmt_client.storage_accounts.begin_create(resource_group.name, DEST_STORAGE_ACCOUNT_NAME, BODY)
        # TODO: [Kaihui] feature is unavailable
        # Create object replication policy
        # BODY = {
        #     "source_account": STORAGE_ACCOUNT_NAME,
        #     "destination_account": DEST_STORAGE_ACCOUNT_NAME,
        #     "rules": []
        # }
        # result = self.mgmt_client.object_replication_policies.create_or_update(resource_group.name, STORAGE_ACCOUNT_NAME, OBJECT_REPLICATION_POLICY_NAME, BODY)
        # Private endpoint wired to the freshly-created account (no-op id in playback).
        self.create_endpoint(
            resource_group.name,
            AZURE_LOCATION,
            VNET_NAME,
            SUB_NET,
            PRIVATE_ENDPOINT,
            storageaccount.id
        )
        # PutFileServices[put]
        BODY = {
            "cors": {
                "cors_rules": [
                    {
                        "allowed_origins": [
                            "http://www.contoso.com",
                            "http://www.fabrikam.com"
                        ],
                        "allowed_methods": [
                            "GET",
                            "HEAD",
                            "POST",
                            "OPTIONS",
                            "MERGE",
                            "PUT"
                        ],
                        "max_age_in_seconds": "100",
                        "exposed_headers": [
                            "x-ms-meta-*"
                        ],
                        "allowed_headers": [
                            "x-ms-meta-abc",
                            "x-ms-meta-data*",
                            "x-ms-meta-target*"
                        ]
                    },
                    {
                        "allowed_origins": [
                            "*"
                        ],
                        "allowed_methods": [
                            "GET"
                        ],
                        "max_age_in_seconds": "2",
                        "exposed_headers": [
                            "*"
                        ],
                        "allowed_headers": [
                            "*"
                        ]
                    },
                    {
                        "allowed_origins": [
                            "http://www.abc23.com",
                            "https://www.fabrikam.com/*"
                        ],
                        "allowed_methods": [
                            "GET",
                            "PUT"
                        ],
                        "max_age_in_seconds": "2000",
                        "exposed_headers": [
                            "x-ms-meta-abc",
                            "x-ms-meta-data*",
                            "x-ms-meta-target*"
                        ],
                        "allowed_headers": [
                            "x-ms-meta-12345675754564*"
                        ]
                    }
                ]
            }
        }
        result = self.mgmt_client.file_services.set_service_properties(resource_group.name, STORAGE_ACCOUNT_NAME, BODY["cors"])
        # PutBlobServices[put]
        BODY = {
            "cors": {
                "cors_rules": [
                    {
                        "allowed_origins": [
                            "http://www.contoso.com",
                            "http://www.fabrikam.com"
                        ],
                        "allowed_methods": [
                            "GET",
                            "HEAD",
                            "POST",
                            "OPTIONS",
                            "MERGE",
                            "PUT"
                        ],
                        "max_age_in_seconds": "100",
                        "exposed_headers": [
                            "x-ms-meta-*"
                        ],
                        "allowed_headers": [
                            "x-ms-meta-abc",
                            "x-ms-meta-data*",
                            "x-ms-meta-target*"
                        ]
                    },
                    {
                        "allowed_origins": [
                            "*"
                        ],
                        "allowed_methods": [
                            "GET"
                        ],
                        "max_age_in_seconds": "2",
                        "exposed_headers": [
                            "*"
                        ],
                        "allowed_headers": [
                            "*"
                        ]
                    },
                    {
                        "allowed_origins": [
                            "http://www.abc23.com",
                            "https://www.fabrikam.com/*"
                        ],
                        "allowed_methods": [
                            "GET",
                            "PUT"
                        ],
                        "max_age_in_seconds": "2000",
                        "exposed_headers": [
                            "x-ms-meta-abc",
                            "x-ms-meta-data*",
                            # NOTE(review): stray space in 'x -ms-meta-target*' -- confirm intended.
                            "x -ms-meta-target*"
                        ],
                        "allowed_headers": [
                            "x-ms-meta-12345675754564*"
                        ]
                    }
                ]
            },
            "default_service_version": "2017-07-29",
            "delete_retention_policy": {
                "enabled": True,
                "days": "300"
            },
            # "is_versioning_enabled": True,
            # TODO: unsupport
            # "change_feed": {
            #   "enabled": True
            # }
        }
        result = self.mgmt_client.blob_services.set_service_properties(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # StorageAccountPutEncryptionScope[put]
        BODY = {
            "source": "Microsoft.Storage",
            "state": "Enabled"
        }
        result = self.mgmt_client.encryption_scopes.put(resource_group.name, STORAGE_ACCOUNT_NAME, ENCRYPTION_SCOPE_NAME, BODY)
        MANAGEMENT_POLICY_NAME = "managementPolicy"
        # StorageAccountSetManagementPolicies[put]
        BODY = {
            "policy": {
                "rules": [
                    {
                        "enabled": True,
                        "name": "olcmtest",
                        "type": "Lifecycle",
                        "definition": {
                            "filters": {
                                "blob_types": [
                                    "blockBlob"
                                ],
                                "prefix_match": [
                                    "olcmtestcontainer"
                                ]
                            },
                            "actions": {
                                "base_blob": {
                                    "tier_to_cool": {
                                        "days_after_modification_greater_than": "30"
                                    },
                                    "tier_to_archive": {
                                        "days_after_modification_greater_than": "90"
                                    },
                                    "delete": {
                                        "days_after_modification_greater_than": "1000"
                                    }
                                },
                                "snapshot": {
                                    "delete": {
                                        "days_after_creation_greater_than": "30"
                                    }
                                }
                            }
                        }
                    }
                ]
            }
        }
        result = self.mgmt_client.management_policies.create_or_update(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # PutShares[put]
        result = self.mgmt_client.file_shares.create(resource_group.name, STORAGE_ACCOUNT_NAME, SHARE_NAME, {})
        # StorageAccountGetProperties[get]
        storageaccount = self.mgmt_client.storage_accounts.get_properties(resource_group.name, STORAGE_ACCOUNT_NAME)
        # The service assigns the connection name; read it back from the account.
        # PRIVATE_ENDPOINT_CONNECTION_NAME = "privateEndpointConnection"
        PRIVATE_ENDPOINT_CONNECTION_NAME = storageaccount.private_endpoint_connections[0].name
        # StorageAccountPutPrivateEndpointConnection[put]
        BODY = {
            "private_link_service_connection_state": {
                # "status": "Approved",
                "status": "Rejected", # it has been approved, so test `Rejected`
                "description": "Auto-Approved"
            }
        }
        result = self.mgmt_client.private_endpoint_connections.put(resource_group.name, STORAGE_ACCOUNT_NAME, PRIVATE_ENDPOINT_CONNECTION_NAME, BODY)
        # PutContainers[put]
        result = self.mgmt_client.blob_containers.create(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, {})
        # CreateOrUpdateImmutabilityPolicy[put]
        BODY = {
            "immutability_period_since_creation_in_days": "3",
            "allow_protected_append_writes": True
        }
        result = self.mgmt_client.blob_containers.create_or_update_immutability_policy(
            resource_group.name,
            STORAGE_ACCOUNT_NAME,
            CONTAINER_NAME,
            parameters=BODY)
        ETAG = result.etag
        # DeleteImmutabilityPolicy[delete]
        result = self.mgmt_client.blob_containers.delete_immutability_policy(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, ETAG)
        # CreateOrUpdateImmutabilityPolicy[put] again
        BODY = {
            "immutability_period_since_creation_in_days": "3",
            "allow_protected_append_writes": True
        }
        result = self.mgmt_client.blob_containers.create_or_update_immutability_policy(
            resource_group.name,
            STORAGE_ACCOUNT_NAME,
            CONTAINER_NAME,
            parameters=BODY)
        ETAG = result.etag
        # TODO: [Kaihui] feature is unavailable
        # Get object replication policy
        # result = self.mgmt_client.object_replication_policies.get(resource_group.name, STORAGE_ACCOUNT_NAME, OBJECT_REPLICATION_POLICY_NAME)
        # GetImmutabilityPolicy[get]
        result = self.mgmt_client.blob_containers.get_immutability_policy(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME)
        # GetContainers[get]
        result = self.mgmt_client.blob_containers.get(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME)
        # StorageAccountGetPrivateEndpointConnection[get]
        result = self.mgmt_client.private_endpoint_connections.get(resource_group.name, STORAGE_ACCOUNT_NAME, PRIVATE_ENDPOINT_CONNECTION_NAME)
        # GetShares[get]
        result = self.mgmt_client.file_shares.get(resource_group.name, STORAGE_ACCOUNT_NAME, SHARE_NAME)
        # StorageAccountGetManagementPolicies[get]
        result = self.mgmt_client.management_policies.get(resource_group.name, STORAGE_ACCOUNT_NAME)
        # ListContainers[get]
        result = self.mgmt_client.blob_containers.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # StorageAccountGetEncryptionScope[get]
        result = self.mgmt_client.encryption_scopes.get(resource_group.name, STORAGE_ACCOUNT_NAME, ENCRYPTION_SCOPE_NAME)
        # ListShares[get]
        result = self.mgmt_client.file_shares.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # GetBlobServices[get]
        result = self.mgmt_client.blob_services.get_service_properties(resource_group.name, STORAGE_ACCOUNT_NAME)
        # GetFileServices[get]
        result = self.mgmt_client.file_services.get_service_properties(resource_group.name, STORAGE_ACCOUNT_NAME)
        # StorageAccountListPrivateLinkResources[get]
        result = self.mgmt_client.private_link_resources.list_by_storage_account(resource_group.name, STORAGE_ACCOUNT_NAME)
        # StorageAccountEncryptionScopeList[get]
        result = self.mgmt_client.encryption_scopes.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # TODO: [Kaihui] feature is unavailable
        # List object replication policy
        # result = self.mgmt_client.object_replication_policies.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # ListBlobServices[get]
        result = self.mgmt_client.blob_services.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # ListFileServices[get]
        result = self.mgmt_client.file_services.list(resource_group.name, STORAGE_ACCOUNT_NAME)
        # StorageAccountGetProperties[get]
        result = self.mgmt_client.storage_accounts.get_properties(resource_group.name, STORAGE_ACCOUNT_NAME)
        # StorageAccountListByResourceGroup[get]
        result = self.mgmt_client.storage_accounts.list_by_resource_group(resource_group.name)
        LOCATION_NAME = "westeurope"
        # UsageList[get]
        result = self.mgmt_client.usages.list_by_location(LOCATION_NAME)
        # StorageAccountList[get]
        result = self.mgmt_client.storage_accounts.list()
        # SkuList[get]
        result = self.mgmt_client.skus.list()
        # OperationsList[get]
        result = self.mgmt_client.operations.list()
        # SetLegalHoldContainers[post]
        BODY = {
            "tags": [
                "tag1",
                "tag2",
                "tag3"
            ]
        }
        result = self.mgmt_client.blob_containers.set_legal_hold(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, BODY)
        # ClearLegalHoldContainers[post]
        BODY = {
            "tags": [
                "tag1",
                "tag2",
                "tag3"
            ]
        }
        result = self.mgmt_client.blob_containers.clear_legal_hold(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, BODY)
        # Acquire a lease on a container[post]
        BODY = {
            "action": "Acquire",
            "lease_duration": "-1"
        }
        result = self.mgmt_client.blob_containers.lease(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, BODY)
        LEASE_ID = result.lease_id
        # Break a lease on a container[post]
        BODY = {
            "action": "Break",
            "lease_id": LEASE_ID
        }
        result = self.mgmt_client.blob_containers.lease(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, BODY)
        # UpdateContainers[patch]
        BODY = {
            "public_access": "Container",
            "metadata": {
                "metadata": "true"
            }
        }
        result = self.mgmt_client.blob_containers.update(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, BODY)
        # UpdateShares[patch]
        BODY = {
            "properties": {
                "metadata": {
                    "type": "image"
                }
            }
        }
        result = self.mgmt_client.file_shares.update(resource_group.name, STORAGE_ACCOUNT_NAME, SHARE_NAME, BODY)
        # StorageAccountPatchEncryptionScope[patch]
        # BODY = {
        #   "source": "Microsoft.KeyVault",
        #   "key_vault_properties": {
        #     "key_uri": "https://testvault.vault.core.windows.net/keys/key1/863425f1358359c"
        #   }
        # }
        BODY = {
            "source": "Microsoft.Storage",
            "state": "Enabled"
        }
        result = self.mgmt_client.encryption_scopes.patch(resource_group.name, STORAGE_ACCOUNT_NAME, ENCRYPTION_SCOPE_NAME, BODY)
        # StorageAccountRevokeUserDelegationKeys[post]
        result = self.mgmt_client.storage_accounts.revoke_user_delegation_keys(resource_group.name, STORAGE_ACCOUNT_NAME)
        # # TODO: FeatureUnavailableInLocation
        # # # BlobRangesRestore[post]
        # time_to_restore = (dt.datetime.now(tz=UTC()) - dt.timedelta(minutes=10)).isoformat()
        # BODY = {
        #     "time_to_restore": time_to_restore,
        #     "blob_ranges": [
        #         {
        #             "start_range": "container/blobpath1",
        #             "end_range": "container/blobpath2"
        #         },
        #         {
        #             "start_range": "container2/blobpath3",
        #             "end_range": ""
        #         }
        #     ]
        # }
        # result = self.mgmt_client.storage_accounts.restore_blob_ranges(resource_group.name, STORAGE_ACCOUNT_NAME, BODY["time_to_restore"], BODY["blob_ranges"])
        # result = result.result()
        # # TODO: Wrong parameters
        # StorageAccountListServiceSAS[post]
        # signed_expiry = (dt.datetime.now(tz=UTC()) - dt.timedelta(days=2)).isoformat()
        # BODY = {
        #   "canonicalized_resource": "/blob/sto1299/music",
        #   "signed_resource": "c",
        #   "signed_permission": "l",
        #   "signed_expiry": signed_expiry
        # }
        # result = self.mgmt_client.storage_accounts.list_service_sas(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # TODO: Wrong parameters
        # # StorageAccountListAccountSAS[post]
        # signed_start = dt.datetime.now(tz=UTC()).isoformat()
        # BODY = {
        #   "signed_services": "b",
        #   "signed_resource_types": "s",
        #   "signed_permission": "r",
        #   "signed_protocol": "https,http",
        #   "signed_start": signed_start,
        #   "signed_expiry": signed_expiry,
        #   "key_to_sign": "key1"
        # }
        # result = self.mgmt_client.storage_accounts.list_account_sas(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # StorageAccountRegenerateKey[post]
        BODY = {
            "key_name": "key2"
        }
        result = self.mgmt_client.storage_accounts.regenerate_key(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        """ TODO: Key name kerb2 is not valid.
        # StorageAccountRegenerateKerbKey[post]
        # BODY = {
        #   "key_name": "kerb2"
        # }
        KEY_NAME = "kerb2"
        result = self.mgmt_client.storage_accounts.regenerate_key(resource_group.name, STORAGE_ACCOUNT_NAME, KEY_NAME)
        """
        # StorageAccountListKeys[post]
        result = self.mgmt_client.storage_accounts.list_keys(resource_group.name, STORAGE_ACCOUNT_NAME)
        # """ TODO: FeatureUnavailableInLocation
        # # StorageAccountEnableAD[patch]
        # BODY = {
        #     "azure_files_identity_based_authentication": {
        #         "directory_service_options": "AD",
        #         "active_directory_properties": {
        #             "domain_name": "adtest.com",
        #             "net_bios_domain_name": "adtest.com",
        #             "forest_name": "adtest.com",
        #             "domain_guid": "aebfc118-9fa9-4732-a21f-d98e41a77ae1",
        #             "domain_sid": "S-1-5-21-2400535526-2334094090-2402026252",
        #             "azure_storage_sid": "S-1-5-21-2400535526-2334094090-2402026252-0012"
        #         }
        #     }
        # }
        # result = self.mgmt_client.storage_accounts.update(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # """
        # StorageAccountUpdate[patch]
        BODY = {
            "network_acls": {
                "default_action": "Allow"
            },
            # TODO: Message: Routing Preferences is not supported for the account.
            # "routing_preference": {
            #   "routing_choice": "MicrosoftRouting",
            #   "publish_microsoft_endpoints": True,
            #   "publish_internet_endpoints": True
            # },
            "encryption": {
                "services": {
                    "file": {
                        "key_type": "Account",
                        "enabled": True
                    },
                    "blob": {
                        "key_type": "Account",
                        "enabled": True
                    }
                },
                "key_source": "Microsoft.Storage"
            }
        }
        result = self.mgmt_client.storage_accounts.update(resource_group.name, STORAGE_ACCOUNT_NAME, BODY)
        # StorageAccountFailover
        # [ZIM] this testcase fails
        # TODO: [Kaihui] about this issue: https://github.com/Azure/azure-sdk-for-python/issues/11292
        # result = self.mgmt_client.storage_accounts.begin_failover(resource_group.name, STORAGE_ACCOUNT_NAME)
        #result = result.result()
        # LockImmutabilityPolicy[post]
        result = self.mgmt_client.blob_containers.lock_immutability_policy(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, ETAG)
        ETAG = result.etag
        # ExtendImmutabilityPolicy[post]
        BODY = {
            "immutability_period_since_creation_in_days": "100"
        }
        result = self.mgmt_client.blob_containers.extend_immutability_policy(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME, ETAG, BODY)
        ETAG = result.etag
        # StorageAccountCheckNameAvailability[post]
        BODY = {
            "name": "sto3363",
            "type": "Microsoft.Storage/storageAccounts"
        }
        result = self.mgmt_client.storage_accounts.check_name_availability(BODY)
        # DeleteContainers[delete]
        result = self.mgmt_client.blob_containers.delete(resource_group.name, STORAGE_ACCOUNT_NAME, CONTAINER_NAME)
        # # StorageAccountDeletePrivateEndpointConnection[delete]
        result = self.mgmt_client.private_endpoint_connections.delete(resource_group.name, STORAGE_ACCOUNT_NAME, PRIVATE_ENDPOINT_CONNECTION_NAME)
        # DeleteShares[delete]
        result = self.mgmt_client.file_shares.delete(resource_group.name, STORAGE_ACCOUNT_NAME, SHARE_NAME)
        # StorageAccountDeleteManagementPolicies[delete]
        result = self.mgmt_client.management_policies.delete(resource_group.name, STORAGE_ACCOUNT_NAME)
        # TODO: [Kaihui] feature is unavailable
        # Delete object replication policy
        # result = self.mgmt_client.object_replication_policies.delete(resource_group.name, STORAGE_ACCOUNT_NAME, OBJECT_REPLICATION_POLICY_NAME)
        # StorageAccountDelete[delete]
        result = self.mgmt_client.storage_accounts.delete(resource_group.name, STORAGE_ACCOUNT_NAME)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run this test case via the standard unittest CLI runner.
    unittest.main()
|
# external files
import numpy as np
import pickle as pk
import torch.optim as optim
from datetime import datetime
import os, time, argparse, csv
from collections import Counter
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch_geometric.datasets import WebKB, WikipediaNetwork, WikiCS
# internal files
from layer.DiGCN import *
from utils.Citation import *
from layer.geometric_baselines import *
from torch_geometric.utils import to_undirected
from utils.preprocess import geometric_dataset, load_syn
from utils.save_settings import write_log
from utils.hermitian import hermitian_decomp
from utils.edge_data import get_appr_directed_adj, get_second_directed_adj
# select cuda device if available
cuda_device = 0  # index of the GPU to use when CUDA is available
device = torch.device("cuda:%d" % cuda_device if torch.cuda.is_available() else "cpu")
def parse_args():
    """Build and parse the CLI arguments for the DiGCN baseline run.

    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description="baseline--Digraph")
    parser.add_argument('--log_root', type=str, default='../logs/', help='the path saving model.t7 and the training process')
    parser.add_argument('--log_path', type=str, default='test', help='the path saving model.t7 and the training process, the name of folder will be log/(current time)')
    parser.add_argument('--data_path', type=str, default='../dataset/data/tmp/', help='data set folder, for default format see dataset/cora/cora.edges and cora.node_labels')
    parser.add_argument('--dataset', type=str, default='WebKB/Cornell', help='data set selection')
    parser.add_argument('--epochs', type=int, default=1500, help='training epochs')
    parser.add_argument('--num_filter', type=int, default=2, help='num of filters')
    parser.add_argument('--p_q', type=float, default=0.95, help='direction strength, from 0.5 to 1.')
    parser.add_argument('--p_inter', type=float, default=0.1, help='inter_cluster edge probabilities.')
    parser.add_argument('--method_name', type=str, default='DiG', help='method name')
    parser.add_argument('--seed', type=int, default=0, help='random seed for training testing split/random graph generation')
    parser.add_argument('--dropout', type=float, default=0.0, help='dropout prob')
    parser.add_argument('--debug', '-D', action='store_true', help='debug mode')
    parser.add_argument('--new_setting', '-NS', action='store_true', help='whether not to load best settings')
    parser.add_argument('--layer', type=int, default=2, help='number of layers (2 or 3), default: 2')
    parser.add_argument('--lr', type=float, default=5e-3, help='learning rate')
    parser.add_argument('--l2', type=float, default=5e-4, help='l2 regularizer')
    parser.add_argument('--alpha', type=float, default=0.1, help='alpha teleport prob')
    return parser.parse_args()
def acc(pred, label, mask):
    """Accuracy of predictions over the entries selected by a boolean mask.

    Parameters
    ----------
    pred : torch.Tensor of predicted class indices.
    label : torch.Tensor of ground-truth class indices (same shape as pred).
    mask : boolean torch.Tensor selecting which entries to score.

    Returns
    -------
    float in [0, 1].
    """
    correct = int(pred[mask].eq(label[mask]).sum().item())
    # Local renamed from 'acc' so it no longer shadows this function's name.
    accuracy = correct / int(mask.sum())
    return accuracy
def main(args):
    """Train, validate and test the DiGCN baseline on every split of the dataset.

    For each split: trains with early stopping on validation loss, saves the
    best and latest checkpoints, then reports validation/test accuracy for
    both.  Returns a (splits, 4) array of
    [val_acc, test_acc, val_acc_latest, test_acc_latest] per split.
    """
    date_time = datetime.now().strftime('%m-%d-%H:%M:%S')
    log_path = os.path.join(args.log_root, args.log_path, args.save_name, date_time)
    if os.path.isdir(log_path) == False:
        try:
            os.makedirs(log_path)
        except FileExistsError:
            print('Folder exists!')
    # Dataset selection: 'Family/Subset' strings map to torch_geometric loaders,
    # otherwise fall back to the synthetic-graph loader.
    load_func, subset = args.dataset.split('/')[0], args.dataset.split('/')[1]
    if load_func == 'WebKB':
        load_func = WebKB
        dataset = load_func(root=args.data_path, name=subset)
    elif load_func == 'WikipediaNetwork':
        load_func = WikipediaNetwork
        dataset = load_func(root=args.data_path, name=subset)
    elif load_func == 'WikiCS':
        load_func = WikiCS
        dataset = load_func(root=args.data_path)
    elif load_func == 'cora_ml':
        dataset = citation_datasets(root='../dataset/data/tmp/cora_ml/cora_ml.npz')
    elif load_func == 'citeseer_npz':
        dataset = citation_datasets(root='../dataset/data/tmp/citeseer_npz/citeseer_npz.npz')
    else:
        dataset = load_syn(args.data_path + args.dataset, None)
    if os.path.isdir(log_path) == False:
        os.makedirs(log_path)
    data = dataset[0]
    if not data.__contains__('edge_weight'):
        data.edge_weight = None
    else:
        data.edge_weight = torch.FloatTensor(data.edge_weight)
    data.y = data.y.long()
    num_classes = (data.y.max() - data.y.min() + 1).detach().numpy()
    # normalize label, the minimum should be 0 as class index
    splits = data.train_mask.shape[1]
    if len(data.test_mask.shape) == 1:
        data.test_mask = data.test_mask.unsqueeze(1).repeat(1, splits)
    # Approximate personalized-PageRank propagation matrix for DiGCN.
    edge_index1, edge_weights1 = get_appr_directed_adj(args.alpha, data.edge_index.long(), data.y.size(-1), data.x.dtype, data.edge_weight)
    edge_index1 = edge_index1.to(device)
    edge_weights1 = edge_weights1.to(device)
    # Methods ending in 'ib' additionally use the second-order adjacency.
    if args.method_name[-2:] == 'ib':
        edge_index2, edge_weights2 = get_second_directed_adj(data.edge_index.long(), data.y.size(-1), data.x.dtype, data.edge_weight)
        edge_index2 = edge_index2.to(device)
        edge_weights2 = edge_weights2.to(device)
        edges = (edge_index1, edge_index2)
        edge_weight = (edge_weights1, edge_weights2)
        del edge_index2, edge_weights2
    else:
        edges = edge_index1
        edge_weight = edge_weights1
        del edge_index1, edge_weights1
    data = data.to(device)
    results = np.zeros((splits, 4))
    for split in range(splits):
        log_str_full = ''
        if not args.method_name[-2:] == 'ib':
            graphmodel = DiModel(data.x.size(-1), num_classes, filter_num=args.num_filter,
                                 dropout=args.dropout, layer=args.layer).to(device)
        else:
            graphmodel = DiGCN_IB(data.x.size(-1), hidden=args.num_filter,
                                  num_classes=num_classes, dropout=args.dropout,
                                  layer=args.layer).to(device)
        model = graphmodel  # nn.DataParallel(graphmodel)
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
        #################################
        # Train/Validation/Test
        #################################
        best_test_err = 1000.0
        early_stopping = 0
        for epoch in range(args.epochs):
            start_time = time.time()
            ####################
            # Train
            ####################
            train_loss, train_acc = 0.0, 0.0
            # for loop for batch loading
            model.train()
            out = model(data.x, edges, edge_weight)
            train_loss = F.nll_loss(out[data.train_mask[:,split]], data.y[data.train_mask[:,split]])
            pred_label = out.max(dim = 1)[1]
            train_acc = acc(pred_label, data.y, data.train_mask[:,split])
            opt.zero_grad()
            train_loss.backward()
            opt.step()
            outstrtrain = 'Train loss:, %.6f, acc:, %.3f,' % (train_loss.detach().item(), train_acc)
            #scheduler.step()
            ####################
            # Validation
            ####################
            model.eval()
            test_loss, test_acc = 0.0, 0.0
            out = model(data.x, edges, edge_weight)
            pred_label = out.max(dim = 1)[1]
            test_loss = F.nll_loss(out[data.val_mask[:,split]], data.y[data.val_mask[:,split]])
            test_acc = acc(pred_label, data.y, data.val_mask[:,split])
            outstrval = ' Test loss:, %.6f, acc: ,%.3f,' % (test_loss.detach().item(), test_acc)
            duration = "---, %.4f, seconds ---" % (time.time() - start_time)
            log_str = ("%d, / ,%d, epoch," % (epoch, args.epochs))+outstrtrain+outstrval+duration
            log_str_full += log_str + '\n'
            #print(log_str)
            ####################
            # Save weights
            ####################
            # Checkpoint on best validation loss; stop after 500 stale epochs.
            save_perform = test_loss.detach().item()
            if save_perform <= best_test_err:
                early_stopping = 0
                best_test_err = save_perform
                torch.save(model.state_dict(), log_path + '/model'+str(split)+'.t7')
            else:
                early_stopping += 1
            if early_stopping > 500 or epoch == (args.epochs-1):
                torch.save(model.state_dict(), log_path + '/model_latest'+str(split)+'.t7')
                break
        write_log(vars(args), log_path)
        ####################
        # Testing
        ####################
        model.load_state_dict(torch.load(log_path + '/model'+str(split)+'.t7'))
        model.eval()
        preds = model(data.x, edges, edge_weight)
        pred_label = preds.max(dim = 1)[1]
        np.save(log_path + '/pred' + str(split), pred_label.to('cpu'))
        # NOTE(review): 'acc_train' is actually measured on the validation mask.
        acc_train = acc(pred_label, data.y, data.val_mask[:,split])
        acc_test = acc(pred_label, data.y, data.test_mask[:,split])
        model.load_state_dict(torch.load(log_path + '/model_latest'+str(split)+'.t7'))
        model.eval()
        preds = model(data.x, edges, edge_weight)
        pred_label = preds.max(dim = 1)[1]
        np.save(log_path + '/pred_latest' + str(split), pred_label.to('cpu'))
        acc_train_latest = acc(pred_label, data.y, data.val_mask[:,split])
        acc_test_latest = acc(pred_label, data.y, data.test_mask[:,split])
        ####################
        # Save testing results
        ####################
        logstr = 'val_acc: '+str(np.round(acc_train, 3))+' test_acc: '+str(np.round(acc_test,3))+' val_acc_latest: '+str(np.round(acc_train_latest,3))+' test_acc_latest: '+str(np.round(acc_test_latest,3))
        print(logstr)
        results[split] = [acc_train, acc_test, acc_train_latest, acc_test_latest]
        log_str_full += logstr
        with open(log_path + '/log'+str(split)+'.csv', 'w') as file:
            file.write(log_str_full)
            file.write('\n')
        torch.cuda.empty_cache()
    return results
if __name__ == "__main__":
args = parse_args()
if args.debug:
args.epochs = 1
if args.dataset[:3] == 'syn':
if args.dataset[4:7] == 'syn':
if args.p_q not in [-0.08, -0.05]:
args.dataset = 'syn/syn'+str(int(100*args.p_q))+'Seed'+str(args.seed)
elif args.p_q == -0.08:
args.p_inter = -args.p_q
args.dataset = 'syn/syn2Seed'+str(args.seed)
elif args.p_q == -0.05:
args.p_inter = -args.p_q
args.dataset = 'syn/syn3Seed'+str(args.seed)
elif args.dataset[4:10] == 'cyclic':
args.dataset = 'syn/cyclic'+str(int(100*args.p_q))+'Seed'+str(args.seed)
else:
args.dataset = 'syn/fill'+str(int(100*args.p_q))+'Seed'+str(args.seed)
dir_name = os.path.join(os.path.dirname(os.path.realpath(
__file__)), '../result_arrays',args.log_path,args.dataset+'/')
args.log_path = os.path.join(args.log_path,args.method_name, args.dataset)
if not args.new_setting:
if args.dataset[:3] == 'syn':
if args.dataset[4:7] == 'syn':
setting_dict = pk.load(open('./syn_settings.pk','rb'))
dataset_name_dict = {
0.95:1, 0.9:4,0.85:5,0.8:6,0.75:7,0.7:8,0.65:9,0.6:10
}
if args.p_inter == 0.1:
dataset = 'syn/syn' + str(dataset_name_dict[args.p_q])
elif args.p_inter == 0.08:
dataset = 'syn/syn2'
elif args.p_inter == 0.05:
dataset = 'syn/syn3'
else:
raise ValueError('Please input the correct p_q and p_inter values!')
elif args.dataset[4:10] == 'cyclic':
setting_dict = pk.load(open('./Cyclic_setting_dict.pk','rb'))
dataset_name_dict = {
0.95:0, 0.9:1,0.85:2,0.8:3,0.75:4,0.7:5,0.65:6
}
dataset = 'syn/syn_tri_' + str(dataset_name_dict[args.p_q])
else:
setting_dict = pk.load(open('./Cyclic_fill_setting_dict.pk','rb'))
dataset_name_dict = {
0.95:0, 0.9:1,0.85:2,0.8:3
}
dataset = 'syn/syn_tri_' + str(dataset_name_dict[args.p_q]) + '_fill'
setting_dict_curr = setting_dict[dataset][args.method_name].split(',')
args.alpha = float(setting_dict_curr[setting_dict_curr.index('alpha')+1])
try:
args.num_filter = int(setting_dict_curr[setting_dict_curr.index('num_filter')+1])
except ValueError:
try:
args.num_filter = int(setting_dict_curr[setting_dict_curr.index('num_filters')+1])
except ValueError:
pass
args.lr = float(setting_dict_curr[setting_dict_curr.index('lr')+1])
try:
args.layer = int(setting_dict_curr[setting_dict_curr.index('layer')+1])
except ValueError:
pass
if os.path.isdir(dir_name) == False:
try:
os.makedirs(dir_name)
except FileExistsError:
print('Folder exists!')
save_name = args.method_name + 'lr' + str(int(args.lr*1000)) + 'num_filters' + str(int(args.num_filter)) + 'alpha' + str(int(100*args.alpha)) + 'layer' + str(int(args.layer))
args.save_name = save_name
results = main(args)
np.save(dir_name+save_name, results)
|
from flask import url_for
def test_usergroups(client, access_token):
    """The list endpoint returns the seeded default group with users and ACLs."""
    auth = {'authorization': "Bearer {token}".format(token=access_token)}
    res = client.get(url_for('usergroups'), headers=auth)
    assert res.status_code == 200
    groups = res.json
    assert len(groups) > 0
    first = groups[0]
    assert first['id'] == 1
    assert first['name'] == "default"
    assert first['comment'] == "created by sshportal"
    assert 'users' in first
    assert 'acls' in first
    assert first['users'][0]['id'] == 1
    assert first['acls'][0]['id'] == 1
def test_usergroup_id(client, access_token):
    """Lookup by numeric id returns the seeded default group."""
    auth = {'authorization': "Bearer {token}".format(token=access_token)}
    res = client.get(url_for('usergroupid', id=1), headers=auth)
    assert res.status_code == 200
    group = res.json
    assert group['id'] == 1
    assert group['name'] == "default"
    assert group['comment'] == "created by sshportal"
    assert 'users' in group
    assert 'acls' in group
    assert group['users'][0]['id'] == 1
    assert group['acls'][0]['id'] == 1
def test_usergroup_name(client, access_token):
    """Lookup by name returns the seeded default group.

    Fix: the original asserted ``res.json['id'] == 1`` twice in a row;
    the duplicate has been removed.
    """
    headers = {'authorization': "Bearer {token}".format(token=access_token)}
    res = client.get(url_for('usergroupname', name="default"), headers=headers)
    assert res.status_code == 200
    assert res.json['id'] == 1
    assert res.json['name'] == "default"
    assert res.json['comment'] == "created by sshportal"
    assert 'users' in res.json
    assert 'acls' in res.json
    assert res.json['users'][0]['id'] == 1
    assert res.json['acls'][0]['id'] == 1
|
import re
from .. import Environ as ENV
from ..Interfaces import Downloadable, Fileable
from ..Models import normal, yId
from pprint import pprint
################################################################################
################################################################################
################################################################################
################################################################################
class Player(Downloadable, Fileable):
    """Base class for a downloadable, file-cached sports player record.

    League-specific subclasses (NBA/MLB/NFL/NCAA*) override ``parseData``
    and provide a league-specific ``_info`` template dict.
    """
    # Per-player info template; subclasses replace this with a dict.
    # NOTE(review): ``_info`` is a class attribute, so it is shared between
    # instances unless Fileable copies it -- TODO confirm.
    _info = None

    def __init__(self, *args, **kwargs):
        Downloadable.__init__(self, *args, **kwargs)
        Fileable.__init__(self, self._info, *args, **kwargs)
        self.playerId = None
        # pprint(self.info)

    def create(self, playerId):
        """Populate this player: read the cache file, or download, parse and write.

        ``playerId`` is used to build both the source URL and the cache path.
        """
        print("New {} Player {}".format(self.info["leagueId"], playerId))
        self.playerId = playerId
        self.setUrl()
        self.setFilePath()
        try:
            self.read()
        except FileNotFoundError:
            # No cached copy yet: fetch, parse, and persist.
            self.parseData()
            self.write()
        pprint(self.info)

    def setFilePath(self):
        # Cache location comes from the environment's path template.
        self.filePath = ENV.playerFilePath.format(self.info, self.playerId)

    def setUrl(self):
        self.url = ENV.playerUrl.format(self.info, self.playerId)

    def parseData(self):
        """Development stub: dump the raw payload and fail explicitly.

        Fix: the original ended with a bare ``raise`` outside an except
        block, which raises ``RuntimeError: No active exception to
        re-raise``; raise NotImplementedError so the failure is explicit.
        """
        stores = self.downloadItem()
        entityId = stores["PageStore"]["pageData"]["entityId"]
        player = stores["PlayersStore"]["players"][entityId]
        player["draft_team"].pop("team")
        pprint(player)
        raise NotImplementedError(
            "Player.parseData is a stub; use a league-specific subclass")
################################################################################
################################################################################
class NBAPlayer(Player):
    """NBA-specific player parser."""
    _info = {
        "leagueId": "nba",
        "slugId": "nba",
        "player_id": -1,
        "first_name": "N/A",
        "last_name": "N/A",
        "bio": {},
        "draft": {},
        "pos_id": -1,
        "headshot": None
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parseData(self):
        """Extract the NBA player fields from the downloaded store payload.

        Fixes: the bare ``except`` around the draft-team cleanup is narrowed
        to KeyError (consistent with the MLB/NFL parsers), the regex is a raw
        string, and the trailing debug ``pprint`` (absent from the other
        league parsers) is removed.
        """
        stores = self.downloadItem()
        entityId = stores["PageStore"]["pageData"]["entityId"]
        player = stores["PlayersStore"]["players"][entityId]
        try:
            player["draft_team"].pop("team")
            player["draft_team"]["team_id"] = yId(player["draft_team"]["team_id"])
        except KeyError:
            # Undrafted / missing draft info: keep an empty draft record.
            player["draft_team"] = {}
        self.info["player_id"] = yId(player["player_id"])
        self.info["first_name"] = normal(player["first_name"])
        self.info["last_name"] = normal(player["last_name"])
        self.info["bio"] = player["bio"]
        self.info["draft"] = player["draft_team"]
        self.info["pos_id"] = yId(player["primary_position_id"])
        try:
            self.info["headshot"] = re.search(r"https://s.yimg.com/xe/i/us/sp/v/nba_cutout/players_l/\d*/\d*.png", player["image"]).group(0)
        except AttributeError:
            # re.search returned None: keep the default headshot.
            pass
################################################################################
################################################################################
class MLBPlayer(Player):
    """MLB-specific player parser (adds batting/throwing hand)."""
    _info = {
        "leagueId": "mlb",
        "slugId": "mlb",
        "player_id": -1,
        "first_name": "N/A",
        "last_name": "N/A",
        "bio": {},
        "draft": {},
        "bat": "N/A",
        "throw": "N/A",
        "pos_id": -1,
        "headshot": None
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parseData(self):
        """Extract the MLB player fields from the downloaded store payload.

        Fixes: the bare ``except`` around the headshot lookup is narrowed to
        the two exceptions that can legitimately occur (no regex match, or a
        missing "image" key), and the regex is a raw string.
        """
        stores = self.downloadItem()
        entityId = stores["PageStore"]["pageData"]["entityId"]
        player = stores["PlayersStore"]["players"][entityId]
        try:
            player["draft_team"].pop("team")
            player["draft_team"]["team_id"] = yId(player["draft_team"]["team_id"])
        except KeyError:
            # Undrafted / missing draft info: keep an empty draft record.
            player["draft_team"] = {}
        self.info["player_id"] = yId(player["player_id"])
        self.info["first_name"] = normal(player["first_name"])
        self.info["last_name"] = normal(player["last_name"])
        self.info["bio"] = player["bio"]
        self.info["draft"] = player["draft_team"]
        self.info["bat"] = player["bat"]
        self.info["throw"] = player["throw"]
        self.info["pos_id"] = yId(player["primary_position_id"])
        try:
            self.info["headshot"] = re.search(r"https://s.yimg.com/xe/i/us/sp/v/mlb_cutout/players_l/\d*/\d*.png", player["image"]).group(0)
        except (AttributeError, KeyError):
            # No match (search returned None) or no "image" field at all:
            # keep the default headshot.
            pass
################################################################################
################################################################################
class NFLPlayer(Player):
    """NFL-specific player parser."""
    _info = {
        "leagueId": "nfl",
        "slugId": "nfl",
        "player_id": -1,
        "first_name": "N/A",
        "last_name": "N/A",
        "bio": {},
        "draft": {},
        "pos_id": -1,
        "headshot": None
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parseData(self):
        """Extract the NFL player fields from the downloaded store payload.

        Fixes: the bare ``except`` around the headshot lookup is narrowed to
        the two exceptions that can legitimately occur (no regex match, or a
        missing "image" key), and the regex is a raw string.
        """
        stores = self.downloadItem()
        entityId = stores["PageStore"]["pageData"]["entityId"]
        player = stores["PlayersStore"]["players"][entityId]
        try:
            player["draft_team"].pop("team")
            player["draft_team"]["team_id"] = yId(player["draft_team"]["team_id"])
        except KeyError:
            # Undrafted / missing draft info: keep an empty draft record.
            player["draft_team"] = {}
        self.info["player_id"] = yId(player["player_id"])
        self.info["first_name"] = normal(player["first_name"])
        self.info["last_name"] = normal(player["last_name"])
        self.info["bio"] = player["bio"]
        self.info["draft"] = player["draft_team"]
        self.info["pos_id"] = yId(player["primary_position_id"])
        try:
            self.info["headshot"] = re.search(r"https://s.yimg.com/xe/i/us/sp/v/nfl_cutout/players_l/\d*/\d*.png", player["image"]).group(0)
        except (AttributeError, KeyError):
            # No match (search returned None) or no "image" field at all:
            # keep the default headshot.
            pass
################################################################################
################################################################################
class NCAAFPlayer(Player):
    """College-football (NCAAF) player parser."""
    _info = {
        "leagueId": "ncaaf",
        "slugId": "ncaaf",
        "player_id": -1,
        "first_name": "N/A",
        "last_name": "N/A",
        "bio": {},
        "pos_id": -1,
        "team_id": -1,
        "uni_num": -1
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parseData(self):
        """Copy the fields of interest from the store payload into ``self.info``."""
        payload = self.downloadItem()
        entity = payload["PageStore"]["pageData"]["entityId"]
        record = payload["PlayersStore"]["players"][entity]
        info = self.info
        info["player_id"] = yId(record["player_id"])
        info["first_name"] = normal(record["first_name"])
        info["last_name"] = normal(record["last_name"])
        info["bio"] = record["bio"]
        info["pos_id"] = yId(record["primary_position_id"])
        info["team_id"] = yId(record["team_id"])
        info["uni_num"] = record["uniform_number"]
################################################################################
################################################################################
class NCAABPlayer(Player):
    """College-basketball (NCAAB) player parser."""
    _info = {
        "leagueId": "ncaab",
        "slugId": "ncaab",
        "player_id": -1,
        "first_name": "N/A",
        "last_name": "N/A",
        "bio": {},
        "pos_id": -1,
        "team_id": -1,
        "uni_num": -1
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def parseData(self):
        """Copy the fields of interest from the store payload into ``self.info``."""
        payload = self.downloadItem()
        entity = payload["PageStore"]["pageData"]["entityId"]
        record = payload["PlayersStore"]["players"][entity]
        info = self.info
        info["player_id"] = yId(record["player_id"])
        info["first_name"] = normal(record["first_name"])
        info["last_name"] = normal(record["last_name"])
        info["bio"] = record["bio"]
        info["pos_id"] = yId(record["primary_position_id"])
        info["team_id"] = yId(record["team_id"])
        info["uni_num"] = record["uniform_number"]
|
import json
from django.test import TestCase
from rest_framework.test import APITestCase
from rest_framework.reverse import reverse
from .views import BookViewset, ExternalBookView
from .models import Book, Author
class ExternalBookViewTest(TestCase):
    """Tests for the external-books proxy endpoint."""

    def _get(self, name):
        # Helper: hit the external_books endpoint with a ?name= query.
        return self.client.get('{}?name={}'.format(reverse('external_books'), name))

    def test_get_without_data(self):
        """
        Test URL without a valid book name
        """
        response = self._get('Test')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            'status_code': 200,
            'status': 'success',
            'data': []
        })

    def test_get_with_data(self):
        """
        Test URL with a valid book name
        """
        expected = {
            "status_code": 200,
            "status": "success",
            "data": [
                {
                    "name": "A Game of Thrones",
                    "isbn": "978-0553103540",
                    "authors": [
                        "George R. R. Martin"
                    ],
                    "number_of_pages": 694,
                    "publisher": "Bantam Books",
                    "country": "United States",
                    "release_date": "1996-08-01"
                }
            ]
        }
        response = self._get('A%20Game%20of%20Thrones')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), expected)
class BookViewsetTest(APITestCase):
    """CRUD tests for the book viewset.

    Fix: renamed from ``BookViewset`` — the original test-class name
    shadowed the ``BookViewset`` view imported from ``.views`` at module
    level. Test discovery finds the class via its APITestCase base, so the
    rename is safe.
    """

    def setUp(self):
        # Baseline fixture: one author linked to one book.
        self.author = Author(name="Test Author")
        self.author.save()
        self.book = Book(
            name="Test Book",
            isbn="123456788",
            number_of_pages=888,
            publisher="Test Publisher",
            country="India",
            release_date="2019-05-28"
        )
        self.book.save()
        self.book.authors.add(self.author)

    def test_create(self):
        """
        Test CREATE method
        """
        test_data = {
            "name": "Sample Book",
            "isbn": "123456789",
            "authors": [{
                "name": "Sample Author"
            }],
            "number_of_pages": 999,
            "publisher": "Sample Publisher",
            "country": "India",
            "release_date": "2019-05-28"
        }
        response = self.client.post(
            reverse('v1:books-list'),
            data=json.dumps(test_data),
            content_type='application/json'
        )
        # The API wraps every payload in an HTTP-200 envelope; the inner
        # status_code carries the REST semantics (201 here).
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            "status_code": 201,
            "status": "success",
            "data": {
                "id": 2,
                "name": "Sample Book",
                "isbn": "123456789",
                "authors": [
                    "Sample Author"
                ],
                "number_of_pages": 999,
                "publisher": "Sample Publisher",
                "country": "India",
                "release_date": "2019-05-28"
            }
        })

    def test_list(self):
        """
        Test LIST method
        """
        response = self.client.get(reverse('v1:books-list'))
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            "status_code": 200,
            "status": "success",
            "data": [
                {
                    "id": self.book.id,
                    "name": "Test Book",
                    "isbn": "123456788",
                    "authors": [
                        "Test Author"
                    ],
                    "number_of_pages": 888,
                    "publisher": "Test Publisher",
                    "country": "India",
                    "release_date": "2019-05-28"
                }
            ]
        })

    def test_retrieve(self):
        """
        Test DETAIL method
        """
        response = self.client.get(reverse('v1:books-detail', kwargs={'pk': self.book.id}))
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            "status_code": 200,
            "status": "success",
            "data": {
                "id": self.book.id,
                "name": "Test Book",
                "isbn": "123456788",
                "authors": [
                    "Test Author"
                ],
                "number_of_pages": 888,
                "publisher": "Test Publisher",
                "country": "India",
                "release_date": "2019-05-28"
            }
        })

    def test_patch(self):
        """
        Test UPDATE method
        """
        patch_data = {
            "name": "Patch Book",
            "isbn": "123456788",
            "authors": [{
                "name": "Test Author"
            }],
            "number_of_pages": 999,
            "publisher": "Test Publisher",
            "country": "India",
            "release_date": "2019-05-28"
        }
        response = self.client.patch(
            reverse('v1:books-detail', kwargs={'pk': self.book.id}),
            data=json.dumps(patch_data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            "status_code": 200,
            "status": "success",
            "message": "The book Patch Book was updated successfully",
            "data": {
                "id": self.book.id,
                "name": "Patch Book",
                "isbn": "123456788",
                "authors": [
                    "Test Author"
                ],
                "number_of_pages": 999,
                "publisher": "Test Publisher",
                "country": "India",
                "release_date": "2019-05-28"
            }
        })

    def test_destroy(self):
        """
        Test DELETE method
        """
        response = self.client.delete(reverse('v1:books-detail', kwargs={'pk': self.book.id}))
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), {
            'status_code': 204,
            'status': 'success',
            'message': 'The book Test Book was deleted successfully',
            'data': []
        })
|
import json
from hellpy.utils import valid_type
from hellpy.structures import BaseType
from hellpy.exceptions import InvalidTypeError
class Builder(object):
    """Marker base class shared by the builder helpers in this module."""
class UrlBuilder(Builder):
    """Derives the query/status/length endpoint URLs from a base URL."""

    def __init__(self, base_url: str) -> None:
        # Each endpoint becomes an attribute named '<endpoint>_url'.
        for endpoint in ('query', 'status', 'length'):
            setattr(self, f'{endpoint}_url', f'{base_url}/{endpoint}')
class KeyValueBuilder(Builder):
    """Formats key lists and values for store queries."""

    @staticmethod
    def keys_string(*keys: str) -> str:
        """Join key names with the store's ' & ' separator."""
        separator = ' & '
        return separator.join(keys)

    @staticmethod
    def value_string(value: BaseType) -> str:
        """Serialize *value* to JSON, rejecting unsupported types."""
        if valid_type(value):
            return json.dumps(value)
        raise InvalidTypeError(f'invalid type "{type(value)}" provided')
|
"""
Tools for drawing Python object reference graphs with graphviz.
You can find documentation online at http://mg.pov.lt/objgraph/
Copyright (c) 2008-2010 Marius Gedminas <marius@pov.lt>
Copyright (c) 2010 Stefano Rivera <stefano@rivera.za.net>
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright (c) 2008-2011 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.7.1"
__date__ = "2011-12-11"
import codecs
import gc
import re
import inspect
import types
import operator
import os
import subprocess
import tempfile
import sys
import itertools
try:
basestring
except NameError:
# Python 3.x compatibility
basestring = str
try:
iteritems = dict.iteritems
except AttributeError:
# Python 3.x compatibility
iteritems = dict.items
def count(typename, objects=None):
    """Count objects tracked by the garbage collector with a given class name.

    Example:
        >>> count('dict')
        42
        >>> count('MyClass', get_leaking_objects())
        3

    Note that the GC does not track simple objects like int or str.

    .. versionchanged:: 1.7
       New parameter: ``objects``.
    """
    pool = gc.get_objects() if objects is None else objects
    return sum(1 for candidate in pool
               if type(candidate).__name__ == typename)
def typestats(objects=None):
    """Return a mapping from type name to instance count for *objects*.

    Defaults to every object tracked by the GC. Classes with the same name
    but defined in different modules are lumped together, and the GC does
    not track simple objects like int or str.

    Example:
        >>> typestats()
        {'list': 12041, 'tuple': 10245, ...}
        >>> typestats(get_leaking_objects())
        {'MemoryError': 1, 'tuple': 2795, 'RuntimeError': 1, 'list': 47, ...}

    .. versionadded:: 1.1
    .. versionchanged:: 1.7
       New parameter: ``objects``.
    """
    if objects is None:
        objects = gc.get_objects()
    counts = {}
    for item in objects:
        name = type(item).__name__
        counts[name] = counts.get(name, 0) + 1
    return counts
def most_common_types(limit=10, objects=None):
    """Return ``[(type_name, count), ...]`` sorted most-frequent-first.

    At most ``limit`` entries are returned; pass ``limit=None`` to get all
    of them. The caveats documented in :func:`typestats` apply.

    Example:
        >>> most_common_types(limit=2)
        [('list', 12041), ('tuple', 10245)]

    .. versionadded:: 1.4
    .. versionchanged:: 1.7
       New parameter: ``objects``.
    """
    ranked = sorted(typestats(objects).items(),
                    key=lambda pair: pair[1], reverse=True)
    return ranked[:limit] if limit else ranked
def show_most_common_types(limit=10, objects=None):
    """Print a name-aligned table of the most common instance types.

    The caveats documented in :func:`typestats` apply.

    Example:
        >>> show_most_common_types(limit=5)
        tuple                      8959
        function                   2442
        wrapper_descriptor         1048
        dict                        953
        builtin_function_or_method  800

    .. versionadded:: 1.1
    .. versionchanged:: 1.7
       New parameter: ``objects``.
    """
    table = most_common_types(limit, objects)
    # Pad every type name to the widest one so counts line up.
    name_width = max(len(name) for name, _ in table)
    for name, instances in table:
        print('%-*s %i' % (name_width, name, instances))
def show_growth(limit=10, peak_stats={}):
    """Show the increase in peak object counts since last call.
    Limits the output to ``limit`` largest deltas. You may set ``limit`` to
    None to see all of them.
    Uses and updates ``peak_stats``, a dictionary from type names to previously
    seen peak object counts. Usually you don't need to pay attention to this
    argument.
    The caveats documented in :func:`typestats` apply.
    Example:
        >>> objgraph.show_growth()
        wrapper_descriptor       970       +14
        tuple                  12282       +10
        dict                    1922        +7
        ...
    .. versionadded:: 1.5
    """
    # NOTE: the mutable default ``peak_stats={}`` is deliberate -- it is the
    # persistent cross-call state that makes delta reporting possible.
    gc.collect()
    stats = typestats()
    deltas = {}
    # Record only types whose count exceeds the previously seen peak.
    for name, count in iteritems(stats):
        old_count = peak_stats.get(name, 0)
        if count > old_count:
            deltas[name] = count - old_count
            peak_stats[name] = count
    deltas = sorted(deltas.items(), key=operator.itemgetter(1),
                    reverse=True)
    if limit:
        deltas = deltas[:limit]
    if deltas:
        # Align columns on the widest type name.
        width = max(len(name) for name, count in deltas)
        for name, delta in deltas:
            print('%-*s%9d %+9d' % (width, name, stats[name], delta))
def get_leaking_objects(objects=None):
    """Return objects that do not have any referents.
    These could indicate reference-counting bugs in C code. Or they could
    be legitimate.
    Note that the GC does not track simple objects like int or str.
    .. versionadded:: 1.7
    """
    if objects is None:
        gc.collect()
        objects = gc.get_objects()
    try:
        # Start with every object's id, then remove every id that appears as
        # a referent of some object; what remains has no (tracked) referrer.
        ids = set(id(i) for i in objects)
        for i in objects:
            ids.difference_update(id(j) for j in gc.get_referents(i))
        # this then is our set of objects without referrers
        return [i for i in objects if id(i) in ids]
    finally:
        objects = i = j = None # clear cyclic references to frame
def by_type(typename, objects=None):
    """Return objects tracked by the garbage collector with a given class name.

    Example:
        >>> by_type('MyClass')
        [<mymodule.MyClass object at 0x...>]

    Note that the GC does not track simple objects like int or str.

    .. versionchanged:: 1.7
       New parameter: ``objects``.
    """
    pool = objects if objects is not None else gc.get_objects()
    return [candidate for candidate in pool
            if type(candidate).__name__ == typename]
def at(addr):
    """Return the GC-tracked object living at memory address *addr*, or None.

    The reverse of id(obj):
        >>> at(id(obj)) is obj
        True

    Note that this function does not work on objects that are not tracked
    by the GC (e.g. ints or strings).
    """
    for candidate in gc.get_objects():
        if id(candidate) == addr:
            return candidate
    return None
def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()):
    """Find a shortest chain of references leading from obj.

    The end of the chain will be some object that matches your predicate.
    ``predicate`` is a function taking one argument and returning a boolean.
    ``max_depth`` limits the search depth.
    ``extra_ignore`` can be a list of object IDs to exclude those objects
    from your search.

    Example:
        >>> find_chain(obj, lambda x: isinstance(x, MyClass))
        [obj, ..., <MyClass object at ...>]

    Returns ``[obj]`` if such a chain could not be found.

    .. versionadded:: 1.7
    """
    # find_chain returns [match, ..., obj]; flip it so obj comes first.
    chain = find_chain(obj, predicate, gc.get_referents,
                       max_depth=max_depth, extra_ignore=extra_ignore)
    return list(reversed(chain))
def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()):
    """Find a shortest chain of references leading to obj.

    The start of the chain will be some object that matches your predicate.
    ``predicate`` is a function taking one argument and returning a boolean.
    ``max_depth`` limits the search depth.
    ``extra_ignore`` can be a list of object IDs to exclude those objects
    from your search.

    Example:
        >>> find_backref_chain(obj, inspect.ismodule)
        [<module ...>, ..., obj]

    Returns ``[obj]`` if such a chain could not be found.

    .. versionchanged:: 1.5
       Returns ``obj`` instead of ``None`` when a chain could not be found.
    """
    chain = find_chain(obj, predicate, gc.get_referrers,
                       max_depth=max_depth, extra_ignore=extra_ignore)
    return chain
def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
                  highlight=None, filename=None, extra_info=None,
                  refcounts=False):
    """Generate an object reference graph ending at ``objs``.
    The graph will show you what objects refer to ``objs``, directly and
    indirectly.
    ``objs`` can be a single object, or it can be a list of objects. If
    unsure, wrap the single object in a new list.
    ``filename`` if specified, can be the name of a .dot or a .png file,
    indicating the desired output format. If not specified, ``show_backrefs``
    will try to produce a .dot file and spawn a viewer (xdot). If xdot is
    not available, ``show_backrefs`` will convert the .dot file to a .png
    and print its name.
    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
    graph.
    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
    remove undesired objects from the graph.
    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
    Use ``extra_info`` (a function taking one argument and returning a
    string) to report extra information for objects.
    Specify ``refcounts=True`` if you want to see reference counts.
    These will mostly match the number of arrows pointing to an object,
    but can be different for various reasons.
    Examples:
        >>> show_backrefs(obj)
        >>> show_backrefs([obj1, obj2])
        >>> show_backrefs(obj, max_depth=5)
        >>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_backrefs(obj, highlight=inspect.isclass)
        >>> show_backrefs(obj, extra_ignore=[id(locals())])
    .. versionchanged:: 1.3
       New parameters: ``filename``, ``extra_info``.
    .. versionchanged:: 1.5
       New parameter: ``refcounts``.
    """
    # Delegate to the shared backend: walk referrers, and draw arrows
    # source -> target as discovered (no swapping).
    show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
               filter=filter, too_many=too_many, highlight=highlight,
               edge_func=gc.get_referrers, swap_source_target=False,
               filename=filename, extra_info=extra_info, refcounts=refcounts)
def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10,
              highlight=None, filename=None, extra_info=None,
              refcounts=False):
    """Generate an object reference graph starting at ``objs``.
    The graph will show you what objects are reachable from ``objs``, directly
    and indirectly.
    ``objs`` can be a single object, or it can be a list of objects. If
    unsure, wrap the single object in a new list.
    ``filename`` if specified, can be the name of a .dot or a .png file,
    indicating the desired output format. If not specified, ``show_refs``
    will try to produce a .dot file and spawn a viewer (xdot). If xdot is
    not available, ``show_refs`` will convert the .dot file to a .png
    and print its name.
    Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the
    graph.
    Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to
    remove undesired objects from the graph.
    Use ``highlight`` (a predicate) to highlight certain graph nodes in blue.
    Use ``extra_info`` (a function returning a string) to report extra
    information for objects.
    Specify ``refcounts=True`` if you want to see reference counts.
    Examples:
        >>> show_refs(obj)
        >>> show_refs([obj1, obj2])
        >>> show_refs(obj, max_depth=5)
        >>> show_refs(obj, filter=lambda x: not inspect.isclass(x))
        >>> show_refs(obj, highlight=inspect.isclass)
        >>> show_refs(obj, extra_ignore=[id(locals())])
    .. versionadded:: 1.1
    .. versionchanged:: 1.3
       New parameters: ``filename``, ``extra_info``.
    .. versionchanged:: 1.5
       New parameter: ``refcounts``.
       Follows references from module objects instead of stopping.
    """
    # Delegate to the shared backend: walk referents, swapping source and
    # target so arrows still point from referrer to referent.
    show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore,
               filter=filter, too_many=too_many, highlight=highlight,
               edge_func=gc.get_referents, swap_source_target=True,
               filename=filename, extra_info=extra_info, refcounts=refcounts)
def show_chain(*chains, **kw):
    """Show a chain (or several chains) of object references.
    Useful in combination with :func:`find_ref_chain` or
    :func:`find_backref_chain`, e.g.
        >>> show_chain(find_backref_chain(obj, inspect.ismodule))
    You can specify if you want that chain traced backwards or forwards
    by passing a ``backrefs`` keyword argument, e.g.
        >>> show_chain(find_ref_chain(obj, inspect.ismodule),
        ...            backrefs=False)
    Ideally this shouldn't matter, but for some objects
    :func:`gc.get_referrers` and :func:`gc.get_referents` are not perfectly
    symmetrical.
    You can specify ``highlight``, ``extra_info`` or ``filename`` arguments
    like for :func:`show_backrefs` or :func:`show_refs`.
    .. versionadded:: 1.5
    .. versionchanged:: 1.7
       New parameter: ``backrefs``.
    """
    backrefs = kw.pop('backrefs', True)
    chains = [chain for chain in chains if chain] # remove empty ones
    # The ``ids`` default argument is evaluated once, at definition time,
    # freezing the set of chain-member ids for use as a graph filter.
    def in_chains(x, ids=set(map(id, itertools.chain(*chains)))):
        return id(x) in ids
    # Depth needed to cover the longest chain end-to-end.
    max_depth = max(map(len, chains)) - 1
    if backrefs:
        show_backrefs([chain[-1] for chain in chains], max_depth=max_depth,
                      filter=in_chains, **kw)
    else:
        show_refs([chain[0] for chain in chains], max_depth=max_depth,
                  filter=in_chains, **kw)
#
# Internal helpers
#
def find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()):
    """Breadth-first search along *edge_func* edges starting from *obj*.

    Returns ``[match, ..., obj]`` for the first object satisfying
    *predicate* within *max_depth* steps, or ``[obj]`` when none is found.
    Shared backend of :func:`find_ref_chain` (edge_func=gc.get_referents)
    and :func:`find_backref_chain` (edge_func=gc.get_referrers).
    """
    queue = [obj]
    depth = {id(obj): 0}
    parent = {id(obj): None}
    ignore = set(extra_ignore)
    # Exclude the search's own bookkeeping objects and the frames running it,
    # so they never show up as (back)references in the result.
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(parent))
    ignore.add(id(ignore))
    ignore.add(id(sys._getframe())) # this function
    ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain, most likely
    gc.collect()
    while queue:
        target = queue.pop(0)
        if predicate(target):
            # Found a match: walk the parent links back to obj.
            chain = [target]
            while parent[id(target)] is not None:
                target = parent[id(target)]
                chain.append(target)
            return chain
        tdepth = depth[id(target)]
        if tdepth < max_depth:
            referrers = edge_func(target)
            ignore.add(id(referrers))
            for source in referrers:
                if id(source) in ignore:
                    continue
                if id(source) not in depth:
                    # First visit: record BFS depth and predecessor.
                    depth[id(source)] = tdepth + 1
                    parent[id(source)] = target
                    queue.append(source)
    return [obj] # not found
def show_graph(objs, edge_func, swap_source_target,
               max_depth=3, extra_ignore=(), filter=None, too_many=10,
               highlight=None, filename=None, extra_info=None,
               refcounts=False):
    """Write a graphviz .dot reference graph for *objs* and render/view it.

    Shared backend of :func:`show_refs` (edge_func=gc.get_referents,
    swap_source_target=True) and :func:`show_backrefs`
    (edge_func=gc.get_referrers, swap_source_target=False); see those
    functions for the meaning of the remaining parameters.
    """
    if not isinstance(objs, (list, tuple)):
        objs = [objs]
    # Choose the .dot output: the user-specified file, or a temp file.
    if filename and filename.endswith('.dot'):
        f = codecs.open(filename, 'w', encoding='utf-8')
        dot_filename = filename
    else:
        fd, dot_filename = tempfile.mkstemp('.dot', text=True)
        f = os.fdopen(fd, "w")
        if f.encoding is not None:
            # Python 3 will wrap the file in the user's preferred encoding
            # Re-wrap it for utf-8
            import io
            f = io.TextIOWrapper(f.detach(), 'utf-8')
    f.write('digraph ObjectGraph {\n'
            ' node[shape=box, style=filled, fillcolor=white];\n')
    queue = []
    depth = {}
    # As in find_chain: hide this function's own bookkeeping objects and
    # frames from the traversal.
    ignore = set(extra_ignore)
    ignore.add(id(objs))
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(ignore))
    ignore.add(id(sys._getframe())) # this function
    ignore.add(id(sys._getframe(1))) # show_refs/show_backrefs, most likely
    for obj in objs:
        # Starting objects are drawn with red text at depth 0.
        f.write(' %s[fontcolor=red];\n' % (obj_node_id(obj)))
        depth[id(obj)] = 0
        queue.append(obj)
        del obj
    gc.collect()
    nodes = 0
    while queue:
        nodes += 1
        target = queue.pop(0)
        tdepth = depth[id(target)]
        f.write(' %s[label="%s"];\n' % (obj_node_id(target), obj_label(target, extra_info, refcounts)))
        # Node color fades from white toward grey with increasing depth;
        # modules are tinted green, highlighted nodes blue.
        h, s, v = gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth)
        if inspect.ismodule(target):
            h = .3
            s = 1
        if highlight and highlight(target):
            h = .6
            s = .6
            v = 0.5 + v * 0.5
        f.write(' %s[fillcolor="%g,%g,%g"];\n' % (obj_node_id(target), h, s, v))
        if v < 0.5:
            # Dark fill: switch the label to white for contrast.
            f.write(' %s[fontcolor=white];\n' % (obj_node_id(target)))
        if hasattr(getattr(target, '__class__', None), '__del__'):
            # Flag objects whose class defines __del__ (uncollectable-cycle risk).
            f.write(" %s->%s_has_a_del[color=red,style=dotted,len=0.25,weight=10];\n" % (obj_node_id(target), obj_node_id(target)))
            f.write(' %s_has_a_del[label="__del__",shape=doublecircle,height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n' % (obj_node_id(target)))
        if tdepth >= max_depth:
            continue
        if inspect.ismodule(target) and not swap_source_target:
            # For show_backrefs(), it makes sense to stop when reaching a
            # module because you'll end up in sys.modules and explode the
            # graph with useless clutter. For show_refs(), it makes sense
            # to continue.
            continue
        neighbours = edge_func(target)
        ignore.add(id(neighbours))
        n = 0
        skipped = 0
        for source in neighbours:
            if id(source) in ignore:
                continue
            if filter and not filter(source):
                continue
            if n >= too_many:
                # Breadth cap reached: count the rest for the summary node.
                skipped += 1
                continue
            if swap_source_target:
                srcnode, tgtnode = target, source
            else:
                srcnode, tgtnode = source, target
            elabel = edge_label(srcnode, tgtnode)
            f.write(' %s -> %s%s;\n' % (obj_node_id(srcnode), obj_node_id(tgtnode), elabel))
            if id(source) not in depth:
                depth[id(source)] = tdepth + 1
                queue.append(source)
            n += 1
            del source
        del neighbours
        if skipped > 0:
            # Summarize the edges omitted by the too_many cap as one node.
            h, s, v = gradient((0, 1, 1), (0, 1, .3), tdepth + 1, max_depth)
            if swap_source_target:
                label = "%d more references" % skipped
                edge = "%s->too_many_%s" % (obj_node_id(target), obj_node_id(target))
            else:
                label = "%d more backreferences" % skipped
                edge = "too_many_%s->%s" % (obj_node_id(target), obj_node_id(target))
            f.write(' %s[color=red,style=dotted,len=0.25,weight=10];\n' % edge)
            f.write(' too_many_%s[label="%s",shape=box,height=0.25,color=red,fillcolor="%g,%g,%g",fontsize=6];\n' % (obj_node_id(target), label, h, s, v))
            f.write(' too_many_%s[fontcolor=white];\n' % (obj_node_id(target)))
    f.write("}\n")
    f.close()
    print("Graph written to %s (%d nodes)" % (dot_filename, nodes))
    if filename and filename.endswith('.dot'):
        # nothing else to do, the user asked for a .dot file
        return
    if not filename and program_in_path('xdot'):
        print("Spawning graph viewer (xdot)")
        subprocess.Popen(['xdot', dot_filename], close_fds=True)
    elif program_in_path('dot'):
        if not filename:
            print("Graph viewer (xdot) not found, generating a png instead")
        if filename and filename.endswith('.png'):
            f = open(filename, 'wb')
            png_filename = filename
        else:
            if filename:
                # NOTE(review): an unrecognized extension falls through to a
                # temp .png anyway -- confirm this is intended.
                print("Unrecognized file type (%s)" % filename)
            fd, png_filename = tempfile.mkstemp('.png', text=False)
            f = os.fdopen(fd, "wb")
        dot = subprocess.Popen(['dot', '-Tpng', dot_filename],
                               stdout=f, close_fds=False)
        dot.wait()
        f.close()
        print("Image generated as %s" % png_filename)
    else:
        if filename:
            print("Graph viewer (xdot) and image renderer (dot) not found, not doing anything else")
        else:
            # NOTE(review): filename is falsy on this branch, so this prints
            # "(None)" -- looks like the two messages are swapped; confirm.
            print("Unrecognized file type (%s), not doing anything else" % filename)
def obj_node_id(obj):
    """Return a graphviz-safe node identifier derived from id(obj).

    A '-' (possible in negative id() values on some platforms) is mapped
    to '_' since it is not valid in a dot node id.
    """
    node = 'o%d' % id(obj)
    return node.replace('-', '_')
def obj_label(obj, extra_info=None, refcounts=False):
    """Return the quoted, multi-line label text for *obj*'s graph node."""
    parts = [type(obj).__name__]
    if refcounts:
        # Subtract the 4 references created by the measurement itself:
        # this frame's ``obj``, show_graph's target variable,
        # sys.getrefcount()'s argument, and one more that does not show
        # up in gc.get_referrers().
        parts[0] += ' [%d]' % (sys.getrefcount(obj) - 4)
    parts.append(safe_repr(obj))
    if extra_info:
        parts.append(str(extra_info(obj)))
    return quote('\n'.join(parts))
def quote(s):
    """Escape *s* for use inside a double-quoted graphviz label string."""
    # Backslashes must be escaped first so later escapes are not doubled.
    replacements = (
        ("\\", "\\\\"),
        ("\"", "\\\""),
        ("\n", "\\n"),
        ("\0", "\\\\0"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return s
def safe_repr(obj):
    """Return short_repr(obj), falling back to a placeholder when repr fails.

    Fix: catch ``Exception`` instead of the original bare ``except``, which
    would also swallow KeyboardInterrupt/SystemExit raised while computing
    a repr.
    """
    try:
        return short_repr(obj)
    except Exception:
        return '(unrepresentable)'
def short_repr(obj):
    """Return a compact one-line description of *obj* for graph labels."""
    if isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType,
                        types.BuiltinFunctionType)):
        # Named things are best described by their name.
        return obj.__name__
    if isinstance(obj, types.MethodType):
        # Fall back to the pre-Python-2.6 attribute names when
        # __func__/__self__ are unavailable.
        try:
            func = obj.__func__
            receiver = obj.__self__
        except AttributeError:
            func = obj.im_func
            receiver = obj.im_self
        suffix = ' (bound)' if receiver is not None else ''
        return func.__name__ + suffix
    if isinstance(obj, types.FrameType):
        # Frames are identified by source location.
        return '%s:%s' % (obj.f_code.co_filename, obj.f_lineno)
    if isinstance(obj, (tuple, list, dict, set)):
        return '%d items' % len(obj)
    # Anything else: a truncated repr.
    return repr(obj)[:40]
def gradient(start_color, end_color, depth, max_depth):
    """Linearly interpolate between two HSV colors by depth/max_depth."""
    if max_depth == 0:
        # Avoid division by zero for a zero-depth graph.
        return start_color
    fraction = float(depth) / max_depth
    h1, s1, v1 = start_color
    h2, s2, v2 = end_color
    blend = lambda a, b: a * (1 - fraction) + b * fraction
    return blend(h1, h2), blend(s1, s2), blend(v1, v2)
def edge_label(source, target):
    """Return the graphviz edge-attribute string for the source->target edge.

    Produces a ``[label=...]`` annotation when the reference has a known
    name (an instance __dict__, a frame/method slot, a function attribute,
    or a dict key); returns '' for anonymous references.
    """
    # Identity checks (is) are deliberate throughout: we are labelling a
    # specific reference, not any equal-valued object.
    if isinstance(target, dict) and target is getattr(source, '__dict__', None):
        return ' [label="__dict__",weight=10]'
    if isinstance(source, types.FrameType):
        if target is source.f_locals:
            return ' [label="f_locals",weight=10]'
        if target is source.f_globals:
            return ' [label="f_globals",weight=10]'
    if isinstance(source, types.MethodType):
        try:
            if target is source.__self__:
                return ' [label="__self__",weight=10]'
            if target is source.__func__:
                return ' [label="__func__",weight=10]'
        except AttributeError:
            # Python < 2.6 compatibility
            if target is source.im_self:
                return ' [label="im_self",weight=10]'
            if target is source.im_func:
                return ' [label="im_func",weight=10]'
    if isinstance(source, types.FunctionType):
        for k in dir(source):
            if target is getattr(source, k):
                return ' [label="%s",weight=10]' % quote(k)
    if isinstance(source, dict):
        # NOTE(review): ``iteritems`` and ``basestring`` are assumed to be
        # py2/py3 compatibility aliases defined elsewhere in this module —
        # verify before running under Python 3.
        for k, v in iteritems(source):
            if v is target:
                if isinstance(k, basestring) and is_identifier(k):
                    # Identifier-like keys render as plain attribute names.
                    return ' [label="%s",weight=2]' % quote(k)
                else:
                    # Non-identifier keys: show the key's type and repr.
                    return ' [label="%s"]' % quote(type(k).__name__ + "\n"
                                                   + safe_repr(k))
    return ''
# Predicate: does the whole string look like a plain (ASCII) Python
# identifier?  Used by edge_label() to decide how to render dict keys.
is_identifier = re.compile('[a-zA-Z_][a-zA-Z_0-9]*$').match
def program_in_path(program):
    """Return True if *program* (or *program*.exe, for Windows) is on PATH.

    :param program: bare executable name, e.g. ``'dot'`` or ``'xdot'``.
    :rtype: bool
    """
    directories = os.environ.get("PATH", os.defpath).split(os.pathsep)
    candidates = (os.path.join(directory, program) for directory in directories)
    # any() short-circuits on the first hit instead of building the
    # intermediate list-of-True the original version created.
    return any(os.path.isfile(candidate) or os.path.isfile(candidate + '.exe')
               for candidate in candidates)
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
from .api import API, Resource
from .handlers import sync_fetch, async_fetch
from . import oauth2
__version__ = '1.0.6'
|
#!/usr/bin/env python
"""ur1.py -- command-line ur1.ca client.
ur1.ca is the URL shortening services provided by status.net. This script
makes it possible to access the service from the command line. This is done
by scraping the returned page and look for the shortened URL.
USAGE:
ur1.py LONGURL
RETURN STATUS:
If the URL is succesfully shortened by ur1.ca, it is written
to the standard output, and the program exits with status 0.
If ur1.ca fails to shorten the long URL, the error message
provided by ur1.ca is written to the standard error, and the
program exits with status 1.
If the input URL is malformed, no attempts of contacting the
server is made, and the program exits with status 2.
"""
import sys
import urllib
import urlparse
import re
# Service endpoint and process exit codes (see module docstring).
UR1CA = "http://ur1.ca/"
ESUCCESS = 0
EFAIL = 1
EBADARG = 2
# Patterns scraped out of the returned HTML page: on success the page
# carries the short URL, on failure ur1.ca's error message.
RE_GOOD = re.compile(r'<p class="success">Your ur1 is: <a href="(?P<shorturl>.+)">(?P=shorturl)</a></p>')
RE_BAD = re.compile(r'<p class="error">(?P<errormsg>.+)</p>')
def isgoodarg(url):
    """Check if the input URL makes "sense".

    A URL does not make sense if the scheme is neither http or https,
    or the host part is missing.

    url: input URL
    Returns a value that is truthy iff the URL makes sense.
    """
    parts = urlparse.urlparse(url)
    #pylint: disable-msg=E1101
    scheme_ok = parts.scheme in ("http", "https")
    # hostname is None when absent, so the conjunction is falsy then.
    return scheme_ok and parts.hostname
def parameterize(url):
    """Encode *url* as the ``longurl`` POST parameter expected by ur1.ca.

    url: a string which is the URL to be passed to ur1.ca service.
    Returns the POST parameter constructed from the URL.
    """
    params = {"longurl": url}
    return urllib.urlencode(params)
def request(parameter):
    """Send POST request to ur1.ca using the parameter.

    parameter: the parameter to the POST request, as returned by
               parameterize().
    Returns the file-like object as returned by urllib.urlopen.
    """
    # Supplying a data argument makes urlopen issue a POST, not a GET.
    return urllib.urlopen(UR1CA, parameter)
def retrievedoc(response):
    """Retrieve the HTML text from the ur1.ca response.

    response: the file-like HTTP response file returned by ur1.ca.
    Returns the text as a string.
    """
    #XXX: ensure all bytes are read
    # Read exactly content-length bytes so a short read is not mistaken
    # for the whole document.
    length = int(response.info()["content-length"])
    return response.read(length)
def scrape(document):
    """Scrape the HTML document returned from ur1.ca for the answer.

    document: HTML document returned from ur1.ca
    Returns a 2-tuple (success, answer) where --
        success: boolean value indicating whether the service returned
                 some meaningful result
        answer: if success, this is the shortened URL, otherwise a string
                indicating the possible problem
    """
    hit = RE_GOOD.search(document)
    if hit:
        return (True, hit.group("shorturl"))
    hit = RE_BAD.search(document)
    if hit:
        return (False, hit.group("errormsg"))
    # Neither pattern matched: the page layout is not what we expect.
    return (False, "Unknown local error.")
def __do_main():
    """Do everything: validate argv, query ur1.ca, report the result.

    Exit status: ESUCCESS with the short URL on stdout, EFAIL with the
    service's error on stderr, EBADARG for a missing/malformed argument.
    """
    try:
        arg = sys.argv[1]
    except IndexError:
        # No URL supplied on the command line.
        sys.exit(EBADARG)
    if not isgoodarg(arg):
        # Malformed URL: do not even contact the server.
        sys.exit(EBADARG)
    post_param = parameterize(arg)
    answerfile = request(post_param)
    doc = retrievedoc(answerfile)
    answerfile.close()
    status, msg = scrape(doc)
    if status:
        # The shortened URL goes to stdout so it can be piped.
        print msg
        sys.exit(ESUCCESS)
    else:
        # ur1.ca's error message goes to stderr.
        print >> sys.stderr, msg
        sys.exit(EFAIL)

if __name__ == "__main__":
    __do_main()
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
# Root config node; everything below is a default that can be overridden
# from a YAML experiment file or the command line (see update_config).
_C = CN()

_C.OUTPUT_DIR = ''
_C.LOG_DIR = ''
_C.DATA_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4          # dataloader worker processes
_C.PRINT_FREQ = 20      # log every N iterations
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.RANK = 0             # distributed-training rank

# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True

# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = 'pose_hrnet'
_C.MODEL.INIT_WEIGHTS = True
_C.MODEL.PRETRAINED = ''
_C.MODEL.NUM_JOINTS = 17
_C.MODEL.TAG_PER_JOINT = True
_C.MODEL.TARGET_TYPE = 'gaussian'
_C.MODEL.IMAGE_SIZE = [256, 256]  # width * height, ex: 192 * 256
_C.MODEL.HEATMAP_SIZE = [64, 64]  # width * height, ex: 24 * 32
_C.MODEL.SIGMA = 2
_C.MODEL.EXTRA = CN(new_allowed=True)  # free-form per-architecture extras

# loss options
_C.LOSS = CN()
_C.LOSS.USE_OHKM = False
_C.LOSS.TOPK = 8
_C.LOSS.USE_TARGET_WEIGHT = True
_C.LOSS.USE_DIFFERENT_JOINTS_WEIGHT = False

# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'mpii'
_C.DATASET.TRAIN_SET = 'train'
_C.DATASET.TEST_SET = 'valid'
_C.DATASET.DATA_FORMAT = 'jpg'
_C.DATASET.HYBRID_JOINTS_TYPE = ''
_C.DATASET.SELECT_DATA = False

# training data augmentation
_C.DATASET.FLIP = True
_C.DATASET.SCALE_FACTOR = 0.25
_C.DATASET.ROT_FACTOR = 30
_C.DATASET.PROB_HALF_BODY = 0.0
_C.DATASET.NUM_JOINTS_HALF_BODY = 8
_C.DATASET.COLOR_RGB = False

# train
_C.TRAIN = CN()
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = [90, 110]
_C.TRAIN.LR = 0.001
_C.TRAIN.OPTIMIZER = 'adam'
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.NESTEROV = False
_C.TRAIN.GAMMA1 = 0.99
_C.TRAIN.GAMMA2 = 0.0
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 140
_C.TRAIN.RESUME = False
_C.TRAIN.CHECKPOINT = ''
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.SHUFFLE = True

# testing
_C.TEST = CN()
# size of images for each device
_C.TEST.BATCH_SIZE_PER_GPU = 32
# Test Model Epoch
_C.TEST.FLIP_TEST = False
_C.TEST.POST_PROCESS = False
_C.TEST.SHIFT_HEATMAP = False
_C.TEST.USE_GT_BBOX = False
# nms
_C.TEST.IMAGE_THRE = 0.1
_C.TEST.NMS_THRE = 0.6
_C.TEST.SOFT_NMS = False
_C.TEST.OKS_THRE = 0.5
_C.TEST.IN_VIS_THRE = 0.0
_C.TEST.COCO_BBOX_FILE = ''
_C.TEST.BBOX_THRE = 1.0
_C.TEST.MODEL_FILE = ''

# debug
_C.DEBUG = CN()
_C.DEBUG.DEBUG = False
_C.DEBUG.SAVE_BATCH_IMAGES_GT = False
_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False
_C.DEBUG.SAVE_HEATMAPS_GT = False
_C.DEBUG.SAVE_HEATMAPS_PRED = False
def update_config(cfg, args):
    """Overlay *cfg* (a yacs CfgNode) with the experiment YAML file and
    command-line overrides, then re-freeze it.

    :param cfg: the default config node (e.g. the module-level ``_C``)
    :param args: parsed argparse namespace providing ``.cfg`` (YAML path),
        ``.opts`` (KEY VALUE override pairs) and ``.modelDir``.
    """
    cfg.defrost()
    cfg.merge_from_file(args.cfg)    # experiment YAML
    cfg.merge_from_list(args.opts)   # CLI KEY VALUE overrides

    if args.modelDir:
        cfg.OUTPUT_DIR = args.modelDir

    # The path-rewriting logic below is intentionally disabled; kept for
    # reference.
    # if args.logDir:
    #     cfg.LOG_DIR = args.logDir
    #
    # if args.dataDir:
    #     cfg.DATA_DIR = args.dataDir
    #
    # cfg.DATASET.ROOT = os.path.join(
    #     cfg.DATA_DIR, cfg.DATASET.ROOT
    # )
    #
    # cfg.MODEL.PRETRAINED = os.path.join(
    #     cfg.DATA_DIR, cfg.MODEL.PRETRAINED
    # )
    #
    # if cfg.TEST.MODEL_FILE:
    #     cfg.TEST.MODEL_FILE = os.path.join(
    #         cfg.DATA_DIR, cfg.TEST.MODEL_FILE
    #     )

    cfg.freeze()

if __name__ == '__main__':
    # Dump the default configuration to the file named on the command line.
    import sys

    with open(sys.argv[1], 'w') as f:
        print(_C, file=f)
|
import numpy as np
from copy import copy
class StandardScaler:
    """
    Standardize features by centering the mean to 0 and unit variance.

    The standard score of an instance is calculated by:

        z = (x - u) / s

    where u is the mean of the training data and s is the standard deviation.
    Standardizing data is often necessary before training many machine
    learning models to avoid problems like exploding/vanishing gradients and
    feature dominance.

    Attributes
    ----------
    mean : numpy array of shape (n_features, )
        The mean of each feature in the training set (None until ``fit``).
    var : numpy array of shape (n_features, )
        The variance of each feature in the training set (None until ``fit``).
    """
    # NOTE: in the original code this docstring was a dead string expression
    # inside __init__ (after the assignments), and it documented attributes
    # ``_mean``/``_var`` that do not exist; both defects are fixed here.

    def __init__(self, mean=None, var=None):
        self.mean = mean
        self.var = var

    def fit(self, dataset):
        """
        Calculate and store the mean and variance of each feature in the
        training set.

        Parameters
        ----------
        dataset : A Dataset object to be standardized
        """
        # Per-feature statistics: reduce over axis 0 (the rows/instances).
        self.mean = np.mean(dataset.X, axis=0)
        self.var = np.var(dataset.X, axis=0)

    def transform(self, dataset, inline=False):
        """
        Standardize data by subtracting out the mean and dividing by
        standard deviation calculated during fitting.

        Parameters
        ----------
        :param dataset: A Dataset object to be standardized
        :param inline: if True, mutate *dataset* in place and return it;
            otherwise return a new Dataset.

        Returns
        -------
        A Dataset object with standardized data.
        """
        Z = (dataset.X - self.mean) / np.sqrt(self.var)
        if inline:
            dataset.X = Z
            return dataset
        else:
            from src.si.data import Dataset
            return Dataset(Z, copy(dataset.Y), copy(dataset._xnames), copy(dataset._yname))

    def fit_transform(self, dataset, inline=False):
        """
        Calculate and store the mean and variance of each feature and
        standardize the data.

        Parameters
        ----------
        :param dataset: A Dataset object to be standardized
        :param inline: see ``transform``

        Returns
        -------
        A Dataset object with standardized data.
        """
        self.fit(dataset)
        return self.transform(dataset, inline=inline)

    def inverse_transform(self, dataset, inline=False):
        """
        Transform data back into original state by multiplying by standard
        deviation and adding the mean back in.

        Inverse standard scaler:

            x = z * s + u

        where s is the standard deviation, and u is the mean.

        Parameters
        ----------
        :param dataset: A standardized Dataset object
        :param inline: see ``transform``

        Returns
        -------
        Dataset object
        """
        original = (dataset.X * np.sqrt(self.var)) + self.mean
        if inline:
            dataset.X = original
            return dataset
        else:
            from src.si.data import Dataset
            return Dataset(original, copy(dataset.Y), copy(dataset._xnames), copy(dataset._yname))
|
from typing import Tuple
import torchvision
from torch import nn
import backbone.base
class ResNet50(backbone.base.Base):
    def __init__(self, pretrained: bool):
        super().__init__(pretrained)

    def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        """Split torchvision's ResNet-50 into a frozen-prefix feature
        extractor and a "hidden" tail, returning their output channel
        counts as well."""
        resnet50 = torchvision.models.resnet50(pretrained=self._pretrained)

        # list(resnet50.children()) consists of following modules
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU, [3] = MaxPool2d,
        #   [4..7] = Sequential(Bottleneck...) stages, [8] = AvgPool2d,
        #   [9] = Linear
        children = list(resnet50.children())

        feature_modules = children[:-3]   # conv1 .. layer3
        hidden = children[-3]             # layer4
        num_features_out = 1024           # layer3 output channels
        num_hidden_out = 2048             # layer4 output channels

        # Freeze the stem and the first residual stage (indices 0..4) so
        # only the deeper layers are fine-tuned.
        for index, module in enumerate(feature_modules):
            if index <= 4:
                for parameter in module.parameters():
                    parameter.requires_grad = False

        return nn.Sequential(*feature_modules), hidden, num_features_out, num_hidden_out
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
from .matching import *
|
import sys

# This WSGI entry module requires Python 3.
assert sys.version_info.major >= 3
# Make the application package importable before importing it below.
sys.path.insert(0, '/home/flaskwsgi/public_wsgi/')

# WSGI servers (e.g. mod_wsgi) look for a module-level name "application".
from app import app as application
|
"""
minus
=====
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "math" category
"""
class minus(Operator):
    """Computes the difference of two fields. If one field's scoping has 'overall' location, then these field's values are applied on the entire other field.

      available inputs:
        - fieldA (Field, FieldsContainer)
        - fieldB (Field, FieldsContainer)

      available outputs:
        - field (Field)

      Examples
      --------
      >>> from ansys.dpf import core as dpf

      >>> # Instantiate operator
      >>> op = dpf.operators.math.minus()

      >>> # Make input connections
      >>> my_fieldA = dpf.Field()
      >>> op.inputs.fieldA.connect(my_fieldA)
      >>> my_fieldB = dpf.Field()
      >>> op.inputs.fieldB.connect(my_fieldB)

      >>> # Instantiate operator and connect inputs in one line
      >>> op = dpf.operators.math.minus(fieldA=my_fieldA,fieldB=my_fieldB)

      >>> # Get output data
      >>> result_field = op.outputs.field()"""
    def __init__(self, fieldA=None, fieldB=None, config=None, server=None):
        super().__init__(name="minus", config=config, server=server)
        self._inputs = InputsMinus(self)
        self._outputs = OutputsMinus(self)
        # Identity comparison with None (PEP 8): "!= None" could be
        # defeated by an operand type with a custom __eq__.
        if fieldA is not None:
            self.inputs.fieldA.connect(fieldA)
        if fieldB is not None:
            self.inputs.fieldB.connect(fieldB)

    @staticmethod
    def _spec():
        """Describe the operator's input and output pins."""
        spec = Specification(description="""Computes the difference of two fields. If one field's scoping has 'overall' location, then these field's values are applied on the entire other field.""",
                             map_input_pin_spec={
                                 0 : PinSpecification(name = "fieldA", type_names=["field","fields_container"], optional=False, document="""field or fields container with only one field is expected"""),
                                 1 : PinSpecification(name = "fieldB", type_names=["field","fields_container"], optional=False, document="""field or fields container with only one field is expected""")},
                             map_output_pin_spec={
                                 0 : PinSpecification(name = "field", type_names=["field"], optional=False, document="""""")})
        return spec

    @staticmethod
    def default_config():
        """Return the default configuration for the "minus" operator."""
        return Operator.default_config(name = "minus")

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsMinus
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsMinus
        """
        return super().outputs
#internal name: minus
#scripting name: minus
class InputsMinus(_Inputs):
    """Intermediate class used to connect user inputs to minus operator

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.minus()
    >>> my_fieldA = dpf.Field()
    >>> op.inputs.fieldA.connect(my_fieldA)
    >>> my_fieldB = dpf.Field()
    >>> op.inputs.fieldB.connect(my_fieldB)
    """
    def __init__(self, op: Operator):
        super().__init__(minus._spec().inputs, op)
        # One Input wrapper per pin; the pin numbers mirror minus._spec().
        self._fieldA = Input(minus._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fieldA)
        self._fieldB = Input(minus._spec().input_pin(1), 1, op, -1)
        self._inputs.append(self._fieldB)

    @property
    def fieldA(self):
        """Allows to connect fieldA input to the operator

        - pindoc: field or fields container with only one field is expected

        Parameters
        ----------
        my_fieldA : Field, FieldsContainer,

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.minus()
        >>> op.inputs.fieldA.connect(my_fieldA)
        >>> #or
        >>> op.inputs.fieldA(my_fieldA)
        """
        return self._fieldA

    @property
    def fieldB(self):
        """Allows to connect fieldB input to the operator

        - pindoc: field or fields container with only one field is expected

        Parameters
        ----------
        my_fieldB : Field, FieldsContainer,

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.minus()
        >>> op.inputs.fieldB.connect(my_fieldB)
        >>> #or
        >>> op.inputs.fieldB(my_fieldB)
        """
        return self._fieldB
class OutputsMinus(_Outputs):
    """Intermediate class used to get outputs from minus operator

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.minus()
    >>> # Connect inputs : op.inputs. ...
    >>> result_field = op.outputs.field()
    """
    def __init__(self, op: Operator):
        super().__init__(minus._spec().outputs, op)
        # Single output pin (0): the difference field.
        self._field = Output(minus._spec().output_pin(0), 0, op)
        self._outputs.append(self._field)

    @property
    def field(self):
        """Allows to get field output of the operator

        Returns
        ----------
        my_field : Field,

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.minus()
        >>> # Connect inputs : op.inputs. ...
        >>> result_field = op.outputs.field()
        """
        return self._field
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest(BitcoinTestFramework):
    """Exercise RPC authentication: rpcauth entries on node0 and the
    legacy rpcuser/rpcpassword pair on node1."""

    def set_test_params(self):
        self.num_nodes = 2

    def setup_chain(self):
        super().setup_chain()
        #Append rpcauth to bitcoin.conf before initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        rpcuser = "rpcuser=rpcuser💻"
        rpcpassword = "rpcpassword=rpcpassword🔑"
        with open(os.path.join(self.options.tmpdir+"/node0", "bitcoinpirate.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")
        with open(os.path.join(self.options.tmpdir+"/node1", "bitcoinpirate.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser+"\n")
            f.write(rpcpassword+"\n")

    def _check_status(self, url, authpair, expected_status):
        """POST a trivial RPC with HTTP Basic *authpair* against *url* and
        assert the HTTP status code; factored out of run_test, which
        originally repeated this stanza nine times."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, expected_status)
        conn.close()

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        #New passwords generated via share/rpcuser tool
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        #Second password with different username
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        #Old authpair from the node URL still works
        self._check_status(url, url.username + ':' + url.password, 200)
        #New authpair for rt works too
        self._check_status(url, "rt:" + password, 200)
        #Wrong login name with rt's password
        self._check_status(url, "rtwrong:" + password, 401)
        #Wrong password for rt
        self._check_status(url, "rt:" + password + "wrong", 401)
        #Correct credentials for rt2
        self._check_status(url, "rt2:" + password2, 200)
        #Wrong password for rt2
        self._check_status(url, "rt2:" + password2 + "wrong", 401)

        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)

        # rpcuser and rpcpassword authpair
        self._check_status(url, "rpcuser💻:rpcpassword🔑", 200)
        #Wrong login name with rpcuser's password
        self._check_status(url, "rpcuserwrong:rpcpassword", 401)
        #Wrong password for rpcuser
        self._check_status(url, "rpcuser:rpcpasswordwrong", 401)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resnet50 quantized model."""
from aqt.jax_legacy.jax.imagenet.configs import base_config
def get_config(quant_target=base_config.QuantTarget.WEIGHTS_AND_FIXED_ACT):
    """Gets Resnet50 config for 8 bits weights and fixed activation quantization.

    conv_init and last dense layer not quantized as these are the most
    sensitive layers in the model.

    Args:
      quant_target: quantization target, of type QuantTarget.

    Returns:
      ConfigDict instance.
    """
    cfg = base_config.get_config(
        imagenet_type=base_config.ImagenetType.RESNET50,
        quant_target=quant_target,
    )
    # 8-bit precision for both weights and (fixed) activations.
    cfg.weight_prec = 8
    cfg.quant_act.prec = 8
    return cfg
|
__all__ = ["overturn", "shearstrain", "nsq"]
__version__ = "0.1.0"
from . import nsq, overturn, shearstrain
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
    def __init__(self, samples=None):
        """
        Construct a new frequency distribution.  If ``samples`` is
        given, then the frequency distribution will be initialized
        with the count of each object in ``samples``; otherwise, it
        will be initialized to be empty.

        In particular, ``FreqDist()`` returns an empty frequency
        distribution; and ``FreqDist(samples)`` first creates an empty
        frequency distribution, and then calls ``update`` with the
        list ``samples``.

        :param samples: The samples to initialize the frequency
            distribution with.
        :type samples: Sequence
        """
        dict.__init__(self)
        self._N = 0              # running total of all recorded outcomes
        self._reset_caches()     # initialize the Nr/max caches
        if samples:
            self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this FreqDist.  For the number of unique
        sample values (or bins) with counts greater than zero, use
        ``FreqDist.B()``.

        :rtype: int
        """
        # _N is maintained incrementally by __setitem__, so this is O(1).
        return self._N
    def B(self):
        """
        Return the total number of sample values (or "bins") that
        have counts greater than zero.  For the total
        number of sample outcomes recorded, use ``FreqDist.N()``.
        (FreqDist.B() is the same as len(FreqDist).)

        :rtype: int
        """
        return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
"""
if r < 0: raise IndexError('FreqDist.Nr(): r must be non-negative')
# Special case for Nr(0):
if r == 0:
return (bins-self.B() if bins is not None else 0)
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
return (self._Nr_cache[r] if r < len(self._Nr_cache) else 0)
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type sample: any
:rtype: list(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self._N is 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
    """
    Plot samples from the frequency distribution
    displaying the most frequent sample first.  If an integer
    parameter is supplied, stop after this many samples have been
    plotted.  If two integer parameters m, n are supplied, plot a
    subset of the samples, beginning with m and stopping at n-1.
    For a cumulative plot, specify cumulative=True.
    (Requires Matplotlib to be installed.)

    :param title: The title for the graph
    :type title: str
    :param cumulative: A flag to specify whether the plot is cumulative (default = False)
    :type cumulative: bool
    """
    try:
        import pylab
    except ImportError:
        # Kept as ValueError for backward compatibility with callers.
        raise ValueError('The plot function requires the matplotlib package (aka pylab). '
                         'See http://matplotlib.sourceforge.net/')

    if len(args) == 0:
        args = [len(self)]
    samples = list(islice(self, *args))

    cumulative = _get_kwarg(kwargs, 'cumulative', False)
    if cumulative:
        freqs = list(self._cumulative_frequencies(samples))
        ylabel = "Cumulative Counts"
    else:
        freqs = [self[sample] for sample in samples]
        ylabel = "Counts"
    pylab.grid(True, color="silver")
    if "linewidth" not in kwargs:
        kwargs["linewidth"] = 2
    if "title" in kwargs:
        pylab.title(kwargs.pop("title"))
    pylab.plot(freqs, **kwargs)
    # BUGFIX: '%s' % s instead of unicode(s): the ``unicode`` builtin does
    # not exist on Python 3, and %-formatting coerces correctly on both.
    pylab.xticks(range(len(samples)), ['%s' % s for s in samples], rotation=90)
    pylab.xlabel("Samples")
    pylab.ylabel(ylabel)
    pylab.show()
def tabulate(self, *args, **kwargs):
    """
    Print a table of the given samples, most frequent first.  An
    integer argument limits how many samples are shown; two integer
    arguments m, n select the subset [m, n).  Pass cumulative=True
    to tabulate running totals instead of raw counts.

    :param samples: The samples to plot (default is all samples)
    :type samples: list
    """
    if not args:
        args = [len(self)]
    samples = list(islice(self, *args))

    if _get_kwarg(kwargs, 'cumulative', False):
        counts = list(self._cumulative_frequencies(samples))
    else:
        counts = [self[sample] for sample in samples]
    # First row: the sample labels; second row: their counts.
    for sample in samples:
        print("%4s" % str(sample), end=' ')
    print()
    for count in counts:
        print("%4d" % count, end=' ')
    print()
def _sort_keys_by_value(self):
    # Lazily populate the item cache: pairs ordered by decreasing count,
    # with ties broken by the sample itself (ascending).
    if not self._item_cache:
        self._item_cache = sorted(dict.items(self), key=lambda kv: (-kv[1], kv[0]))
def keys(self):
    """
    Return the samples, ordered from most to least frequent.

    NOTE(review): ``map`` yields a list on Python 2 but a lazy,
    single-use iterator on Python 3 -- callers that slice the result
    assume the Python 2 list form; confirm the target interpreter.

    :rtype: list(any)
    """
    self._sort_keys_by_value()
    return map(itemgetter(0), self._item_cache)
def values(self):
    """
    Return the sample counts (not the samples), in decreasing order
    of frequency.  (The original docstring said "samples" here.)

    NOTE(review): ``map`` is a list on Python 2 but a lazy iterator on
    Python 3 -- confirm which form callers expect.

    :rtype: list(any)
    """
    self._sort_keys_by_value()
    return map(itemgetter(1), self._item_cache)
def items(self):
    """
    Return the (sample, count) pairs, in decreasing order of frequency.
    A shallow copy of the internal cache is returned, so callers may
    mutate the returned list freely.

    :rtype: list(tuple)
    """
    self._sort_keys_by_value()
    return self._item_cache[:]
def __iter__(self):
    """
    Return an iterator over the samples, in decreasing order of frequency.

    :rtype: iter
    """
    return iter(self.keys())
def iterkeys(self):
    """
    Return an iterator over the samples, in decreasing order of frequency.
    (Python 2-style spelling; equivalent to ``iter(self.keys())``.)

    :rtype: iter
    """
    return iter(self.keys())
def itervalues(self):
    """
    Return an iterator over the counts, sorted in decreasing order.
    (Python 2-style spelling; equivalent to ``iter(self.values())``.)

    :rtype: iter
    """
    return iter(self.values())
def iteritems(self):
    """
    Return an iterator over the (sample, count) pairs, in decreasing
    order of frequency.  Iterates the cache directly (no copy).

    :rtype: iter of any
    """
    self._sort_keys_by_value()
    return iter(self._item_cache)
def copy(self):
    """
    Create a copy of this frequency distribution.
    Uses ``self.__class__`` so subclasses copy to their own type.

    :rtype: FreqDist
    """
    return self.__class__(self)
def update(self, samples):
    """
    Update the frequency distribution with the provided samples.
    This is a faster way to add multiple samples to the distribution.

    :param samples: The samples to add: either a mapping from samples
        to counts, or a plain sequence of samples (each counted once).
    :type samples: list
    """
    # BUGFIX: the old code used a bare ``except:`` (which swallows any
    # error, not just a missing iteritems) and py2-only itertools.imap.
    try:
        # Python 2 mapping: iterate (sample, count) pairs lazily.
        sample_iter = samples.iteritems()
    except AttributeError:
        try:
            # Python 3 mapping: no iteritems(), but items() is a view.
            sample_iter = iter(samples.items())
        except AttributeError:
            # Plain sequence: each occurrence contributes a count of 1.
            sample_iter = ((sample, 1) for sample in samples)
    for sample, count in sample_iter:
        self.inc(sample, count=count)
def pop(self, other):
    """
    Remove ``other`` from the distribution and return its count.

    :raise KeyError: If ``other`` is not a recorded sample.
    """
    # BUGFIX: subtract the sample's full count from the outcome total;
    # the old code subtracted only 1, corrupting N() whenever the
    # removed sample had been observed more than once.
    self._N -= self.get(other, 0)
    self._reset_caches()
    return dict.pop(self, other)
def popitem(self):
    """
    Remove and return an arbitrary (sample, count) pair.

    :raise KeyError: If the distribution is empty.
    """
    sample, count = dict.popitem(self)
    # BUGFIX: subtract the full count (the old code subtracted only 1),
    # so N() stays equal to the sum of the remaining counts.
    self._N -= count
    self._reset_caches()
    return (sample, count)
def clear(self):
    """Remove every sample, resetting the outcome total and all caches."""
    dict.clear(self)
    self._N = 0
    self._reset_caches()
def _reset_caches(self):
    # Invalidate every derived cache; each is rebuilt lazily on demand.
    self._Nr_cache = self._max_cache = self._item_cache = None
def __add__(self, other):
    """Return a new distribution combining the counts of ``self`` and ``other``."""
    combined = self.copy()
    combined.update(other)
    return combined
def __le__(self, other):
    # Subset test: every sample here must appear in ``other`` with at
    # least the same count.
    if not isinstance(other, FreqDist):
        return False
    return all(sample in other and self[sample] <= other[sample]
               for sample in self)
def __lt__(self, other):
    # Strict subset: contained in ``other`` but not equal to it.
    if not isinstance(other, FreqDist):
        return False
    return self <= other and not self == other
def __ge__(self, other):
    # Delegate to the mirrored comparison.
    if not isinstance(other, FreqDist):
        return False
    return other.__le__(self)
def __gt__(self, other):
    # Delegate to the mirrored comparison.
    if not isinstance(other, FreqDist):
        return False
    return other.__lt__(self)
def __repr__(self):
    """
    Return a string representation of this FreqDist.
    :rtype: string
    """
    sample_count = len(self)
    outcome_count = self.N()
    return '<FreqDist with %d samples and %d outcomes>' % (sample_count, outcome_count)
def __str__(self):
    """
    Return a concise string representation of this FreqDist, showing
    at most the ten most frequent samples.
    :rtype: string
    """
    # BUGFIX: islice instead of ``self.keys()[:10]`` -- keys() is only
    # sliceable when map() returns a list (Python 2); islice works for
    # any iterable on either interpreter.
    items = ['%r: %r' % (sample, self[sample])
             for sample in islice(self.keys(), 10)]
    if len(self) > 10:
        items.append('...')
    return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
    # Unlike dict, indexing an unseen sample yields 0 rather than KeyError.
    return self.get(sample, 0)
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
    """
    Abstract interface for probability distributions: a mapping from
    samples to nonnegative real numbers whose range sums to 1.0.  A
    distribution specifies how likely each outcome of an experiment is
    -- for example, the probability that a token in a document will
    have a given type.  A ``ProbDist`` is often used to model the
    distribution underlying an observed frequency distribution.
    """
    SUM_TO_ONE = True
    """True if the probabilities of the samples in this probability
    distribution will always sum to one."""

    def __init__(self):
        # This class is an interface; only concrete subclasses may be built.
        if self.__class__ == ProbDistI:
            raise NotImplementedError("Interfaces can't be instantiated")

    def prob(self, sample):
        """
        Return the probability for a given sample, a real number in the
        range [0, 1].

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        raise NotImplementedError()

    def logprob(self, sample):
        """
        Return the base-2 logarithm of the probability for ``sample``
        (negative infinity for zero-probability samples).

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        # Default implementation, expressed via prob().
        probability = self.prob(sample)
        if probability == 0:
            return _NINF
        return math.log(probability, 2)

    def max(self):
        """
        Return the sample with the greatest probability.  If several
        samples tie, which one is returned is undefined.

        :rtype: any
        """
        raise NotImplementedError()

    def samples(self):
        """
        Return a list of all samples that have nonzero probabilities.
        Use ``prob`` to find the probability of each sample.

        :rtype: list
        """
        raise NotImplementedError()

    # cf self.SUM_TO_ONE
    def discount(self):
        """
        Return the ratio by which counts are discounted on average: c*/c

        :rtype: float
        """
        return 0.0

    # Subclasses should define more efficient implementations of this,
    # where possible.
    def generate(self):
        """
        Return a randomly selected sample from this probability
        distribution; each sample ``samp`` is drawn with probability
        ``self.prob(samp)``.
        """
        remaining = random.random()
        for sample in self.samples():
            remaining -= self.prob(sample)
            if remaining <= 0:
                return sample
        # Allow for some rounding error: fall back to the last sample.
        if remaining < .0001:
            return sample
        # We *should* never get here.
        if self.SUM_TO_ONE:
            warnings.warn("Probability distribution %r sums to %r; generate()"
                          " is returning an arbitrary sample." % (self, 1-remaining))
        return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
    """
    A distribution that gives every sample in a fixed set the same
    probability, and probability zero to every other sample.
    """
    def __init__(self, samples):
        """
        Construct a uniform distribution over ``samples``.

        :param samples: The samples that should be given uniform
            probability.
        :type samples: list
        :raise ValueError: If ``samples`` is empty.
        """
        if len(samples) == 0:
            raise ValueError('A Uniform probability distribution must have at least one sample.')
        distinct = set(samples)
        self._sampleset = distinct
        # Each distinct sample gets an equal share of the mass.
        self._prob = 1.0 / len(distinct)
        self._samples = list(distinct)

    def prob(self, sample):
        if sample in self._sampleset:
            return self._prob
        return 0

    def max(self):
        # All samples are equally likely; return an arbitrary one.
        return self._samples[0]

    def samples(self):
        return self._samples

    def __repr__(self):
        return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
    """
    A probability distribution whose probabilities are directly
    specified by a given dictionary.  The given dictionary maps
    samples to probabilities.
    """
    def __init__(self, prob_dict=None, log=False, normalize=False):
        """
        Construct a new probability distribution from the given
        dictionary, which maps values to probabilities (or to log
        probabilities, if ``log`` is true).  If ``normalize`` is
        true, then the probability values are scaled by a constant
        factor such that they sum to 1.

        If called without arguments, the resulting probability
        distribution assigns zero probability to all values.
        """
        # Copy the dict so later mutation by the caller cannot affect us.
        self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
        self._log = log

        # Normalize the distribution, if requested.
        # NOTE(review): normalize=True with an empty prob_dict divides by
        # zero in both branches below -- confirm callers never do that.
        if normalize:
            if log:
                # Log-space normalization: subtract the log of the total.
                value_sum = sum_logs(self._prob_dict.values())
                if value_sum <= _NINF:
                    # All mass is zero: fall back to a uniform distribution.
                    logp = math.log(1.0/len(prob_dict), 2)
                    for x in prob_dict:
                        self._prob_dict[x] = logp
                else:
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] -= value_sum
            else:
                value_sum = sum(self._prob_dict.values())
                if value_sum == 0:
                    # All mass is zero: fall back to a uniform distribution.
                    p = 1.0/len(prob_dict)
                    for x in prob_dict:
                        self._prob_dict[x] = p
                else:
                    norm_factor = 1.0/value_sum
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] *= norm_factor

    def prob(self, sample):
        if self._log:
            # Stored values are base-2 log probabilities; convert back.
            return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
        else:
            return self._prob_dict.get(sample, 0)

    def logprob(self, sample):
        if self._log:
            return self._prob_dict.get(sample, _NINF)
        else:
            if sample not in self._prob_dict: return _NINF
            elif self._prob_dict[sample] == 0: return _NINF
            else: return math.log(self._prob_dict[sample], 2)

    def max(self):
        # Cache the argmax on first use.
        if not hasattr(self, '_max'):
            self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
        return self._max

    def samples(self):
        return self._prob_dict.keys()

    def __repr__(self):
        return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
    """
    The maximum likelihood estimate for the probability distribution
    of the experiment underlying a frequency distribution: each
    sample's probability is approximated by its relative frequency in
    that distribution.
    """
    def __init__(self, freqdist, bins=None):
        """
        Use the maximum likelihood estimate to create a probability
        distribution for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        """
        self._freqdist = freqdist

    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        # MLE probability is simply the relative frequency.
        return self._freqdist.freq(sample)

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The
    "Lidstone estimate" is parameterized by a real number *gamma*,
    which typically ranges from 0 to 1.  The Lidstone estimate
    approximates the probability of a sample with count *c* from an
    experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``.  This is equivalent to adding
    *gamma* to the count for each bin, and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """
    SUM_TO_ONE = False
    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type gamma: float
        :param gamma: A real number used to parameterize the
            estimate.  The Lidstone estimate is equivalent to adding
            *gamma* to the count for each bin, and taking the
            maximum likelihood estimate of the resulting frequency
            distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        :raise ValueError: If ``bins`` is zero, or smaller than the
            number of bins in ``freqdist``.
        """
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            name = self.__class__.__name__[:-8]
            raise ValueError('A %s probability distribution ' % name +
                             'must have at least one bin.')
        if (bins is not None) and (bins < freqdist.B()):
            name = self.__class__.__name__[:-8]
            # BUGFIX: report freqdist.B() -- the bin count this check
            # compares against -- not freqdist.N() (the outcome count).
            raise ValueError('\nThe number of bins in a %s distribution ' % name +
                             '(%d) must be greater than or equal to\n' % bins +
                             'the number of bins in the FreqDist used ' +
                             'to create it (%d).' % freqdist.B())
        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = self._freqdist.N()
        if bins is None: bins = freqdist.B()
        self._bins = bins
        self._divisor = self._N + bins * gamma
        if self._divisor == 0.0:
            # In extreme cases we force the probability to be 0,
            # which it will be, since the count will be 0:
            self._gamma = 0
            self._divisor = 1

    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        c = self._freqdist[sample]
        return (c + self._gamma) / self._divisor

    def max(self):
        # For Lidstone distributions, probability is monotonic with
        # frequency, so the most probable sample is the one that
        # occurs most frequently.
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def discount(self):
        gb = self._gamma * self._bins
        return gb / (self._N + gb)

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
    """
    The Laplace ("add one") estimate for the probability distribution
    of the experiment underlying a frequency distribution: a sample
    with count *c*, out of *N* outcomes and *B* bins, is assigned
    probability *(c+1)/(N+B)*.  Equivalent to the Lidstone estimate
    with gamma = 1.
    """
    def __init__(self, freqdist, bins=None):
        """
        Use the Laplace estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # Laplace smoothing is Lidstone smoothing with gamma fixed at 1.
        LidstoneProbDist.__init__(self, freqdist, 1, bins)

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
    """
    The expected likelihood ("add one half") estimate for the
    probability distribution of the experiment underlying a frequency
    distribution: a sample with count *c*, out of *N* outcomes and
    *B* bins, is assigned probability *(c+0.5)/(N+B/2)*.  Equivalent
    to the Lidstone estimate with gamma = 0.5.
    """
    def __init__(self, freqdist, bins=None):
        """
        Use the expected likelihood estimate to create a probability
        distribution for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # ELE is Lidstone smoothing with gamma fixed at one half.
        super(ELEProbDist, self).__init__(freqdist, 0.5, bins)

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
    """
    The heldout estimate for the probability distribution of the
    experiment used to generate two frequency distributions: the
    "base" and the "heldout" distribution.  The heldout estimate
    approximates the probability of a sample that occurs *r* times in
    the base distribution as the average frequency in the heldout
    distribution of all samples occurring *r* times in the base
    distribution.  This average frequency is *Tr[r]/(Nr[r].N)*, where:

    - *Tr[r]* is the total count in the heldout distribution for
      all samples that occur *r* times in the base distribution.
    - *Nr[r]* is the number of samples that occur *r* times in
      the base distribution.
    - *N* is the number of outcomes recorded by the heldout
      frequency distribution.

    To make ``prob`` efficient, *Tr[r]/(Nr[r].N)* is precomputed for
    each value of *r* when the ``HeldoutProbDist`` is created.

    :type _estimate: list(float)
    :ivar _estimate: A list mapping *r* (the count of a sample in the
        base distribution) to the probability estimate for any sample
        with that count: ``_estimate[r] = Tr[r]/(Nr[r].N)``.
    :type _max_r: int
    :ivar _max_r: The maximum number of times that any sample occurs
        in the base distribution; determines how large ``_estimate``
        must be.
    """
    SUM_TO_ONE = False
    def __init__(self, base_fdist, heldout_fdist, bins=None):
        """
        Use the heldout estimate to create a probability distribution
        for the experiment used to generate ``base_fdist`` and
        ``heldout_fdist``.

        :type base_fdist: FreqDist
        :param base_fdist: The base frequency distribution.
        :type heldout_fdist: FreqDist
        :param heldout_fdist: The heldout frequency distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment; forwarded to ``base_fdist.Nr``.
        """
        self._base_fdist = base_fdist
        self._heldout_fdist = heldout_fdist

        # The max number of times any sample occurs in base_fdist.
        # NOTE(review): base_fdist.max() raises ValueError on an empty
        # distribution -- confirm callers never pass one.
        self._max_r = base_fdist[base_fdist.max()]

        # Calculate Tr, Nr, and N.
        Tr = self._calculate_Tr()
        Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
        N = heldout_fdist.N()

        # Use Tr, Nr, and N to compute the probability estimate for
        # each value of r.
        self._estimate = self._calculate_estimate(Tr, Nr, N)

    def _calculate_Tr(self):
        """
        Return the list *Tr*, where *Tr[r]* is the total count in
        ``heldout_fdist`` for all samples that occur *r*
        times in ``base_fdist``.

        :rtype: list(float)
        """
        Tr = [0.0] * (self._max_r+1)
        for sample in self._heldout_fdist:
            r = self._base_fdist[sample]
            Tr[r] += self._heldout_fdist[sample]
        return Tr

    def _calculate_estimate(self, Tr, Nr, N):
        """
        Return the list *estimate*, where *estimate[r]* is the
        probability estimate *Tr[r]/(Nr[r].N)* for any sample that
        occurs *r* times in the base frequency distribution.  In the
        special case that *Nr[r]=0*, *estimate[r]* will never be used,
        so it is stored as None.

        :rtype: list(float)
        :type Tr: list(float)
        :param Tr: heldout-distribution totals per base count *r*.
        :type Nr: list(float)
        :param Nr: number of base-distribution samples per count *r*.
        :type N: int
        :param N: The total number of outcomes recorded by the heldout
            frequency distribution.
        """
        estimate = []
        for r in range(self._max_r+1):
            if Nr[r] == 0: estimate.append(None)
            else: estimate.append(Tr[r]/(Nr[r]*N))
        return estimate

    def base_fdist(self):
        """
        Return the base frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._base_fdist

    def heldout_fdist(self):
        """
        Return the heldout frequency distribution that this
        probability distribution is based on.

        :rtype: FreqDist
        """
        return self._heldout_fdist

    def samples(self):
        return self._base_fdist.keys()

    def prob(self, sample):
        # Use our precomputed probability estimate.
        # NOTE(review): may be None when Nr[r] == 0 -- confirm callers
        # never observe that case.
        r = self._base_fdist[sample]
        return self._estimate[r]

    def max(self):
        # Note: the Heldout estimation is *not* necessarily monotonic;
        # so this implementation is currently broken.  However, it
        # should give the right answer *most* of the time. :)
        return self._base_fdist.max()

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
        return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
    """
    The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
    The "cross-validation estimate" for the probability of a sample
    is found by averaging the held-out estimates for the sample in
    each pair of frequency distributions.
    """
    SUM_TO_ONE = False
    def __init__(self, freqdists, bins):
        """
        Use the cross-validation estimate to create a probability
        distribution for the experiment used to generate
        ``freqdists``.

        :type freqdists: list(FreqDist)
        :param freqdists: A list of the frequency distributions
            generated by the experiment.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        self._freqdists = freqdists

        # Create a heldout probability distribution for each ordered
        # pair of distinct frequency distributions in freqdists.
        self._heldout_probdists = []
        for fdist1 in freqdists:
            for fdist2 in freqdists:
                if fdist1 is not fdist2:
                    probdist = HeldoutProbDist(fdist1, fdist2, bins)
                    self._heldout_probdists.append(probdist)

    def freqdists(self):
        """
        Return the list of frequency distributions that this ``ProbDist`` is based on.

        :rtype: list(FreqDist)
        """
        return self._freqdists

    def samples(self):
        # BUGFIX/PORTABILITY: the old ``sum([fd.keys() ...], [])`` was
        # quadratic and required keys() to return concatenable lists
        # (a TypeError when keys() yields an iterator); a set
        # comprehension over all distributions is equivalent.
        return set(sample for fd in self._freqdists for sample in fd.keys())

    def prob(self, sample):
        # Average the probability estimates from every heldout pair.
        prob = 0.0
        for heldout_probdist in self._heldout_probdists:
            prob += heldout_probdist.prob(sample)
        return prob/len(self._heldout_probdists)

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
    """
    The Witten-Bell estimate of a probability distribution.  Uniform
    probability mass is reserved for as-yet-unseen events, based on the
    number of event types seen so far.  The reserved mass equals
    *T / (N + T)*, where *T* is the number of observed event types and
    *N* the total number of observed events -- i.e. the maximum
    likelihood estimate of a new type event occurring.  The remaining
    mass is discounted so that all estimates sum to one, yielding:

        - *p = T / Z (N + T)*, if count = 0
        - *p = c / (N + T)*, otherwise
    """
    def __init__(self, freqdist, bins=None):
        """
        Creates a distribution of Witten-Bell probability estimates,
        taking *T* and *N* from ``freqdist`` (its ``B()`` and ``N()``
        values); the normalising factor *Z* is derived from these
        together with ``bins``.

        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be at least
            as large as the number of bins in the ``freqdist``. If None, then
            it's assumed to be equal to that of the ``freqdist``
        :type bins: int
        """
        assert bins is None or bins >= freqdist.B(),\
               'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._T = freqdist.B()          # number of observed event types
        self._Z = bins - freqdist.B()   # number of unseen event types
        self._N = freqdist.N()          # total number of observed events
        # self._P0 is P(0), precalculated for efficiency.
        # NOTE(review): self._Z == 0 here raises ZeroDivisionError --
        # confirm callers pass bins > freqdist.B() when freqdist is empty.
        if self._N == 0:
            # if freqdist is empty, we approximate P(0) by a UniformProbDist:
            self._P0 = 1.0 / self._Z
        else:
            self._P0 = self._T / float(self._Z * (self._N + self._T))

    def prob(self, sample):
        # inherit docs from ProbDistI
        count = self._freqdist[sample]
        if count == 0:
            return self._P0
        return count / float(self._N + self._T)

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def freqdist(self):
        return self._freqdist

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# the WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls (finite
# but unknown in number).
#
# The situation of frequency zero is quite common in the original
# Good-Turing estimation. Bill Gale and Geoffrey Sampson present a
# simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is greater than 1.96 times the standard deviation, and to switch
# to r* if it is less than or equal:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient correspond to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
class GoodTuringProbDist(ProbDistI):
    """
    The Good-Turing estimate of a probability distribution.  This method
    calculates the probability mass to assign to events with zero or low
    counts based on the number of events with higher counts.  It does so by
    using the smoothed count *c\**:

        - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
        - *things with frequency zero in training* = N(1)  for c == 0

    where *c* is the original count, *N(i)* is the number of event types
    observed with count *i*.  We can think of the count of the unseen as the
    count of frequency one (see Jurafsky & Martin 2nd Edition, p101).
    """
    def __init__(self, freqdist, bins=None):
        """
        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be at least
            as large as the number of bins in the ``freqdist``. If None, then
            it's assumed to be equal to that of the ``freqdist``
        :type bins: int
        """
        assert bins is None or bins >= freqdist.B(),\
               'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._bins = bins

    def prob(self, sample):
        count = self._freqdist[sample]

        # unseen sample's frequency (count zero) uses frequency one's
        if count == 0 and self._freqdist.N() != 0:
            p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
            if self._bins == self._freqdist.B():
                p0 = 0.0
            else:
                p0 = p0 / (1.0 * self._bins - self._freqdist.B())
            # BUGFIX: return the unseen-sample estimate.  The old code
            # computed p0 and then fell through to the general formula,
            # which always yields 0 for count == 0 (since Nr(0) == 0),
            # so unseen samples never received their reserved mass.
            return p0

        nc = self._freqdist.Nr(count)
        ncn = self._freqdist.Nr(count + 1)
        # avoid divide-by-zero errors for sparse datasets
        if nc == 0 or self._freqdist.N() == 0:
            return 0
        return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def discount(self):
        """
        :return: The probability mass transferred from the
            seen samples to the unseen samples.
        :rtype: float
        """
        # Guard against an empty distribution (N == 0).
        if self._freqdist.N() == 0:
            return 0.0
        return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()

    def freqdist(self):
        return self._freqdist

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
##  Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////

class SimpleGoodTuringProbDist(ProbDistI):
    """
    SimpleGoodTuring ProbDist approximates from frequency to freqency of
    frequency into a linear line under log space by linear regression.
    Details of Simple Good-Turing algorithm can be found in:

    - Good Turing smoothing without tears" (Gale & Sampson 1995),
      Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
    - "Speech and Language Processing (Jurafsky & Martin),
      2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
    - http://www.grsampson.net/RGoodTur.html

    Given a set of pair (xi, yi), where the xi denotes the freqency and
    yi denotes the freqency of freqency, we want to minimize their
    square variation. E(x) and E(y) represent the mean of xi and yi.

    - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
    - intercept: a = E(y) - b.E(x)
    """

    def __init__(self, freqdist, bins=None):
        """
        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be
            larger than the number of bins in the ``freqdist``. If None,
            then it's assumed to be equal to ``freqdist``.B() + 1
        :type bins: int
        """
        assert bins is None or bins > freqdist.B(),\
               'Bins parameter must not be less than freqdist.B() + 1'
        if bins is None:
            bins = freqdist.B() + 1
        self._freqdist = freqdist
        self._bins = bins
        # Fit the log-log regression line, choose the point where we
        # switch from empirical Nr to smoothed Sr, then compute the
        # renormalization factor for seen samples.
        r, nr = self._r_Nr()
        self.find_best_fit(r, nr)
        self._switch(r, nr)
        self._renormalize(r, nr)

    def _r_Nr(self):
        """
        Split the frequency distribution in two list (r, Nr), where Nr(r) > 0
        """
        r, nr = [], []
        b, i = 0, 0
        # Walk counts upward until every sample type (all B() of them)
        # has been accounted for by some nonzero Nr bucket.
        while b != self._freqdist.B():
            nr_i = self._freqdist.Nr(i)
            if nr_i > 0:
                b += nr_i
                r.append(i)
                nr.append(nr_i)
            i += 1
        return (r, nr)

    def find_best_fit(self, r, nr):
        """
        Use simple linear regression to tune parameters self._slope and
        self._intercept in the log-log space based on count and Nr(count)
        (Work in log space to avoid floating point underflow.)
        """
        # For higher sample frequencies the data points becomes horizontal
        # along line Nr=1. To create a more evident linear model in log-log
        # space, we average positive Nr values with the surrounding zero
        # values. (Church and Gale, 1991)
        if not r or not nr:
            # Empty r or nr?
            return

        zr = []
        for j in range(len(r)):
            # Averaging window runs from the previous r (or 0) to the
            # next r (extrapolated as 2*r[j]-i for the last element).
            i = (r[j-1] if j > 0 else 0)
            k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
            zr_ = 2.0 * nr[j] / (k - i)
            zr.append(zr_)

        # NOTE(review): math.log(i) raises ValueError if r ever contains
        # 0 -- confirm _r_Nr can never emit a zero count here.
        log_r = [math.log(i) for i in r]
        log_zr = [math.log(i) for i in zr]

        xy_cov = x_var = 0.0
        x_mean = 1.0 * sum(log_r) / len(log_r)
        y_mean = 1.0 * sum(log_zr) / len(log_zr)
        for (x, y) in zip(log_r, log_zr):
            xy_cov += (x - x_mean) * (y - y_mean)
            x_var += (x - x_mean)**2
        self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
        self._intercept = y_mean - self._slope * x_mean

    def _switch(self, r, nr):
        """
        Calculate the r frontier where we must switch from Nr to Sr
        when estimating E[Nr].
        """
        for i, r_ in enumerate(r):
            if len(r) == i + 1 or r[i+1] != r_ + 1:
                # We are at the end of r, or there is a gap in r
                self._switch_at = r_
                break

            # Switch as soon as the smoothed and empirical r* estimates
            # are no longer significantly different (1.96 sigma ~ 95%
            # confidence interval).
            Sr = self.smoothedNr
            smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
            unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]

            std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
            if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
                self._switch_at = r_
                break

    def _variance(self, r, nr, nr_1):
        # Variance of the unsmoothed Good-Turing estimate r*
        # (Gale & Sampson 1995).
        r = float(r)
        nr = float(nr)
        nr_1 = float(nr_1)
        return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)

    def _renormalize(self, r, nr):
        """
        It is necessary to renormalize all the probability estimates to
        ensure a proper probability distribution results. This can be done
        by keeping the estimate of the probability mass for unseen items as
        N(1)/N and renormalizing all the estimates for previously seen items
        (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
        """
        prob_cov = 0.0
        for r_, nr_ in zip(r, nr):
            prob_cov += nr_ * self._prob_measure(r_)
        # NOTE(review): when prob_cov is 0 (e.g. an empty freqdist),
        # self._renormal is never assigned, and prob() would then raise
        # AttributeError for seen samples -- confirm this is intended.
        if prob_cov:
            self._renormal = (1 - self._prob_measure(0)) / prob_cov

    def smoothedNr(self, r):
        """
        Return the number of samples with count r.

        :param r: The amount of freqency.
        :type r: int
        :rtype: float
        """
        # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
        # relationship)
        # Estimate a and b by simple linear regression technique on
        # the logarithmic form of the equation: log Nr = a + b*log(r)
        # NOTE(review): math.log(r) raises ValueError for r == 0.
        return math.exp(self._intercept + self._slope * math.log(r))

    def prob(self, sample):
        """
        Return the sample's probability.

        :param sample: sample of the event
        :type sample: str
        :rtype: float
        """
        count = self._freqdist[sample]
        p = self._prob_measure(count)
        if count == 0:
            # Spread the unseen mass evenly over the unseen bins.
            if self._bins == self._freqdist.B():
                p = 0.0
            else:
                p = p / (1.0 * self._bins - self._freqdist.B())
        else:
            # Seen samples are scaled so the whole distribution sums to 1.
            p = p * self._renormal
        return p

    def _prob_measure(self, count):
        # Raw (un-renormalized) Good-Turing probability for a count.
        if count == 0 and self._freqdist.N() == 0 :
            return 1.0
        elif count == 0 and self._freqdist.N() != 0:
            return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()

        # Use empirical Nr below the switch frontier, smoothed Sr above it.
        if self._switch_at > count:
            Er_1 = 1.0 * self._freqdist.Nr(count+1)
            Er = 1.0 * self._freqdist.Nr(count)
        else:
            Er_1 = self.smoothedNr(count+1)
            Er = self.smoothedNr(count)

        r_star = (count + 1) * Er_1 / Er
        return r_star / self._freqdist.N()

    def check(self):
        # Diagnostic: the renormalized estimates should sum to ~1.
        # NOTE(review): ``self._Nr`` is never assigned anywhere in this
        # class (only the local r/nr lists in __init__), so calling this
        # method raises AttributeError -- confirm before relying on it.
        prob_sum = 0.0
        for i in  range(0, len(self._Nr)):
            prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal
        print("Probability Sum:", prob_sum)
        #assert prob_sum != 1.0, "probability sum should be one!"

    def discount(self):
        """
        This function returns the total mass of probability transfers from the
        seen samples to the unseen samples.
        """
        return  1.0 * self.smoothedNr(1) / self._freqdist.N()

    def max(self):
        # Most probable sample == most frequent sample.
        return self._freqdist.max()

    def samples(self):
        # Sample space is the set of keys of the wrapped FreqDist.
        return self._freqdist.keys()

    def freqdist(self):
        # Expose the underlying frequency distribution.
        return self._freqdist

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return '<SimpleGoodTuringProbDist based on %d samples>'\
                % self._freqdist.N()
class MutableProbDist(ProbDistI):
    """
    An mutable probdist where the probabilities may be easily modified. This
    simply copies an existing probdist, storing the probability values in a
    mutable dictionary and providing an update method.
    """

    def __init__(self, prob_dist, samples, store_logs=True):
        """
        Creates the mutable probdist based on the given prob_dist and using
        the list of samples given. These values are stored as log
        probabilities if the store_logs flag is set.

        :param prob_dist: the distribution from which to garner the
            probabilities
        :type prob_dist: ProbDist
        :param samples: the complete set of samples
        :type samples: sequence of any
        :param store_logs: whether to store the probabilities as logarithms
        :type store_logs: bool
        """
        try:
            import numpy
        except ImportError:
            print("Error: Please install numpy; for instructions see http://www.nltk.org/")
            exit()
        self._samples = samples
        # Map each sample to its index into the probability array.
        self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
        self._data = numpy.zeros(len(samples), numpy.float64)
        for i in range(len(samples)):
            if store_logs:
                self._data[i] = prob_dist.logprob(samples[i])
            else:
                self._data[i] = prob_dist.prob(samples[i])
        # Remember which representation _data holds: base-2 log
        # probabilities (True) or plain probabilities (False).
        self._logs = store_logs

    def samples(self):
        # inherit documentation
        return self._samples

    def prob(self, sample):
        # inherit documentation
        i = self._sample_dict.get(sample)
        if i is None:
            # Unknown samples have zero probability.
            return 0.0
        # Convert from log space (base 2) if necessary.
        return (2**(self._data[i]) if self._logs else self._data[i])

    def logprob(self, sample):
        # inherit documentation
        i = self._sample_dict.get(sample)
        if i is None:
            # log2(0) -> negative infinity for unknown samples.
            return float('-inf')
        return (self._data[i] if self._logs else math.log(self._data[i], 2))

    def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample. This may cause the object
        to stop being the valid probability distribution - the user must
        ensure that they update the sample probabilities such that all samples
        have probabilities between 0 and 1 and that all probabilities sum to
        one.

        :param sample: the sample for which to update the probability
        :type sample: any
        :param prob: the new probability
        :type prob: float
        :param log: is the probability already logged
        :type log: bool
        """
        i = self._sample_dict.get(sample)
        assert i is not None
        # Convert the incoming value into whichever representation
        # (log or plain) this instance stores internally.
        if self._logs:
            self._data[i] = (prob if log else math.log(prob, 2))
        else:
            self._data[i] = (2**(prob) if log else prob)
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    """
    Return the expected base-2 log likelihood of ``test_pdist``,
    weighted by the probabilities of ``actual_pdist``.

    :raises ValueError: if either argument is not a ``ProbDistI``.
    :rtype: float
    """
    if (not isinstance(test_pdist, ProbDistI) or
        not isinstance(actual_pdist, ProbDistI)):
        raise ValueError('expected a ProbDist.')
    # Is this right?
    # NOTE(review): iterating ``actual_pdist`` directly assumes ProbDistI
    # is iterable over its samples (rather than actual_pdist.samples());
    # also math.log raises ValueError when test_pdist.prob(s) == 0 --
    # confirm both against the ProbDistI definition.
    return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
               for s in actual_pdist)
def entropy(pdist):
    """
    Return the base-2 entropy (in bits) of the given probability
    distribution.

    :param pdist: a probability distribution exposing ``samples()`` and
        ``prob(sample)``
    :rtype: float
    """
    total = 0.0
    for s in pdist.samples():
        p = pdist.prob(s)
        # BUG FIX: the original passed p == 0 straight to math.log, which
        # raises ValueError.  By continuity, lim p->0 of p*log(p) is 0,
        # so zero-probability samples contribute nothing to the entropy.
        if p:
            total -= p * math.log(p, 2)
    return total
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
    """
    A collection of frequency distributions for a single experiment
    run under different conditions.  Conditional frequency
    distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word (type) in a document, given its
    length.  Formally, a conditional frequency distribution can be
    defined as a function that maps from each condition to the
    FreqDist for the experiment under that condition.

    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions.  For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:

        >>> from nltk.probability import ConditionalFreqDist
        >>> from nltk.tokenize import word_tokenize
        >>> sent = "the the the dog dog some other words that we do not care about"
        >>> cfdist = ConditionalFreqDist()
        >>> for word in word_tokenize(sent):
        ...     condition = len(word)
        ...     cfdist[condition].inc(word)

    An equivalent way to do this is with the initializer:

        >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))

    The frequency distribution for each condition is accessed using
    the indexing operator:

        >>> cfdist[3]
        <FreqDist with 6 outcomes>
        >>> cfdist[3].freq('the')
        0.5
        >>> cfdist[3]['dog']
        2

    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    ``ConditionalFreqDist`` creates a new empty FreqDist for that
    condition.
    """

    def __init__(self, cond_samples=None):
        """
        Construct a new empty conditional frequency distribution.  In
        particular, the count for every sample, under every condition,
        is zero.

        :param cond_samples: The samples to initialize the conditional
            frequency distribution with
        :type cond_samples: Sequence of (condition, sample) tuples
        """
        # defaultdict(FreqDist): indexing a fresh condition lazily
        # creates an empty FreqDist for it.
        defaultdict.__init__(self, FreqDist)
        if cond_samples:
            for (cond, sample) in cond_samples:
                self[cond].inc(sample)

    def conditions(self):
        """
        Return a list of the conditions that have been accessed for
        this ``ConditionalFreqDist``.  Use the indexing operator to
        access the frequency distribution for a given condition.
        Note that the frequency distributions for some conditions
        may contain zero sample outcomes.

        :rtype: list
        """
        return sorted(self.keys())

    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this ``ConditionalFreqDist``.

        :rtype: int
        """
        # NOTE(review): ``itervalues`` is Python-2-only dict API (this
        # module also uses backquote repr elsewhere, so it targets py2).
        return sum(fdist.N() for fdist in self.itervalues())

    def plot(self, *args, **kwargs):
        """
        Plot the given samples from the conditional frequency distribution.
        For a cumulative plot, specify cumulative=True.
        (Requires Matplotlib to be installed.)

        :param samples: The samples to plot
        :type samples: list
        :param title: The title for the graph
        :type title: str
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        """
        try:
            import pylab
        except ImportError:
            raise ValueError('The plot function requires the matplotlib package (aka pylab).'
                         'See http://matplotlib.sourceforge.net/')

        # _get_kwarg pops the option so only matplotlib kwargs remain.
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
        title = _get_kwarg(kwargs, 'title', '')
        samples = _get_kwarg(kwargs, 'samples',
                             sorted(set(v for c in conditions for v in self[c])))  # this computation could be wasted
        if not "linewidth" in kwargs:
            kwargs["linewidth"] = 2

        # One line per condition, all over the same x-axis sample order.
        for condition in conditions:
            if cumulative:
                freqs = list(self[condition]._cumulative_frequencies(samples))
                ylabel = "Cumulative Counts"
                legend_loc = 'lower right'
            else:
                freqs = [self[condition][sample] for sample in samples]
                ylabel = "Counts"
                legend_loc = 'upper right'
            # percents = [f * 100 for f in freqs] only in ConditionalProbDist?
            kwargs['label'] = str(condition)
            pylab.plot(freqs, *args, **kwargs)

        pylab.legend(loc=legend_loc)
        pylab.grid(True, color="silver")
        # NOTE(review): ``unicode`` is a Python-2 builtin.
        pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
        if title:
            pylab.title(title)
        pylab.xlabel("Samples")
        pylab.ylabel(ylabel)
        pylab.show()

    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the conditional frequency distribution.

        :param samples: The samples to plot
        :type samples: list
        :param title: The title for the graph
        :type title: str
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        """
        cumulative = _get_kwarg(kwargs, 'cumulative', False)
        conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
        samples = _get_kwarg(kwargs, 'samples',
                             sorted(set(v for c in conditions for v in self[c])))  # this computation could be wasted

        # Header row: right-aligned sample labels after an empty cell the
        # width of the longest condition name.
        condition_size = max(len(str(c)) for c in conditions)
        print(' ' * condition_size, end=' ')
        for s in samples:
            print("%4s" % str(s), end=' ')
        print()
        for c in conditions:
            print("%*s" % (condition_size, str(c)), end=' ')
            if cumulative:
                freqs = list(self[c]._cumulative_frequencies(samples))
            else:
                freqs = [self[c][sample] for sample in samples]

            for f in freqs:
                print("%4d" % f, end=' ')
            print()

    def __le__(self, other):
        # A CFD is <= another when its conditions are a subset and each
        # per-condition FreqDist is pointwise <=.
        # NOTE(review): returning False (not NotImplemented) for foreign
        # types makes comparisons with other types silently False.
        if not isinstance(other, ConditionalFreqDist): return False
        return set(self.conditions()).issubset(other.conditions()) \
               and all(self[c] <= other[c] for c in self.conditions())
    def __lt__(self, other):
        if not isinstance(other, ConditionalFreqDist): return False
        return self <= other and self != other
    def __ge__(self, other):
        if not isinstance(other, ConditionalFreqDist): return False
        return other <= self
    def __gt__(self, other):
        if not isinstance(other, ConditionalFreqDist): return False
        return other < self

    def __repr__(self):
        """
        Return a string representation of this ``ConditionalFreqDist``.

        :rtype: str
        """
        return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
    """
    A collection of probability distributions for a single experiment
    run under different conditions.  Conditional probability
    distributions are used to estimate the likelihood of each sample,
    given the condition under which the experiment was run.  For
    example, a conditional probability distribution could be used to
    estimate the probability of each word type in a document, given
    the length of the word type.  Formally, a conditional probability
    distribution can be defined as a function that maps from each
    condition to the ``ProbDist`` for the experiment under that
    condition.
    """

    def __init__(self):
        # Abstract interface: concrete subclasses must override the
        # constructor (they call defaultdict.__init__ directly).
        raise NotImplementedError("Interfaces can't be instantiated")

    def conditions(self):
        """
        Return a list of the conditions that are represented by
        this ``ConditionalProbDist``.  Use the indexing operator to
        access the probability distribution for a given condition.

        :rtype: list
        """
        # The conditions are simply the dict keys.
        return self.keys()

    def __repr__(self):
        """
        Return a string representation of this ``ConditionalProbDist``.

        :rtype: str
        """
        # Uses the concrete subclass's name in the repr.
        return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
    """
    A conditional probability distribution modelling the experiments
    that were used to generate a conditional frequency distribution.
    A ConditionalProbDist is constructed from a
    ``ConditionalFreqDist`` and a ``ProbDist`` factory:

    - The ``ConditionalFreqDist`` specifies the frequency
      distribution for each condition.
    - The ``ProbDist`` factory is a function that takes a
      condition's frequency distribution, and returns its
      probability distribution.  A ``ProbDist`` class's name (such as
      ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
      that class's constructor.

    The first argument to the ``ProbDist`` factory is the frequency
    distribution that it should model; and the remaining arguments are
    specified by the ``factory_args`` parameter to the
    ``ConditionalProbDist`` constructor.  For example, the following
    code constructs a ``ConditionalProbDist``, where the probability
    distribution for each condition is an ``ELEProbDist`` with 10 bins:

        >>> from nltk.probability import ConditionalProbDist, ELEProbDist
        >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
        >>> print cpdist['run'].max()
        'NN'
        >>> print cpdist['run'].prob('NN')
        0.0813
    """

    def __init__(self, cfdist, probdist_factory,
                 *factory_args, **factory_kw_args):
        """
        Construct a new conditional probability distribution, based on
        the given conditional frequency distribution and ``ProbDist``
        factory.

        :type cfdist: ConditionalFreqDist
        :param cfdist: The ``ConditionalFreqDist`` specifying the
            frequency distribution for each condition.
        :type probdist_factory: class or function
        :param probdist_factory: The function or class that maps
            a condition's frequency distribution to its probability
            distribution.  The function is called with the frequency
            distribution as its first argument,
            ``factory_args`` as its remaining arguments, and
            ``factory_kw_args`` as keyword arguments.
        :type factory_args: (any)
        :param factory_args: Extra arguments for ``probdist_factory``.
            These arguments are usually used to specify extra
            properties for the probability distributions of individual
            conditions, such as the number of bins they contain.
        :type factory_kw_args: (any)
        :param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
        """
        # self._probdist_factory = probdist_factory
        # self._cfdist = cfdist
        # self._factory_args = factory_args
        # self._factory_kw_args = factory_kw_args

        # Unknown conditions default to a ProbDist over an empty
        # FreqDist, built with the same factory arguments.
        factory = lambda: probdist_factory(FreqDist(),
                                           *factory_args, **factory_kw_args)
        defaultdict.__init__(self, factory)
        # Eagerly build the ProbDist for every condition already
        # present in the ConditionalFreqDist.
        for condition in cfdist:
            self[condition] = probdist_factory(cfdist[condition],
                                               *factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
    """
    An alternative ConditionalProbDist that simply wraps a dictionary of
    ProbDists rather than creating these from FreqDists.
    """

    def __init__(self, probdist_dict):
        """
        :param probdist_dict: a dictionary containing the probdists indexed
            by the conditions
        :type probdist_dict: dict any -> probdist
        """
        # Missing conditions default to a fresh (empty) DictionaryProbDist.
        # Deliberately bypasses ConditionalProbDistI.__init__, which would
        # raise NotImplementedError.
        defaultdict.__init__(self, DictionaryProbDist)
        self.update(probdist_dict)
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)

def add_logs(logx, logy):
    """
    Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
    *log(x+y)*.  Conceptually, this is the same as returning
    ``log(2**(logx)+2**(logy))``, but the actual implementation
    avoids overflow errors that could result from direct computation.
    """
    # When one addend is vanishingly small next to the other, the sum is
    # (to within float precision) just the larger term.
    if logx < logy + _ADD_LOGS_MAX_DIFF:
        return logy
    if logy < logx + _ADD_LOGS_MAX_DIFF:
        return logx
    # Otherwise factor out the smaller exponent so the powers of two
    # stay representable before taking the log of their sum.
    smaller = min(logx, logy)
    return smaller + math.log(2 ** (logx - smaller) + 2 ** (logy - smaller), 2)
def sum_logs(logs):
    # Fold ``add_logs`` over a sequence of log-space values; an empty
    # sequence sums to log(0), i.e. negative infinity (_NINF).
    # NOTE(review): relies on the builtin ``reduce`` (Python 2) and a
    # module-level ``_NINF`` constant defined elsewhere in this file.
    return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
    """
    A mix-in class to associate probabilities with other classes
    (trees, rules, etc.).  To use the ``ProbabilisticMixIn`` class,
    define a new class that derives from an existing class and from
    ProbabilisticMixIn.  You will need to define a new constructor for
    the new class, which explicitly calls the constructors of both its
    parent classes.  For example:

        >>> from nltk.probability import ProbabilisticMixIn
        >>> class A:
        ...     def __init__(self, x, y): self.data = (x,y)
        ...
        >>> class ProbabilisticA(A, ProbabilisticMixIn):
        ...     def __init__(self, x, y, **prob_kwarg):
        ...         A.__init__(self, x, y)
        ...         ProbabilisticMixIn.__init__(self, **prob_kwarg)

    See the documentation for the ProbabilisticMixIn
    ``constructor<__init__>`` for information about the arguments it
    expects.

    You should generally also redefine the string representation
    methods, the comparison methods, and the hashing method.
    """

    def __init__(self, **kwargs):
        """
        Initialize this object's probability.  This initializer should
        be called by subclass constructors.  ``prob`` should generally be
        the first argument for those constructors.

        :param prob: The probability associated with the object.
        :type prob: float
        :param logprob: The log of the probability associated with
            the object.
        :type logprob: float
        """
        # Exactly one of prob / logprob may be supplied; the other
        # representation is derived lazily on first access.
        if 'prob' in kwargs:
            if 'logprob' in kwargs:
                raise TypeError('Must specify either prob or logprob '
                                '(not both)')
            else:
                ProbabilisticMixIn.set_prob(self, kwargs['prob'])
        elif 'logprob' in kwargs:
            ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
        else:
            # Neither given: probability is unknown until set_* is called.
            self.__prob = self.__logprob = None

    def set_prob(self, prob):
        """
        Set the probability associated with this object to ``prob``.

        :param prob: The new probability
        :type prob: float
        """
        # Invalidate the cached log form; it is recomputed on demand.
        # (Attributes are name-mangled, so subclasses must use these
        # accessors rather than touching __prob directly.)
        self.__prob = prob
        self.__logprob = None

    def set_logprob(self, logprob):
        """
        Set the log probability associated with this object to
        ``logprob``.  I.e., set the probability associated with this
        object to ``2**(logprob)``.

        :param logprob: The new log probability
        :type logprob: float
        """
        self.__logprob = logprob
        self.__prob = None

    def prob(self):
        """
        Return the probability associated with this object.

        :rtype: float
        """
        if self.__prob is None:
            if self.__logprob is None: return None
            # Lazily derive the plain probability from the base-2 log.
            self.__prob = 2**(self.__logprob)
        return self.__prob

    def logprob(self):
        """
        Return ``log(p)``, where ``p`` is the probability associated
        with this object.

        :rtype: float
        """
        if self.__logprob is None:
            if self.__prob is None: return None
            # Lazily derive the base-2 log from the plain probability.
            self.__logprob = math.log(self.__prob, 2)
        return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
    """A ``ProbabilisticMixIn`` whose probability may not be changed."""

    def set_prob(self, prob):
        # Reject any attempt to mutate the stored probability.
        raise ValueError('%s is immutable' % self.__class__.__name__)
    def set_logprob(self, prob):
        # Reject any attempt to mutate the stored log probability.
        raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
    """
    Create a new frequency distribution, with random samples.  The
    samples are numbers from 1 to ``numsamples``, and are generated by
    summing two numbers, each of which has a uniform distribution.
    """
    import random
    fdist = FreqDist()
    for x in range(numoutcomes):
        # BUG FIX: use floor division so the bounds stay integers on
        # Python 3 as well ("/" yields a float there, which
        # random.randint rejects).  On Python 2 this is identical to
        # the original integer division.
        y = (random.randint(1, (1 + numsamples) // 2) +
             random.randint(0, numsamples // 2))
        fdist.inc(y)
    return fdist
def _create_sum_pdist(numsamples):
    """
    Return the true probability distribution for the experiment
    ``_create_rand_fdist(numsamples, x)``.
    """
    fdist = FreqDist()
    # BUG FIX: floor division keeps the range() bounds integral under
    # Python 3 (true division would yield floats and raise TypeError);
    # identical behavior on Python 2.
    for x in range(1, (1 + numsamples) // 2 + 1):
        for y in range(0, numsamples // 2 + 1):
            fdist.inc(x + y)
    return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
    """
    A demonstration of frequency distributions and probability
    distributions.  This demonstration creates three frequency
    distributions with, and uses them to sample a random process with
    ``numsamples`` samples.  Each frequency distribution is sampled
    ``numoutcomes`` times.  These three frequency distributions are
    then used to build six probability distributions.  Finally, the
    probability estimates of these distributions are compared to the
    actual probability of each sample.

    :type numsamples: int
    :param numsamples: The number of samples to use in each demo
        frequency distributions.
    :type numoutcomes: int
    :param numoutcomes: The total number of outcomes for each
        demo frequency distribution.  These outcomes are divided into
        ``numsamples`` bins.
    :rtype: None
    """

    # Randomly sample a stochastic process three times.
    fdist1 = _create_rand_fdist(numsamples, numoutcomes)
    fdist2 = _create_rand_fdist(numsamples, numoutcomes)
    fdist3 = _create_rand_fdist(numsamples, numoutcomes)

    # Use our samples to create probability distributions.
    pdists = [
        MLEProbDist(fdist1),
        LidstoneProbDist(fdist1, 0.5, numsamples),
        HeldoutProbDist(fdist1, fdist2, numsamples),
        HeldoutProbDist(fdist2, fdist1, numsamples),
        CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
        GoodTuringProbDist(fdist1),
        SimpleGoodTuringProbDist(fdist1),
        SimpleGoodTuringProbDist(fdist1, 7),
        _create_sum_pdist(numsamples),
    ]

    # Find the probability of each sample.
    vals = []
    for n in range(1, numsamples+1):
        vals.append(tuple([n, fdist1.freq(n)] +
                          [pdist.prob(n) for pdist in pdists]))

    # Print the results in a formatted table.
    print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
           (numsamples, numsamples, numoutcomes)))
    print('='*9*(len(pdists)+2))
    FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
    # BUG FIX: the original used backquote repr syntax (`pdist`), which
    # was removed in Python 3; repr() is the exact equivalent.
    print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
    print('-'*9*(len(pdists)+2))
    FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
    for val in vals:
        print(FORMATSTR % val)

    # Print the totals for each column (should all be 1.0)
    # BUG FIX: zip() returns a lazy iterator on Python 3, which cannot
    # be sliced; materialize it as a list first.  Also use the builtin
    # sum() instead of a local reduce-based redefinition that shadowed
    # it (identical result for numeric columns).
    zvals = list(zip(*vals))
    sums = [sum(val) for val in zvals[1:]]
    print('-'*9*(len(pdists)+2))
    FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
    print(FORMATSTR % tuple(sums))
    print('='*9*(len(pdists)+2))

    # Display the distributions themselves, if they're short enough.
    # BUG FIX: backquote repr replaced with repr() (behavior preserved,
    # including the two quote characters the original repr added).
    if len(repr(str(fdist1))) < 70:
        print(' fdist1:', str(fdist1))
        print(' fdist2:', str(fdist2))
        print(' fdist3:', str(fdist3))
    print()

    print('Generating:')
    for pdist in pdists:
        fdist = FreqDist(pdist.generate() for i in range(5000))
        print('%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55]))
    print()
def gt_demo():
    """
    Compare Good-Turing, Simple Good-Turing and Katz-cutoff probability
    estimates for every word type of Austen's *Emma* (loaded from the
    NLTK Gutenberg corpus), printed as a table.
    """
    from nltk import corpus
    emma_words = corpus.gutenberg.words('austen-emma.txt')
    fd = FreqDist(emma_words)
    gt = GoodTuringProbDist(fd)
    sgt = SimpleGoodTuringProbDist(fd)
    # NOTE(review): bins=7 is far smaller than fd.B() for a real corpus,
    # which trips the ``bins > freqdist.B()`` assertion in
    # SimpleGoodTuringProbDist.__init__ -- confirm this demo still runs.
    katz = SimpleGoodTuringProbDist(fd, 7)
    print('%18s %8s %12s %14s %12s' \
        % ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" ))
    for key in fd:
        print('%18s %8d %12e %14e %12e' \
            % (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key)))
if __name__ == '__main__':
    # Exercise the estimators on a tiny and a larger random data set,
    # then run the Good-Turing comparison against the Gutenberg corpus.
    demo(6, 10)
    demo(5, 5000)
    gt_demo()
# Public API of this module, honoured by ``from nltk.probability import *``.
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
           'ConditionalProbDistI', 'CrossValidationProbDist',
           'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
           'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
           'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
           'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
           'UniformProbDist', 'WittenBellProbDist', 'add_logs',
           'log_likelihood', 'sum_logs', 'entropy']
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = u'lsquic'
copyright = u'2020, LiteSpeed Technologies'
author = u'LiteSpeed Technologies'

# The short X.Y version
version = u'2.11'
# The full version, including alpha/beta/rc tags
release = u'2.11.1'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases (>= 5) warn on None and expect a
# language code such as 'en' -- confirm against the pinned Sphinx version.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# Default reST role and domain: bare `name` references resolve as C
# functions, matching lsquic's C API documentation.
default_role = 'c:func'
primary_domain = 'c'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# NOTE(review): the leading slash makes this an absolute URL path rather
# than a file under _static -- confirm this is what the deployed docs expect.
html_style = '/default.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'lsquicdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'lsquic.tex', u'lsquic Documentation',
     u'LiteSpeed Technologies', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lsquic', u'lsquic Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'lsquic', u'lsquic Documentation',
     author, 'lsquic', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
import json
import os
from utils.args_utils import get_directory_or_cwd
from utils.constants import zpspec_file_name
def init_command(args):
    """
    Create a fresh zpspec file for the package described by *args*.

    The spec records the package name and version as JSON.  Creation
    uses exclusive mode, so an existing spec file is never overwritten
    (``FileExistsError`` is raised instead).
    """
    spec = {
        'packageName': args.name,
        'version': args.version,
    }
    target_dir = get_directory_or_cwd(args)
    zpspec_path = os.path.join(target_dir, zpspec_file_name)
    # 'tx' == text mode + exclusive creation: fail rather than clobber.
    with open(zpspec_path, 'tx') as fh:
        json.dump(spec, fh)
    print(f'Created zpspec file under {zpspec_path}')
|
# Helper for the mirror on GAE
# GAE GETs an action gae_file, giving GAE host and a secret
# PyPI GETs /mkupload/secret, learning path and upload session
# PyPI POSTs to upload session
import urllib2, httplib, threading, os, binascii, urlparse
# Skeleton of the multipart/form-data body POSTed to the GAE upload session.
# The %(...)s slots (boundary, secret, path, data) are filled from locals()
# inside doit().
# NOTE(review): per RFC 2046 each part's headers should be separated from
# its value by a blank line -- verify this template renders valid multipart.
POST="""\
--%(boundary)s
Content-Disposition: form-data; name="secret"
%(secret)s
--%(boundary)s
Content-Disposition: form-data; name="path"
%(path)s
--%(boundary)s
Content-Disposition: form-data; name="file"; filename="%(path)s"
Content-Type: application/octet-stream
%(data)s
--%(boundary)s
"""
# HTTP bodies use CRLF line endings; normalize whatever line endings the
# source literal carried, and make sure the body ends with one.
POST = "\r\n".join(POST.splitlines())+"\r\n"
def doit(host, secret, srcdir):
    # Ask PyPI for an upload ticket.  On success the response body holds
    # two lines: the file path to mirror and the GAE upload-session URL.
    x = urllib2.urlopen('http://%s/mkupload/%s' % (host, secret))
    if x.code != 200:
        return
    path, url = x.read().splitlines()
    # Keep only the netloc and path components of the session URL.
    host, session = urlparse.urlsplit(url)[1:3]
    try:
        data = open(srcdir + "/" + path).read()
    except IOError, e:
        # Best-effort mirror: if the file is missing locally, give up
        # silently.  NOTE(review): 'e' is bound but never used.
        return
    # Pick a boundary string that does not occur in the payload.  The empty
    # string is a substring of everything, so the loop always runs at least
    # once before a random hex boundary is chosen.
    boundary = ""
    while boundary in data:
        boundary = binascii.hexlify(os.urandom(10))
    body = POST % locals()
    # Split an explicit port off the host, defaulting to plain HTTP.
    if ':' in host:
        host, port = host.split(':')
    else:
        port = 80
    c = httplib.HTTPConnection(host, port)
    c.request('POST', session,
              headers = {'Content-type': 'multipart/form-data; boundary=' + boundary,
                         'Content-length': str(len(body)),
                         'Host': host},
              body=body)
    resp = c.getresponse()
    data = resp.read()
    # result code should be redirect
    c.close()
def transfer(host, secret, srcdir):
    """Kick off the mirror upload on a background thread (fire-and-forget)."""
    worker = threading.Thread(
        target=doit, args=(host, secret.encode('ascii'), srcdir))
    worker.start()
|
from unittest import TestCase
import nisyscfg.system
import nisyscfg.hardware_resource
from click.testing import CliRunner
from nisyscfgcli import nisyscfgcli
from unittest.mock import patch
class Mock_NI_Hardware_Item:
    """Stand-in for a nisyscfg hardware resource used by the CLI tests.

    Mirrors only the attributes and operations the CLI touches; every
    mutating operation is a no-op that returns None.
    """

    def __init__(self, expert_user_alias, product_name):
        self.expert_user_alias = expert_user_alias
        self.product_name = product_name

    def get_property(self, name, default):
        # Every property query answers with the product name -- that is all
        # the CLI output assertions need.
        return self.product_name

    def rename(self, new_name):  # accepted and ignored
        pass

    def delete(self):  # accepted and ignored
        pass

    def self_test(self):  # accepted and ignored
        pass

    def upgrade_firmware(self, version):  # accepted and ignored
        pass
# Canned hardware items shared by the test classes below.
fake_1 = Mock_NI_Hardware_Item("A", "NI_Product")
fake_2 = Mock_NI_Hardware_Item("B", "NI_Product")
fake_3 = Mock_NI_Hardware_Item("C", "NI_Product")
fake_4 = Mock_NI_Hardware_Item("D", "NI_Product")
# fake_4 is deliberately excluded, so its alias "D" can act as a name that
# is not present in the mocked system.
example_data = [fake_1, fake_2, fake_3]
# =======================================================List Tests=====================================================
class TestListCommand(TestCase):
    """Exercise the 'list' sub-command and its flags."""

    def test_list_no_flags(self):
        fake_items = [Mock_NI_Hardware_Item("A", "NI_Product")]
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=fake_items
        ):
            result = CliRunner().invoke(nisyscfgcli, ["list"])
        assert "A" in result.output
        assert result.exit_code == 0

    def test_list_verbose_flag(self):
        fake_items = [Mock_NI_Hardware_Item("A", "NI_Product")]
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=fake_items
        ):
            result = CliRunner().invoke(nisyscfgcli, ["list", "-v"])
        assert "A" in result.output
        assert "--Product Name:" in result.output
        assert result.exit_code == 0

    # An empty string is understood as 'localhost', so in principle no
    # argument would be needed -- but Click requires one whenever -r is
    # used, hence the usage-error exit code 2.
    def test_list_RT_with_flag_no_arg(self):
        result = CliRunner().invoke(nisyscfgcli, ["list", "-r"])
        assert result.exit_code == 2

    def test_list_RT_with_flag_with_arg(self):
        target = "localhost"
        result = CliRunner().invoke(nisyscfgcli, ["list", "-r", target])
        assert result.exit_code == 0
        assert f"Scanning {target}" in result.output
# =======================================================Delete Tests===================================================
class TestDeleteCommand(TestCase):
    """Exercise the 'delete' sub-command."""

    def test_delete_non_existing_device(self):
        # Deleting an unknown alias is reported, not treated as an error.
        runner = CliRunner()
        result = runner.invoke(nisyscfgcli, ["delete", "x"])
        assert result.exit_code == 0
        assert "retry" in result.output

    def test_delete_existing_device(self):
        # Removed a leftover `assert 0 == 0` no-op that was here.
        # Without -y the command prompts for confirmation; with no input
        # supplied the prompt aborts, hence exit code 1.
        with patch.object(
            nisyscfg.system.Session,
            "find_hardware",
            return_value=[Mock_NI_Hardware_Item("A", "NI_Product")],
        ):
            runner = CliRunner()
            result = runner.invoke(nisyscfgcli, ["delete", "A"])
            assert result.exit_code == 1
            assert "you sure" in result.output

    def test_delete_with_force_flag(self):
        # -y skips the confirmation prompt and deletes immediately.
        with patch.object(
            nisyscfg.system.Session,
            "find_hardware",
            return_value=[Mock_NI_Hardware_Item("A", "NI_Product")],
        ):
            runner = CliRunner()
            name = fake_1.expert_user_alias
            command = "delete"
            flag = "-y"
            result = runner.invoke(nisyscfgcli, [command, flag, name])
            assert result.exit_code == 0
            assert "deleted" in result.output
# =======================================================Rename Tests===================================================
class TestRenameCommand(TestCase):
    """Exercise the 'rename' sub-command."""

    def _rename(self, *cli_args):
        # Helper: invoke 'rename' with the given extra arguments.
        return CliRunner().invoke(nisyscfgcli, ["rename", *cli_args])

    def test_rename_no_args(self):
        assert self._rename().exit_code == 2

    # Renaming to an empty string is not allowed, so both names are
    # required -- Click exits with 2 when one is missing.
    def test_rename_one_arg(self):
        assert self._rename("name").exit_code == 2

    def test_rename_with_not_available_device(self):
        result = self._rename("not real", "not real")
        assert result.exit_code == 0
        assert "No items with matching alias" in result.output

    def test_rename_to_existing_name(self):
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=example_data
        ):
            result = self._rename(
                fake_1.expert_user_alias, fake_3.expert_user_alias)
        assert result.exit_code == 0
        assert "already in use" in result.output

    def test_rename_with_everything_correct(self):
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=example_data
        ):
            result = self._rename(
                fake_1.expert_user_alias, fake_4.expert_user_alias)
        assert result.exit_code == 0
        assert "Rename successful!" in result.output
# =======================================================Info Tests=====================================================
class TestInfoCommand(TestCase):
    """Exercise the 'info' sub-command."""

    def test_info_with_not_available_device(self):
        # 'info' without an alias is a usage error (exit code 2).
        result = CliRunner().invoke(nisyscfgcli, ["info"])
        assert result.exit_code == 2

    def test_info_with_available_device(self):
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=example_data
        ):
            result = CliRunner().invoke(
                nisyscfgcli, ["info", fake_1.expert_user_alias])
        assert result.exit_code == 0
        assert "--Product Name:" in result.output
# =======================================================self_test Tests================================================
class TestSelfTestCommand(TestCase):
    """Exercise the 'self_test' sub-command."""

    def test_self_test_with_no_alias(self):
        # The alias argument is mandatory; Click exits with 2 without it.
        result = CliRunner().invoke(nisyscfgcli, ["self_test"])
        assert result.exit_code == 2

    def test_self_test_with_non_existing_alias(self):
        result = CliRunner().invoke(nisyscfgcli, ["self_test", "nothing"])
        assert result.exit_code == 0
        assert "No items with matching alias" in result.output

    def test_self_test_with_existing_alias(self):
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=example_data
        ):
            result = CliRunner().invoke(
                nisyscfgcli, ["self_test", fake_1.expert_user_alias])
        assert result.exit_code == 0
        assert "completed successfully" in result.output
# =======================================================upgrade_firmware Tests=========================================
class TestUpgradeFirmwareCommand(TestCase):
    """Exercise the 'upgrade_firmware' sub-command."""

    def test_upgrade_firmware_with_no_alias(self):
        # The alias argument is mandatory; Click exits with 2 without it.
        result = CliRunner().invoke(nisyscfgcli, ["upgrade_firmware"])
        assert result.exit_code == 2

    def test_upgrade_firmware_with_non_existing_alias(self):
        result = CliRunner().invoke(
            nisyscfgcli, ["upgrade_firmware", "nothing"])
        assert result.exit_code == 0
        assert "No items with matching alias" in result.output

    def test_upgrade_firmware_with_existing_alias(self):
        with patch.object(
            nisyscfg.system.Session, "find_hardware", return_value=example_data
        ):
            result = CliRunner().invoke(
                nisyscfgcli, ["upgrade_firmware", fake_1.expert_user_alias])
        assert result.exit_code == 0
        assert "Firmware upgraded" in result.output
|
# vim: encoding=utf-8
""" Localization table
"""
# Per-locale phrase table.  Each entry is a six-item list indexed as:
#   0: new moon, 1: first quarter, 2: full moon, 3: last quarter,
#   4: northern hemisphere, 5: southern hemisphere
# (order established by the 'en' entry).
# NOTE(review): 'sr' mixes Latin-script phase names with Cyrillic-script
# hemisphere names, and 'nb' reuses the Nynorsk hemisphere forms of 'nn'
# -- confirm these are intentional.
LITS = {
    # pylint: disable=line-too-long
    'en': ["New Moon", "First Quarter", "Full Moon", "Last Quarter", "Northern Hemisphere", "Southern Hemisphere"],
    'be': ["Маладзік", "Першая чвэрць", "Поўня", "Апошняя чвэрць", "Паўночнае паўшар’е", "Паўднёвае паўшар’е"],
    'bg': ["Новолуние", "Първа четвърт", "Пълнолуние", "Последна четвърт", "Северно полукълбо", "Южно полукълбо"],
    'ca': ["Noviluni", "Quart creixent", "Pleniluni", "Lluna minvant", "Hemisferi nord", "Hemisferi sud"],
    'cs': ["Nov", "První čtvrť", "Úplněk", "Poslední čtvrť", "Severní polokoule", "Jižní polokoule"],
    'da': ["Nymåne", "Tiltagende", "Fuldmåne", "Aftagende", "Nordlige halvkugle", "Sydlige halvkugle"],
    'de': ["Neumond", "Erstes Viertel", "Vollmond", "Letztes Viertel", "Nordhalbkugel", "Südhalbkugel"],
    'et': ["Noorkuu", "Esimene veerand", "Täiskuu", "Viimane veerand", "Põhjapoolkera", "Lõunapoolkera"],
    'el': ["Νέα Σελήνη", "Πρώτο τέταρτο", "Πανσέληνος", "Τελευταίο τέταρτο", "Βόρειο ημισφαίριο", "Νότιο ημισφαίριο"],
    'es': ["Luna nueva", "Cuarto creciente", "Luna Llena", "Cuarto menguante", "Hemisferio norte", "Hemisferio sur"],
    'eo': ["Novluno", "Unua lunduono", "Plenluno", "Lasta lunduono", "Norda duonglobo", "Suda duonglobo"],
    'fi': ["Uusikuu", "Kasvava puolikuu", "Täysikuu", "Laskeva puolikuu", "Pohjoinen pallonpuolisko", "Eteläinen pallonpuolisko"],
    'fr': ["Nouvelle lune", "Premier quartier", "Pleine lune", "Dernier quartier", "Hémisphère nord", "Hémisphère sud"],
    'hr': ["Mlađak", "Prva četvrt", "Uštap", "Zadnja četvrt", "Sjeverna polutka", "Južna polutka"],
    'ia': ["Nove lunio", "Primo quarte", "Plenilunio", "Ultime quarte", "Hemispherio del nord", "Hemispherio del sud"],
    'it': ["Luna nuova", "Primo quarto", "Luna piena", "Ultimo quarto", "Emisfero boreale", "Emisfero australe"],
    'ja': ["新月", "上弦", "満月", "下弦", "北半球", "南半球"],
    'nl': ["Nieuwe maan", "Eerste kwartier", "Volle maan", "Laatste kwartier", "Noordelijk halfrond", "Zuidelijk halfrond"],
    'ru': ["Новолуние", "Первая четверть", "Полнолуние", "Последняя четверть", "Северное полушарие", "Южное полушарие"],
    'pl': ["Nów", "Pierwsza kwadra", "Pełnia", "Ostatnia kwadra", "Półkula północna", "Półkula południowa"],
    'pt': ["Lua nova", "Quarto crescente", "Lua cheia", "Quarto minguante", "Hemisfério Norte", "Hemisfério Sul"],
    'ro': ["Lună nouă", "Primul pătrar", "Lună plină", "Ultimul pătrar", "Emisfera nordică", "Emisfera sudică"],
    'sk': ["Nov", "Prvá štvrť", "Úplnok", "Posledná štvrť", "Severná pologuľa", "Južná pologuľa"],
    'sr': ["Mlađak", "Prva četvrt", "Uštap", "Poslednja četvrt", "Северна хемисфера", "Јужна хемисфера"],
    'uk': ["Молодик", "Перша чверть", "Повня", "Остання чверть", "Північна півкуля", "Південна півкуля"],
    'th': ["เดือนมืด", "ข้างขึ้น", "วันเพ็ญ", "ข้างแรม", "ซีกโลกเหนือ", "ซีกโลกใต้"],
    'ko': ["초승달", "상현달", "보름달", "하현달", "북반구", "남반구"],
    'kn': ["ಅಮಾವಾಸ್ಯೆ", "ಕೃಷ್ಣಪಕ್ಷ, ಅಷ್ಟಮಿ ", "ಹುಣ್ಣಿಮೆ", "ಶುಕ್ಲಪಕ್ಷ, ಅಷ್ಟಮಿ", "ಉತ್ತರ ಗೋಲಾರ್ಧ", "ದಕ್ಷಿಣ ಗೋಳಾರ್ಧ"],
    'zh_TW': ["新月", "上弦", "滿月", "下弦", "北半球", "南半球"],
    'ar': ["المحاق", "الربع الأول", "البدر", "الربع الأخير", "نصف الأرض الشمالي", "نصف الأرض الجنوبي"],
    'nb': ["Nymåne", "Første kvarter", "Fullmåne", "Siste kvarter", "Den nordlege halvkula", "Den sørlege halvkula"],
    'nn': ["Nymåne", "Første kvarter", "Fullmåne", "Siste kvarter", "Den nordlege halvkula", "Den sørlege halvkula"],
    'cy': ["Lleuad Newydd", "Chwarter Cyntaf", "Lleuad Llawn", "Chwarter Olaf", "Hemisffer y Gogledd", "Hemisffer y De"],
    'tr': ["Yeni Ay", "İlk Dördün", "Dolunay", "Son Dördün", "Kuzey yarımküre", "Güney yarımküre"],
}
|
"""Solution to Project Euler Problem 5
https://projecteuler.net/problem=5
gcd, lcm, and lcmm functions by J.F. Sebastian.
http://stackoverflow.com/a/147539/6119465
"""
from functools import reduce
# Upper bound of the range whose LCM is computed (problem statement: 1..20).
MAXIMUM = 20
def gcd(num1, num2):
    """Return greatest common divisor using Euclid's Algorithm.

    Implemented recursively: gcd(a, b) == gcd(b, a mod b), with
    gcd(a, 0) == a as the base case.
    """
    if num2 == 0:
        return num1
    return gcd(num2, num1 % num2)
def lcm(num1, num2):
    """Return lowest common multiple via the identity lcm(a, b) = a*b / gcd(a, b)."""
    common_divisor = gcd(num1, num2)
    return num1 * num2 // common_divisor
def lcmm(*args):
    """Return the least common multiple of every argument (folds lcm left-to-right)."""
    return reduce(lambda acc, value: lcm(acc, value), args)
def compute(maximum=MAXIMUM):
    """Compute the LCM of all integers from 1 to `maximum`."""
    candidates = range(1, maximum + 1)
    return lcmm(*candidates)
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for collection models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import rights_manager
from core.platform import models
from core.tests import test_utils
import feconf
(base_models, collection_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.collection, models.NAMES.user])
class CollectionModelUnitTest(test_utils.GenericTestBase):
    """Test the CollectionModel class."""

    def test_get_deletion_policy(self):
        self.assertEqual(
            collection_models.CollectionModel.get_deletion_policy(),
            base_models.DELETION_POLICY.KEEP_IF_PUBLIC)

    def test_has_reference_to_user_id(self):
        # Saving a collection records its committer, so the model should
        # reference that user id and no other.
        default_collection = (
            collection_domain.Collection.create_default_collection(
                'id', title='A title',
                category='A Category', objective='An Objective'))
        collection_services.save_new_collection(
            'committer_id', default_collection)
        self.assertTrue(
            collection_models.CollectionModel
            .has_reference_to_user_id('committer_id'))
        self.assertFalse(
            collection_models.CollectionModel
            .has_reference_to_user_id('x_id'))

    def test_get_collection_count(self):
        default_collection = (
            collection_domain.Collection.create_default_collection(
                'id', title='A title',
                category='A Category', objective='An Objective'))
        collection_services.save_new_collection('id', default_collection)
        self.assertEqual(
            collection_models.CollectionModel.get_collection_count(), 1)
class CollectionRightsModelUnitTest(test_utils.GenericTestBase):
    """Test the CollectionRightsModel class."""

    COLLECTION_ID_1 = '1'
    COLLECTION_ID_2 = '2'
    COLLECTION_ID_3 = '3'
    COLLECTION_ID_4 = '4'
    USER_ID_1 = 'id_1'  # Related to all three collections
    USER_ID_2 = 'id_2'  # Related to a subset of the three collections
    USER_ID_3 = 'id_3'  # Related to no collections
    USER_ID_4 = 'id_4'  # Related to one collection and then removed from it
    USER_ID_COMMITTER = 'id_5'  # User id used in commits
    USER_ID_4_OLD = 'id_4_old'
    USER_ID_4_NEW = 'id_4_new'
    USER_ID_5_OLD = 'id_5_old'
    USER_ID_5_NEW = 'id_5_new'
    USER_ID_6_OLD = 'id_6_old'
    USER_ID_6_NEW = 'id_6_new'

    def setUp(self):
        # Creates user settings for users 1 and 2, then four rights models
        # whose role lists exercise every ownership combination used by the
        # export-data tests below.  Statement order matters: the user
        # settings must exist before the rights models referencing them are
        # saved.
        super(CollectionRightsModelUnitTest, self).setUp()
        user_models.UserSettingsModel(
            id=self.USER_ID_1,
            gae_id='gae_1_id',
            email='some@email.com',
            role=feconf.ROLE_ID_COLLECTION_EDITOR
        ).put()
        user_models.UserSettingsModel(
            id=self.USER_ID_2,
            gae_id='gae_2_id',
            email='some_other@email.com',
            role=feconf.ROLE_ID_COLLECTION_EDITOR
        ).put()
        # Collection 1: user 1 owns/edits/voices, user 2 may only view.
        collection_models.CollectionRightsModel(
            id=self.COLLECTION_ID_1,
            owner_ids=[self.USER_ID_1],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_1],
            viewer_ids=[self.USER_ID_2],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.1
        ).save(
            self.USER_ID_COMMITTER, 'Created new collection right',
            [{'cmd': rights_manager.CMD_CREATE_NEW}])
        # Collection 2: user 1 holds every role.
        collection_models.CollectionRightsModel(
            id=self.COLLECTION_ID_2,
            owner_ids=[self.USER_ID_1],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_1],
            viewer_ids=[self.USER_ID_1],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.2
        ).save(
            self.USER_ID_COMMITTER, 'Created new collection right',
            [{'cmd': rights_manager.CMD_CREATE_NEW}])
        # Collection 3: user 1 owns/edits, user 2 voices and views.
        collection_models.CollectionRightsModel(
            id=self.COLLECTION_ID_3,
            owner_ids=[self.USER_ID_1],
            editor_ids=[self.USER_ID_1],
            voice_artist_ids=[self.USER_ID_2],
            viewer_ids=[self.USER_ID_2],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.3
        ).save(
            self.USER_ID_COMMITTER, 'Created new collection right',
            [{'cmd': rights_manager.CMD_CREATE_NEW}])
        # Collection 4: user 4 holds every role.
        collection_models.CollectionRightsModel(
            id=self.COLLECTION_ID_4,
            owner_ids=[self.USER_ID_4],
            editor_ids=[self.USER_ID_4],
            voice_artist_ids=[self.USER_ID_4],
            viewer_ids=[self.USER_ID_4],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.4
        ).save(
            self.USER_ID_COMMITTER, 'Created new collection right',
            [{'cmd': rights_manager.CMD_CREATE_NEW}])
        # Reference dict used by the convert_to_valid_dict tests.
        self.col_1_dict = (
            collection_models.CollectionRightsModel.get_by_id(
                self.COLLECTION_ID_1).to_dict())

    def test_get_deletion_policy(self):
        self.assertEqual(
            collection_models.CollectionRightsModel.get_deletion_policy(),
            base_models.DELETION_POLICY.KEEP_IF_PUBLIC)

    def test_convert_to_valid_dict_format_basic(self):
        # A dict that is already valid should pass through unchanged.
        transformed_dict = (
            collection_models.CollectionRightsModel
            .convert_to_valid_dict(self.col_1_dict))
        self.assertEqual(transformed_dict, self.col_1_dict)

    def test_convert_to_valid_dict_format_status(self):
        # The legacy 'publicized' status should be normalized back to the
        # valid status the reference dict carries.
        broken_dict = dict(**self.col_1_dict)
        broken_dict['status'] = 'publicized'
        transformed_dict = (
            collection_models.CollectionRightsModel
            .convert_to_valid_dict(broken_dict))
        self.assertEqual(transformed_dict, self.col_1_dict)

    def test_convert_to_valid_dict_format_translator_ids(self):
        # The legacy 'translator_ids' field should be migrated to
        # 'voice_artist_ids'.
        broken_dict = dict(**self.col_1_dict)
        del broken_dict['voice_artist_ids']
        broken_dict['translator_ids'] = [self.USER_ID_1]
        transformed_dict = (
            collection_models.CollectionRightsModel
            .convert_to_valid_dict(broken_dict))
        self.assertEqual(transformed_dict, self.col_1_dict)

    def test_has_reference_to_user_id(self):
        # Force multiple fetch batches to exercise the paging code path.
        with self.swap(base_models, 'FETCH_BATCH_SIZE', 1):
            self.assertTrue(
                collection_models.CollectionRightsModel
                .has_reference_to_user_id(self.USER_ID_1))
            self.assertTrue(
                collection_models.CollectionRightsModel
                .has_reference_to_user_id(self.USER_ID_2))
            self.assertTrue(
                collection_models.CollectionRightsModel
                .has_reference_to_user_id(self.USER_ID_4))
            self.assertTrue(
                collection_models.CollectionRightsModel
                .has_reference_to_user_id(self.USER_ID_COMMITTER))
            self.assertFalse(
                collection_models.CollectionRightsModel
                .has_reference_to_user_id(self.USER_ID_3))

    def test_save(self):
        collection_models.CollectionRightsModel(
            id='id',
            owner_ids=['owner_ids'],
            editor_ids=['editor_ids'],
            voice_artist_ids=['voice_artist_ids'],
            viewer_ids=['viewer_ids'],
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC,
            viewable_if_private=False,
            first_published_msec=0.0
        ).save(self.USER_ID_COMMITTER, 'Created new collection',
               [{'cmd': rights_manager.CMD_CREATE_NEW}])
        collection_model = collection_models.CollectionRightsModel.get('id')
        self.assertEqual('id', collection_model.id)

    def test_export_data_on_highly_involved_user(self):
        """Test export data on user involved in all datastore collections."""
        collection_ids = (
            collection_models.CollectionRightsModel.export_data(
                self.USER_ID_1))
        expected_collection_ids = {
            'owned_collection_ids': (
                [self.COLLECTION_ID_1,
                 self.COLLECTION_ID_2,
                 self.COLLECTION_ID_3]),
            'editable_collection_ids': (
                [self.COLLECTION_ID_1,
                 self.COLLECTION_ID_2,
                 self.COLLECTION_ID_3]),
            'voiced_collection_ids': (
                [self.COLLECTION_ID_1, self.COLLECTION_ID_2]),
            'viewable_collection_ids': [self.COLLECTION_ID_2]
        }
        self.assertEqual(expected_collection_ids, collection_ids)

    def test_export_data_on_partially_involved_user(self):
        """Test export data on user involved in some datastore collections."""
        collection_ids = (
            collection_models.CollectionRightsModel.export_data(
                self.USER_ID_2))
        expected_collection_ids = {
            'owned_collection_ids': [],
            'editable_collection_ids': [],
            'voiced_collection_ids': [self.COLLECTION_ID_3],
            'viewable_collection_ids': (
                [self.COLLECTION_ID_1, self.COLLECTION_ID_3])
        }
        self.assertEqual(expected_collection_ids, collection_ids)

    def test_export_data_on_uninvolved_user(self):
        """Test for empty lists when user has no collection involvement."""
        collection_ids = (
            collection_models.CollectionRightsModel.export_data(
                self.USER_ID_3))
        expected_collection_ids = {
            'owned_collection_ids': [],
            'editable_collection_ids': [],
            'voiced_collection_ids': [],
            'viewable_collection_ids': []
        }
        self.assertEqual(expected_collection_ids, collection_ids)

    def test_export_data_on_invalid_user(self):
        """Test for empty lists when the user_id is invalid."""
        collection_ids = (
            collection_models.CollectionRightsModel.export_data(
                'fake_user'))
        expected_collection_ids = {
            'owned_collection_ids': [],
            'editable_collection_ids': [],
            'voiced_collection_ids': [],
            'viewable_collection_ids': []
        }
        self.assertEqual(expected_collection_ids, collection_ids)
class CollectionCommitLogEntryModelUnitTest(test_utils.GenericTestBase):
    """Test the CollectionCommitLogEntryModel class."""

    def _make_commit(self, collection_id, status):
        # Build and store one commit log entry.  collection_id is assigned
        # manually because create() does not accept it as a parameter; in
        # production it is set by CollectionModel._trusted_commit().
        commit = collection_models.CollectionCommitLogEntryModel.create(
            collection_id, 0, 'committer_id', 'msg', 'create', [{}],
            status, False)
        commit.collection_id = collection_id
        commit.put()
        return commit

    def test_get_deletion_policy(self):
        self.assertEqual(
            collection_models.CollectionCommitLogEntryModel
            .get_deletion_policy(),
            base_models.DELETION_POLICY.KEEP_IF_PUBLIC)

    def test_has_reference_to_user_id(self):
        self._make_commit('b', constants.ACTIVITY_STATUS_PUBLIC)
        self.assertTrue(
            collection_models.CollectionCommitLogEntryModel
            .has_reference_to_user_id('committer_id'))
        self.assertFalse(
            collection_models.CollectionCommitLogEntryModel
            .has_reference_to_user_id('x_id'))

    def test_get_all_non_private_commits(self):
        self._make_commit('a', constants.ACTIVITY_STATUS_PRIVATE)
        self._make_commit('b', constants.ACTIVITY_STATUS_PUBLIC)
        commits = (
            collection_models.CollectionCommitLogEntryModel
            .get_all_non_private_commits(2, None, max_age=None))
        # The returned triple is (results, cursor, more); only the public
        # commit should come back.
        self.assertEqual(False, commits[2])
        self.assertEqual('collection-b-0', commits[0][0].id)

    def test_get_all_non_private_commits_with_invalid_max_age(self):
        with self.assertRaisesRegexp(
            Exception,
            'max_age must be a datetime.timedelta instance or None.'):
            (
                collection_models.CollectionCommitLogEntryModel
                .get_all_non_private_commits(
                    2, None, max_age='invalid_max_age'))

    def test_get_all_non_private_commits_with_max_age(self):
        self._make_commit('a', constants.ACTIVITY_STATUS_PRIVATE)
        self._make_commit('b', constants.ACTIVITY_STATUS_PUBLIC)
        results, _, more = (
            collection_models.CollectionCommitLogEntryModel
            .get_all_non_private_commits(
                2, None, max_age=datetime.timedelta(hours=1)))
        self.assertFalse(more)
        self.assertEqual(len(results), 1)
        self.assertEqual('collection-b-0', results[0].id)
class CollectionSummaryModelUnitTest(test_utils.GenericTestBase):
    """Tests for the CollectionSummaryModel."""

    COLLECTION_ID_1 = '1'
    COLLECTION_ID_2 = '2'
    COLLECTION_ID_3 = '3'
    USER_ID_1_OLD = 'id_1_old'
    USER_ID_1_NEW = 'id_1_new'
    USER_ID_2_OLD = 'id_2_old'
    USER_ID_2_NEW = 'id_2_new'
    USER_ID_3_OLD = 'id_3_old'
    USER_ID_3_NEW = 'id_3_new'

    def setUp(self):
        # Register the two user ids some tests refer to; the summary
        # models themselves are created inside the individual tests.
        super(CollectionSummaryModelUnitTest, self).setUp()
        user_models.UserSettingsModel(
            id=self.USER_ID_1_NEW,
            gae_id='gae_1_id',
            email='some@email.com',
            role=feconf.ROLE_ID_COLLECTION_EDITOR
        ).put()
        user_models.UserSettingsModel(
            id=self.USER_ID_2_NEW,
            gae_id='gae_2_id',
            email='some_other@email.com',
            role=feconf.ROLE_ID_COLLECTION_EDITOR
        ).put()

    def test_get_deletion_policy(self):
        self.assertEqual(
            collection_models.CollectionSummaryModel.get_deletion_policy(),
            base_models.DELETION_POLICY.KEEP_IF_PUBLIC)

    def test_has_reference_to_user_id(self):
        # A user id is referenced if it appears in any of the role lists.
        collection_models.CollectionSummaryModel(
            id='id0',
            title='title',
            category='category',
            objective='objective',
            language_code='language_code',
            community_owned=False,
            owner_ids=['owner_id'],
            editor_ids=['editor_id'],
            viewer_ids=['viewer_id'],
            contributor_ids=['contributor_id'],
        ).put()
        self.assertTrue(
            collection_models.CollectionSummaryModel
            .has_reference_to_user_id('owner_id'))
        self.assertTrue(
            collection_models.CollectionSummaryModel
            .has_reference_to_user_id('editor_id'))
        self.assertTrue(
            collection_models.CollectionSummaryModel
            .has_reference_to_user_id('viewer_id'))
        self.assertTrue(
            collection_models.CollectionSummaryModel
            .has_reference_to_user_id('contributor_id'))
        self.assertFalse(
            collection_models.CollectionSummaryModel
            .has_reference_to_user_id('x_id'))

    def test_get_non_private(self):
        # One public and one private summary; only the public one should
        # be returned.
        public_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id0',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PUBLIC,
                community_owned=False,
                owner_ids=['owner_ids'],
                editor_ids=['editor_ids'],
                viewer_ids=['viewer_ids'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        public_collection_summary_model.put()
        private_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id1',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PRIVATE,
                community_owned=False,
                owner_ids=['owner_ids'],
                editor_ids=['editor_ids'],
                viewer_ids=['viewer_ids'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        private_collection_summary_model.put()
        collection_summary_models = (
            collection_models.CollectionSummaryModel.get_non_private())
        self.assertEqual(1, len(collection_summary_models))

    def test_get_private_at_least_viewable(self):
        # Both summaries are private; only the one listing user 'a' as a
        # viewer should be returned for that user.
        viewable_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id0',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PRIVATE,
                community_owned=False,
                owner_ids=['owner_ids'],
                editor_ids=['editor_ids'],
                viewer_ids=['a'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        viewable_collection_summary_model.put()
        unviewable_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id1',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PRIVATE,
                community_owned=False,
                owner_ids=['owner_ids'],
                editor_ids=['editor_ids'],
                viewer_ids=['viewer_ids'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        unviewable_collection_summary_model.put()
        collection_summary_models = (
            collection_models.CollectionSummaryModel
            .get_private_at_least_viewable('a'))
        self.assertEqual(1, len(collection_summary_models))
        self.assertEqual('id0', collection_summary_models[0].id)

    def test_get_at_least_editable(self):
        # Owner counts as editable; a viewer-only user gets nothing.
        editable_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id0',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PRIVATE,
                community_owned=False,
                owner_ids=['a'],
                editor_ids=['editor_ids'],
                viewer_ids=['viewer_ids'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        editable_collection_summary_model.put()
        uneditable_collection_summary_model = (
            collection_models.CollectionSummaryModel(
                id='id1',
                title='title',
                category='category',
                objective='objective',
                language_code='language_code',
                tags=['tags'],
                status=constants.ACTIVITY_STATUS_PRIVATE,
                community_owned=False,
                owner_ids=['owner_ids'],
                editor_ids=['editor_ids'],
                viewer_ids=['viewer_ids'],
                contributor_ids=[''],
                contributors_summary={},
                version=0,
                node_count=0,
                collection_model_last_updated=None,
                collection_model_created_on=None,
            ))
        uneditable_collection_summary_model.put()
        collection_summary_models = (
            collection_models.CollectionSummaryModel
            .get_at_least_editable('a'))
        self.assertEqual(1, len(collection_summary_models))
        self.assertEqual('id0', collection_summary_models[0].id)
        collection_summary_models = (
            collection_models.CollectionSummaryModel
            .get_at_least_editable('viewer_ids'))
        self.assertEqual(0, len(collection_summary_models))
|
# coding: utf-8
# @时间 : 2022/1/18 8:42 上午
# @作者 : 文山
# @邮箱 : wolaizhinidexin@163.com
# @作用 :
# @文件 : predict.py
# @微信 :qwentest123
import numpy as np
import pandas as pd
from tensorflow.keras.layers import Dense, Flatten, Conv2D, AvgPool2D, MaxPool2D
from tensorflow.keras import Model
import json
from PIL import Image
import time
from model19 import DarkNet19
from model53 import DarkNet53
from modelcsp import CSPDarkNet53
def main(img_path):
    """Classify one flower image with CSPDarkNet53 and print the result.

    Loads the image, resizes it to the network input size, scales pixels to
    [0, 1], restores pre-trained weights and prints the predicted class
    together with its score.
    """
    img_height = 224
    img_width = 224
    img = Image.open(img_path)
    img = img.resize((img_width, img_height))
    img = np.array(img) / 255.
    # Add a leading batch dimension, shifting the other axes right:
    # (h, w, c) -> (1, h, w, c).
    img = (np.expand_dims(img, 0))
    # Index -> label mapping; keys are strings to match np.argmax via str().
    class_indices = {"0": "daisy", "1": "dandelion", "2": "roses", "3": "sunflowers", "4": "tulips"}
    model = CSPDarkNet53(num_class=len(class_indices))
    model.build((1, 224, 224, 3))
    weight_path = "./weights/csp_darknet_5310.h5"
    model.load_weights(weight_path)
    t1 = time.time()
    result = model.predict(img)
    # The printed label means "prediction time" (kept in Chinese as the
    # program's runtime output).
    print("预测时间={}".format(time.time() - t1))
    # np.squeeze drops axes of length 1, e.g. a (1, 5) matrix becomes (5,).
    result = np.squeeze(result)
    predict_class = np.argmax(result)
    # Predicted label with its score.
    # NOTE(review): '{:.2}' prints two significant digits; presumably the
    # model output is a softmax probability -- confirm in the model code.
    res = "class: {} prob: {:.2}".format(class_indices[str(predict_class)],
                                         result[predict_class])
    print(res)
# Script entry point: classify a sample image when run directly.
if __name__ == "__main__":
    main('../AlexNet/test/1.jpeg')
|
# -*- coding: utf-8 -*-
from benedict import benedict
import time
import unittest
class github_issue_0039_test_case(unittest.TestCase):
    """
    https://github.com/fabiocaccamo/python-benedict/issues/39
    To run this specific test:
    - Run python -m unittest tests.github.test_issue_0039
    """

    def test_performance(self):
        data = benedict()
        outer_iterations = 500
        inner_iterations = 100

        # Timed keypath writes.
        started = time.time()
        for i in range(outer_iterations):
            for j in range(inner_iterations):
                data.set("{}.{}".format(i, j), "text-{}-{}".format(i, j))
        # print(data.dump())
        elapsed = (time.time() - started)
        # print('benedict set: {} seconds'.format(elapsed))
        # self.assertTrue(elapsed < 5)

        # Timed keypath reads against the populated dict.
        started = time.time()
        for i in range(outer_iterations):
            for j in range(inner_iterations):
                data.get("{}.{}".format(i, j), None)
        elapsed = (time.time() - started)
        # print('benedict get: {} seconds'.format(elapsed))
        # self.assertTrue(elapsed < 5)

        # Timed reads that all miss and fall back to the default.
        data.clear()
        started = time.time()
        for i in range(outer_iterations):
            for j in range(inner_iterations):
                data.get("{}.{}".format(i, j), None)
        elapsed = (time.time() - started)
        # print('benedict get (default): {} seconds'.format(elapsed))
        # self.assertTrue(elapsed < 5)
|
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from PIL import Image, ImageFile, ImagePalette, _binary
# Maps a PSD (color mode, bit depth) pair to the PIL mode to decode into and
# the minimum number of channels the file must provide for that mode.
MODES = {
    # (photoshop mode, bits) -> (pil mode, required channels)
    (0, 1): ("1", 1),
    (0, 8): ("L", 1),
    (1, 8): ("L", 1),
    (2, 8): ("P", 1),
    (3, 8): ("RGB", 3),
    (4, 8): ("CMYK", 4),
    (7, 8): ("L", 1), # FIXME: multilayer
    (8, 8): ("L", 1), # duotone
    (9, 8): ("LAB", 3)
}
#
# helpers
# Big-endian integer readers from PIL's private _binary module.
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
    """ImageFile implementation for Adobe Photoshop (PSD) images.

    Layers are exposed as frames: frame 0 is the merged composite image,
    frames 1..n_frames map to individual layers (see :meth:`seek`).
    """
    format = "PSD"
    format_description = "Adobe Photoshop"
    def _open(self):
        """Parse the PSD header and the four top-level file sections."""
        read = self.fp.read
        #
        # header: 26 bytes -- magic, version, channels, rows, cols, depth, mode
        s = read(26)
        if s[:4] != b"8BPS" or i16(s[4:]) != 1:
            raise SyntaxError("not a PSD file")
        psd_bits = i16(s[22:])
        psd_channels = i16(s[12:])
        psd_mode = i16(s[24:])
        # Translate (photoshop mode, depth) into a PIL mode; unsupported
        # combinations raise KeyError here.
        mode, channels = MODES[(psd_mode, psd_bits)]
        if channels > psd_channels:
            raise IOError("not enough channels")
        self.mode = mode
        # Header stores rows before columns; PIL size is (width, height).
        self.size = i32(s[18:]), i32(s[14:])
        #
        # color mode data (palette for "P" mode images)
        size = i32(read(4))
        if size:
            data = read(size)
            if mode == "P" and size == 768:
                self.palette = ImagePalette.raw("RGB;L", data)
        #
        # image resources
        self.resources = []
        size = i32(read(4))
        if size:
            # load resources
            end = self.fp.tell() + size
            while self.fp.tell() < end:
                signature = read(4)
                id = i16(read(2))
                # Pascal string: 1 length byte, padded to an even total length.
                name = read(i8(read(1)))
                if not (len(name) & 1):
                    read(1) # padding
                data = read(i32(read(4)))
                if (len(data) & 1):
                    read(1) # padding
                self.resources.append((id, name, data))
                if id == 1039: # ICC profile
                    self.info["icc_profile"] = data
        #
        # layer and mask information
        self.layers = []
        size = i32(read(4))
        if size:
            end = self.fp.tell() + size
            size = i32(read(4))
            if size:
                self.layers = _layerinfo(self.fp)
            # Skip whatever remains of the section regardless of layer parsing.
            self.fp.seek(end)
        #
        # image descriptor (tiles for the merged composite image)
        self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
        # keep the file open
        self._fp = self.fp
        self.frame = 0
    @property
    def n_frames(self):
        """Number of layer frames reachable through seek()."""
        return len(self.layers)
    @property
    def is_animated(self):
        """True when the file carries more than one layer."""
        return len(self.layers) > 1
    def seek(self, layer):
        """Select layer `layer` (1..n_frames) as the current frame.

        Returns the layer's (name, bbox); raises EOFError for an
        out-of-range layer number.
        """
        # seek to given layer (1..max)
        if layer == self.frame:
            return
        try:
            if layer <= 0:
                raise IndexError
            name, mode, bbox, tile = self.layers[layer-1]
            self.mode = mode
            self.tile = tile
            self.frame = layer
            self.fp = self._fp
            return name, bbox
        except IndexError:
            raise EOFError("no such layer")
    def tell(self):
        """Return the current frame number."""
        # return layer number (0=image, 1..max=layers)
        return self.frame
    def load_prepare(self):
        """Allocate image memory matching the current mode/size before load."""
        # create image memory if necessary
        if not self.im or\
           self.im.mode != self.mode or self.im.size != self.size:
            self.im = Image.core.fill(self.mode, self.size, 0)
        # create palette (optional)
        if self.mode == "P":
            Image.Image.load(self)
def _layerinfo(file):
    """Parse the PSD layer-info block.

    Returns a list of (name, mode, bbox, tile) tuples, one per layer, with
    the tile descriptors read from the channel data that follows the
    layer records.
    """
    # read layerinfo block
    layers = []
    read = file.read
    # The layer count may be negative (a sign flag meaning the first alpha
    # channel holds the merged transparency); only the magnitude matters here.
    for i in range(abs(i16(read(2)))):
        # bounding box
        y0 = i32(read(4))
        x0 = i32(read(4))
        y1 = i32(read(4))
        x1 = i32(read(4))
        # image info
        info = []
        mode = []
        types = list(range(i16(read(2))))
        # More than 4 channels is unsupported; skip the layer entirely.
        if len(types) > 4:
            continue
        # NOTE(review): this inner loop reuses the outer loop variable `i`;
        # harmless in Python (the outer `for` rebinds from its iterator each
        # iteration, and `i` is reset below), but worth renaming eventually.
        for i in types:
            # Channel id: 0..2 map to R/G/B, 65535 (-1) is the alpha channel.
            type = i16(read(2))
            if type == 65535:
                m = "A"
            else:
                m = "RGBA"[type]
            mode.append(m)
            size = i32(read(4))
            info.append((m, size))
        # figure out the image mode
        mode.sort()
        if mode == ["R"]:
            mode = "L"
        elif mode == ["B", "G", "R"]:
            mode = "RGB"
        elif mode == ["A", "B", "G", "R"]:
            mode = "RGBA"
        else:
            mode = None # unknown
        # skip over blend flags and extra information
        filler = read(12)
        name = ""
        size = i32(read(4))
        combined = 0
        if size:
            # Extra-data section: layer mask, blending ranges, layer name.
            length = i32(read(4))
            if length:
                mask_y = i32(read(4))
                mask_x = i32(read(4))
                mask_h = i32(read(4)) - mask_y
                mask_w = i32(read(4)) - mask_x
                file.seek(length - 16, 1)
            combined += length + 4
            length = i32(read(4))
            if length:
                file.seek(length, 1)
            combined += length + 4
            # Layer name is a Pascal string (1 length byte).
            length = i8(read(1))
            if length:
                # Don't know the proper encoding,
                # Latin-1 should be a good guess
                name = read(length).decode('latin-1', 'replace')
            combined += length + 1
            # Skip whatever extra data was not consumed above.
            file.seek(size - combined, 1)
        layers.append((name, mode, (x0, y0, x1, y1)))
    # get tiles
    # Channel data follows the layer records in the same order; attach the
    # resulting tile descriptors to each layer entry.
    i = 0
    for name, mode, bbox in layers:
        tile = []
        for m in mode:
            t = _maketile(file, m, bbox, 1)
            if t:
                tile.extend(t)
        layers[i] = name, mode, bbox, tile
        i += 1
    return layers
def _maketile(file, mode, bbox, channels):
    """Build the PIL tile descriptor list for one image or layer.

    Supports compression 0 (raw) and 1 (packbits); returns None for any
    other compression method. Leaves the file positioned just past the
    pixel data (plus pad byte when the offset is odd).
    """
    tile = None
    read = file.read
    compression = i16(read(2))
    xsize = bbox[2] - bbox[0]
    ysize = bbox[3] - bbox[1]
    offset = file.tell()
    if compression == 0:
        #
        # raw compression
        tile = []
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                # CMYK channel data is stored inverted.
                layer += ";I"
            tile.append(("raw", bbox, offset, layer))
            offset = offset + xsize*ysize
    elif compression == 1:
        #
        # packbits compression
        i = 0
        tile = []
        # A table of per-row byte counts precedes the data:
        # 2 bytes per row per channel.
        bytecount = read(channels * ysize * 2)
        offset = file.tell()
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                layer += ";I"
            tile.append(
                ("packbits", bbox, offset, layer)
            )
            # Advance past this channel's compressed rows using the table.
            for y in range(ysize):
                offset = offset + i16(bytecount[i:i+2])
                i += 2
    file.seek(offset)
    if offset & 1:
        read(1) # padding
    return tile
# --------------------------------------------------------------------
# registry
# Register the plugin: sniff via _accept's magic-byte check, decode with
# PsdImageFile, and associate the .psd extension.
Image.register_open("PSD", PsdImageFile, _accept)
Image.register_extension("PSD", ".psd")
|
"""Bokeh loopitplot."""
import numpy as np
from bokeh.models import BoxAnnotation
from matplotlib.colors import hsv_to_rgb, rgb_to_hsv, to_hex, to_rgb
from xarray import DataArray
from ....stats.density_utils import kde
from ...plot_utils import _scale_fig_size
from .. import show_layout
from . import backend_kwarg_defaults, create_axes_grid
def plot_loo_pit(
    ax,
    figsize,
    ecdf,
    loo_pit,
    loo_pit_ecdf,
    unif_ecdf,
    p975,
    p025,
    fill_kwargs,
    ecdf_fill,
    use_hdi,
    x_vals,
    hdi_kwargs,
    hdi_odds,
    n_unif,
    unif,
    plot_unif_kwargs,
    loo_pit_kde,
    legend,  # pylint: disable=unused-argument
    y_hat,
    y,
    color,
    textsize,
    hdi_prob,
    plot_kwargs,
    backend_kwargs,
    show,
):
    """Bokeh loo pit plot.

    Draws either the difference between the LOO-PIT ECDF and the uniform
    ECDF (``ecdf=True``) or the LOO-PIT KDE overlaid on simulated uniform
    references. Credible bounds are rendered as a filled patch
    (``ecdf_fill``), an HDI ``BoxAnnotation`` (``use_hdi``) or plain
    envelope lines. Returns the bokeh figure the data was drawn on.
    """
    if backend_kwargs is None:
        backend_kwargs = {}
    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }
    (figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)
    if ax is None:
        backend_kwargs.setdefault("x_range", (0, 1))
        ax = create_axes_grid(
            1,
            figsize=figsize,
            squeeze=True,
            backend_kwargs=backend_kwargs,
        )
    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    plot_kwargs.setdefault("color", to_hex(color))
    plot_kwargs.setdefault("linewidth", linewidth * 1.4)
    # Label after whichever of y / y_hat carries a usable name.
    if isinstance(y, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
    elif isinstance(y, DataArray) and y.name is not None:
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
    elif isinstance(y_hat, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
    elif isinstance(y_hat, DataArray) and y_hat.name is not None:
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
    else:
        label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"
    plot_kwargs.setdefault("legend_label", label)
    plot_unif_kwargs = {} if plot_unif_kwargs is None else plot_unif_kwargs
    # Uniform references use a lighter shade of the main color: halve the
    # saturation and push the value halfway towards white.
    light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
    light_color[1] /= 2  # pylint: disable=unsupported-assignment-operation
    light_color[2] += (1 - light_color[2]) / 2  # pylint: disable=unsupported-assignment-operation
    plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
    plot_unif_kwargs.setdefault("alpha", 0.5)
    plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)
    if ecdf:
        n_data_points = loo_pit.size
        # Step rendering only for small samples; it becomes noise above ~100.
        plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
        plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
        if ecdf_fill:
            if fill_kwargs is None:
                fill_kwargs = {}
            fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
            fill_kwargs.setdefault("alpha", 0.5)
            fill_kwargs.setdefault(
                "step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
            )
            fill_kwargs.setdefault("legend_label", "{:.3g}% credible interval".format(hdi_prob))
    elif use_hdi:
        if hdi_kwargs is None:
            hdi_kwargs = {}
        hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
        hdi_kwargs.setdefault("alpha", 0.35)
    if ecdf:
        # Main curve: LOO-PIT ECDF minus the uniform ECDF, anchored at 0 and 1.
        if plot_kwargs.get("drawstyle") == "steps-mid":
            ax.step(
                np.hstack((0, loo_pit, 1)),
                np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
                line_color=plot_kwargs.get("color", "black"),
                line_alpha=plot_kwargs.get("alpha", 1.0),
                line_width=plot_kwargs.get("linewidth", 3.0),
                mode="center",
            )
        else:
            ax.line(
                np.hstack((0, loo_pit, 1)),
                np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
                line_color=plot_kwargs.get("color", "black"),
                line_alpha=plot_kwargs.get("alpha", 1.0),
                line_width=plot_kwargs.get("linewidth", 3.0),
            )
        if ecdf_fill:
            # Bokeh has no native stepped patch, so both the stepped and the
            # smooth variants render the very same patch; the previous
            # duplicated if/else was collapsed into a single call.
            ax.patch(
                np.concatenate((unif_ecdf, unif_ecdf[::-1])),
                np.concatenate((p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
                fill_color=fill_kwargs.get("color"),
                fill_alpha=fill_kwargs.get("alpha", 1.0),
            )
        else:
            # Without a fill, draw the upper/lower credible bounds as lines.
            if fill_kwargs is not None and fill_kwargs.get("drawstyle") == "steps-mid":
                ax.step(
                    unif_ecdf,
                    p975 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    # BUG FIX: previously read from plot_kwargs, unlike the
                    # three sibling envelope calls which use plot_unif_kwargs.
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                    mode="center",
                )
                ax.step(
                    unif_ecdf,
                    p025 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                    mode="center",
                )
            else:
                ax.line(
                    unif_ecdf,
                    p975 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
                ax.line(
                    unif_ecdf,
                    p025 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
    else:
        if use_hdi:
            patch = BoxAnnotation(
                bottom=hdi_odds[1],
                top=hdi_odds[0],
                fill_alpha=hdi_kwargs.pop("alpha"),
                fill_color=hdi_kwargs.pop("color"),
                **hdi_kwargs
            )
            patch.level = "underlay"
            ax.add_layout(patch)
            # Adds horizontal reference line
            ax.line([0, 1], [1, 1], line_color="white", line_width=1.5)
        else:
            # Overlay KDEs of simulated uniform samples as the reference band.
            for idx in range(n_unif):
                x_s, unif_density = kde(unif[idx, :])
                ax.line(
                    x_s,
                    unif_density,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 0.1),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
        # The LOO-PIT KDE itself, drawn for both HDI and non-HDI variants.
        ax.line(
            x_vals,
            loo_pit_kde,
            line_color=plot_kwargs.get("color", "black"),
            line_alpha=plot_kwargs.get("alpha", 1.0),
            line_width=plot_kwargs.get("linewidth", 3.0),
        )
    # Sets xlim(0, 1)
    ax.line(0, 0)
    ax.line(1, 0)
    show_layout(ax, show)
    return ax
|
from .patient import CreatePatient
from .pathway import CreatePathway
from .decision_point import CreateDecisionPoint
from .user import CreateUser
from .milestone import ImportMilestone
from .role import create_role
|
from diskimgcreator import try_create_image, _parse_size, _set_verbose
from diskimgmounter import try_mount_image
import unittest
import os
import datetime
_set_verbose(True)  # enable verbose diskimgcreator output for all tests below
class TestParseSize(unittest.TestCase):
    """Unit tests for the ``_parse_size`` size-string parser."""

    def test_parse_size(self):
        """Bare numbers default to MB; KB/MB/GB are decimal, KiB/MiB/GiB binary."""
        cases = (
            ("5.5", 5.5 * 1000 ** 2),
            ("100KB", 1000 * 100),
            ("1KiB", 1024),
            ("1MB", 1000 * 1000),
            ("1MiB", 1024 * 1024),
            ("1.5MiB", 1024 * 1024 * 1.5),
            ("1.75GiB", 1024 * 1024 * 1024 * 1.75),
        )
        for text, expected in cases:
            self.assertEqual(_parse_size(text), expected)
class TestCreateImage(unittest.TestCase):
    """Smoke tests: build disk images from the example trees and mount one.

    These tests write real image files under ../temp; there are no value
    assertions, success simply means "did not raise".
    """
    def test_create_image(self):
        try_create_image(
            "../example01", "../temp/example01.img", overwrite=True, use_partfs=True
        )
    def test_create_image2(self):
        try_create_image(
            "../example02", "../temp/example02.img", overwrite=True, use_partfs=True
        )
    def test_create_image3(self):
        try_create_image(
            "../example03", "../temp/example03.img", overwrite=True, use_partfs=True
        )
    def test_mount_image1(self):
        # Mounts partitions 1 and 2 of the image built from example03.
        # NOTE(review): relies on ../temp/example03.img existing (produced by
        # test_create_image3) -- confirm the intended test ordering.
        with try_mount_image(
            "../temp/example03.img", partitions=[1, 2], use_partfs=True
        ):
            print("Do with the mounts!")
if __name__ == "__main__":
    # Change working directory to the tests.py path
    # so the relative ../exampleNN and ../temp paths above resolve correctly.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    unittest.main()
|
import typing
from authlib.oauth2.rfc6749 import OAuth2Error
from .authorization_server import AuthorizationServer, RevocationEndpoint
from .resource_protector import ResourceProtector, resource_protected
from .grants import AuthorizationCodeGrant, RefreshTokenGrant
if typing.TYPE_CHECKING:
from aiohttp.web import Application
else:
Application = typing.Any
__all__ = ['setup', 'resource_protected', 'OAuth2Error']
def setup(app: Application):
    """Wire the OAuth2 authorization server and resource protector into *app*."""
    server = AuthorizationServer()
    # Only the code and refresh-token grants are enabled; the implicit,
    # client-credentials and password grants remain deliberately disabled.
    for grant in (AuthorizationCodeGrant, RefreshTokenGrant):
        server.register_grant(grant)
    server.register_endpoint(RevocationEndpoint)
    protector = ResourceProtector()
    app['oauth_server'] = server
    app['resource_protector'] = protector
|
#! /usr/bin/python3
import os
import re
import subprocess
# Run checkpatch for the lint-all tool; output:
#
# stdout: lines in the form FILE:LINENUMBER:message
# return: NUMBER-OF-ERRORS, WARNINGS, BLOCKAGES
# Matches C/C++ sources and headers -- the only files checkpatch cares about.
regex_c = re.compile(r".*\.(c|C|cpp|CPP|h|HH|hxx|cxx)$")
# Name under which the lint-all driver registers this checker.
lint_checkpatch_name = "checkpatch"
def lint_checkpatch(repo, cf):
    """Run checkpatch.pl over the repo's pending changes for lint-all.

    Reports one warning/error per checkpatch WARNING/CHECK/ERROR line via
    the repo's reporting API, with paths made relative to the tree being
    linted.

    :param repo: repository object exposing ``filenames``, ``log`` and the
        ``message``/``warning``/``error``/``blockage`` reporting API.
    :param cf: per-file context; checkpatch runs once per repo, so any
        per-file invocation is a no-op.
    """
    if cf:
        # Repo-wide tool: skip the per-file invocations.
        return
    # Only run when there is at least one C/C++ source or header.
    for filename in repo.filenames:
        if regex_c.match(filename):
            break
    else:
        # No C or H files
        repo.log.info("not running, as there are no C/C++ files")
        return
    # Deployment specific -- dep on what the environment is saying
    # FIXME: what if ZEPHYR_BASE is generally defined in the
    # environment but we want to checkpatch with other settings?
    if 'ZEPHYR_BASE' in os.environ:
        # The typedefs flag has to be given here vs the config file so we
        # have access to the path to the Zephyr kernel tree
        flags_deployment = "--typedefsfile=" \
                           "$ZEPHYR_BASE/scripts/checkpatch/typedefsfile"
        cmd = "$ZEPHYR_BASE/scripts/checkpatch.pl"
    else:
        # NOTE(review): without ZEPHYR_BASE this warns and bails out, so a
        # generic `checkpatch.pl` fallback is never exercised; the dead
        # assignments that used to sit here were removed. Confirm whether
        # the early return is intentional.
        repo.warning("Using generic checkpatch (ZEPHYR_BASE undefined)")
        return
    checkpatch_flags = "--patch --showfile " \
                       "--no-summary --terse " \
                       + flags_deployment
    try:
        # Feed either the working-tree diff (dirty tree) or the last commit
        # (clean tree) to checkpatch on stdin.
        if repo.is_dirty(untracked_files = False):
            cmd = "set -o pipefail; " \
                  "git -C '%s' diff HEAD" \
                  " | %s %s - 2>&1" \
                  % (repo.working_tree_dir, cmd, checkpatch_flags)
        else:
            cmd = "set -o pipefail; " \
                  "git -C '%s' format-patch --stdout HEAD~1 " \
                  " | %s %s - 2>&1" \
                  % (repo.working_tree_dir, cmd, checkpatch_flags)
        # yeah, this is ugly...some versions of Ubuntu use not
        # bash as a default shell and we need pipefail--I bet
        # there is a better way to do it, but I am sleepy now
        cmdline = [ 'bash', '-c', cmd ]
        repo.log.debug("running %s", cmdline)
        output = subprocess.check_output(
            cmdline, stderr = subprocess.STDOUT, universal_newlines = True)
    except FileNotFoundError:
        repo.blockage("Can't find checkpatch? for Zephyr, export ZEPHYR_BASE")
        return
    except subprocess.CalledProcessError as e:
        # checkpatch exits non-zero when it finds problems; keep its output.
        output = e.output
    if output:
        repo.message("E: checkpatch reports errors or warnings")
        regex = re.compile(":(?P<line_number>[0-9]+): (?P<kind>(ERROR|WARNING|CHECK)):")
        # checkpatch will always print the path relative to the origin
        # of the repository, so we complement so the output is
        # consistent
        if repo.relpath == ".":
            reldir = ""
        else:
            reldir = repo.relpath + "/"
        for line in output.splitlines():
            line = line.strip()
            m = regex.search(line)
            if not m:
                continue
            line_number = int(m.groupdict()['line_number'])
            kind = m.groupdict()['kind']
            # NOTE(review): the whole stripped line (not the parsed
            # line_number) is passed as `line_number=`, matching the original
            # behavior -- confirm the reporting API expects the full message.
            if kind == 'WARNING' or kind == 'CHECK':
                repo.warning(reldir, line_number = line)
            elif kind == 'ERROR':
                repo.error(reldir, line_number = line)
            else:
                # Unreachable: the regex only matches the three kinds above.
                # The old code had `assert True, ...`, which could never fire;
                # fail loudly if checkpatch's vocabulary ever changes.
                raise AssertionError("Unknown kind: %s" % kind)
|
from bisect import bisect_left
from functools import lru_cache, reduce
from typing import List, Dict, Set, Tuple, Any, Optional, Union # mypy type checking
from .data import Attribute, Race
from .unit_command import UnitCommand
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
from .constants import ZERGLING
# Link-name fragments of abilities whose morphs cost nothing
# (see AbilityData.is_free_morph).
FREE_MORPH_ABILITY_CATEGORIES = [
    "Lower", "Raise", # SUPPLYDEPOT
    "Land", "Lift", # Flying buildings
]
def split_camel_case(text) -> list:
    """Split words from CamelCase text.

    Each uppercase character starts a new word; every other character is
    appended to the current word. Unlike the previous reduce-based version,
    a leading non-uppercase character no longer raises IndexError -- it
    simply starts the first word.

    >>> split_camel_case("BarracksTechLabResearch")
    ['Barracks', 'Tech', 'Lab', 'Research']
    """
    words: list = []
    for character in text:
        if character.isupper() or not words:
            words.append(character)
        else:
            words[-1] += character
    return words
class GameData:
    """Indexed ability/unit/upgrade data built from the raw SC2 proto blob."""
    def __init__(self, data):
        # Only keep abilities whose id is a known, non-zero AbilityId member.
        ids = set(a.value for a in AbilityId if a.value != 0)
        self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities if a.ability_id in ids}
        self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
        self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every GameData instance alive for the cache's lifetime (ruff B019);
    # acceptable if a single GameData exists per game -- confirm.
    @lru_cache(maxsize=256)
    def calculate_ability_cost(self, ability) -> "Cost":
        """Return the Cost of `ability` (AbilityId, UnitCommand or AbilityData).

        Scans units whose creation ability matches, applying the zergling-pair,
        morph and zerg-structure corrections; then upgrades; falls back to a
        zero Cost when nothing matches.
        """
        if isinstance(ability, AbilityId):
            ability = self.abilities[ability.value]
        elif isinstance(ability, UnitCommand):
            ability = self.abilities[ability.ability.value]
        assert isinstance(ability, AbilityData), f"C: {ability}"
        for unit in self.units.values():
            if unit.creation_ability is None:
                continue
            if not AbilityData.id_exists(unit.creation_ability.id.value):
                continue
            if unit.creation_ability.is_free_morph:
                continue
            if unit.creation_ability == ability:
                if unit.id == ZERGLING:
                    # HARD CODED: zerglings are generated in pairs
                    return Cost(
                        unit.cost.minerals * 2,
                        unit.cost.vespene * 2,
                        unit.cost.time
                    )
                # Correction for morphing units, e.g. orbital would return 550/0 instead of actual 150/0
                morph_cost = unit.morph_cost
                if morph_cost: # can be None
                    return morph_cost
                # Correction for zerg structures without morph: Extractor would return 75 instead of actual 25
                return unit.cost_zerg_corrected
        for upgrade in self.upgrades.values():
            if upgrade.research_ability == ability:
                return upgrade.cost
        # Nothing known is created or researched by this ability.
        return Cost(0, 0)
class AbilityData:
    """Wrapper around one ability proto entry, plus a class-level id index."""
    # Class-level sorted list of all known ability id values, built once at
    # import time and used by id_exists() for binary search.
    ability_ids: List[int] = [] # sorted list
    for ability_id in AbilityId: # 1000 items Enum is slow
        ability_ids.append(ability_id.value)
    ability_ids.remove(0)
    ability_ids.sort()
    @classmethod
    def id_exists(cls, ability_id):
        """Return True when `ability_id` is a known, non-zero AbilityId value."""
        assert isinstance(ability_id, int), f"Wrong type: {ability_id} is not int"
        if ability_id == 0:
            return False
        i = bisect_left(cls.ability_ids, ability_id) # quick binary search
        return i != len(cls.ability_ids) and cls.ability_ids[i] == ability_id
    def __init__(self, game_data, proto):
        # game_data: back-reference used to resolve costs lazily.
        self._game_data = game_data
        self._proto = proto
        assert self.id != 0
    def __repr__(self) -> str:
        return f"AbilityData(name={self._proto.button_name})"
    @property
    def id(self) -> AbilityId:
        """Effective ability id; remapped abilities resolve to their target."""
        if self._proto.remaps_to_ability_id:
            return AbilityId(self._proto.remaps_to_ability_id)
        return AbilityId(self._proto.ability_id)
    @property
    def link_name(self) -> str:
        """ For Stimpack this returns 'BarracksTechLabResearch' """
        return self._proto.link_name
    @property
    def button_name(self) -> str:
        """ For Stimpack this returns 'Stimpack' """
        return self._proto.button_name
    @property
    def friendly_name(self) -> str:
        """ For Stimpack this returns 'Research Stimpack' """
        return self._proto.friendly_name
    @property
    def is_free_morph(self) -> bool:
        """True when the link name contains a zero-cost morph word (Lower/Raise/Land/Lift)."""
        parts = split_camel_case(self._proto.link_name)
        for p in parts:
            if p in FREE_MORPH_ABILITY_CATEGORIES:
                return True
        return False
    @property
    def cost(self) -> "Cost":
        """Resolved (and cached) cost of executing this ability."""
        return self._game_data.calculate_ability_cost(self.id)
class UnitTypeData:
    """Wrapper around one unit type proto entry, with cost helpers."""
    def __init__(self, game_data, proto):
        # game_data: back-reference used to resolve abilities/units by id.
        self._game_data = game_data
        self._proto = proto
    def __repr__(self) -> str:
        return f"UnitTypeData(name={self.name})"
    @property
    def id(self) -> UnitTypeId:
        return UnitTypeId(self._proto.unit_id)
    @property
    def name(self) -> str:
        return self._proto.name
    @property
    def creation_ability(self) -> AbilityData:
        """The ability that creates this unit, or None when unknown."""
        if self._proto.ability_id == 0:
            return None
        if self._proto.ability_id not in self._game_data.abilities:
            return None
        return self._game_data.abilities[self._proto.ability_id]
    @property
    def attributes(self) -> List[Attribute]:
        return self._proto.attributes
    def has_attribute(self, attr) -> bool:
        """Return True when this unit type carries the given Attribute."""
        assert isinstance(attr, Attribute)
        return attr in self.attributes
    @property
    def has_minerals(self) -> bool:
        return self._proto.has_minerals
    @property
    def has_vespene(self) -> bool:
        return self._proto.has_vespene
    @property
    def cargo_size(self) -> int:
        """ How much cargo this unit uses up in cargo_space """
        return self._proto.cargo_size
    @property
    def tech_requirement(self) -> Optional[UnitTypeId]:
        """ Tech-building requirement of buildings - may work for units but unreliably """
        if self._proto.tech_requirement == 0:
            return None
        if self._proto.tech_requirement not in self._game_data.units:
            return None
        return UnitTypeId(self._proto.tech_requirement)
    @property
    def tech_alias(self) -> Optional[List[UnitTypeId]]:
        """ Building tech equality, e.g. OrbitalCommand is the same as CommandCenter """
        """ Building tech equality, e.g. Hive is the same as Lair and Hatchery """
        # NOTE(review): the bare strings above/below are no-op statements, not
        # docstrings; kept verbatim to avoid touching code tokens.
        return_list = []
        for tech_alias in self._proto.tech_alias:
            if tech_alias in self._game_data.units:
                return_list.append(UnitTypeId(tech_alias))
        """ For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair] """
        """ For SCV, this returns None """
        if return_list:
            return return_list
        return None
    @property
    def unit_alias(self) -> Optional[UnitTypeId]:
        """ Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand """
        if self._proto.unit_alias == 0:
            return None
        if self._proto.unit_alias not in self._game_data.units:
            return None
        """ For flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand """
        return UnitTypeId(self._proto.unit_alias)
    @property
    def race(self) -> Race:
        return Race(self._proto.race)
    @property
    def cost(self) -> "Cost":
        """Raw mineral/vespene/build-time cost straight from the proto."""
        return Cost(
            self._proto.mineral_cost,
            self._proto.vespene_cost,
            self._proto.build_time
        )
    @property
    def cost_zerg_corrected(self) -> "Cost":
        """ This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
        if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
            # a = self._game_data.units(UnitTypeId.ZERGLING)
            # print(a)
            # print(vars(a))
            # Zerg structures consume a 50-mineral drone; subtract its cost.
            return Cost(
                self._proto.mineral_cost - 50,
                self._proto.vespene_cost,
                self._proto.build_time
            )
        else:
            return self.cost
    @property
    def morph_cost(self) -> Optional["Cost"]:
        """ This returns 150 minerals for OrbitalCommand instead of 550 """
        # Fix for BARRACKSREACTOR which has tech alias [REACTOR] which has (0, 0) cost
        if self.tech_alias is None or self.tech_alias[0] in {UnitTypeId.TECHLAB, UnitTypeId.REACTOR}:
            return None
        # Morphing a HIVE would have HATCHERY and LAIR in the tech alias - now subtract HIVE cost from LAIR cost instead of from HATCHERY cost
        tech_alias_cost_minerals = max([self._game_data.units[tech_alias.value].cost.minerals for tech_alias in self.tech_alias])
        tech_alias_cost_vespene = max([self._game_data.units[tech_alias.value].cost.vespene for tech_alias in self.tech_alias])
        return Cost(
            self._proto.mineral_cost - tech_alias_cost_minerals,
            self._proto.vespene_cost - tech_alias_cost_vespene,
            self._proto.build_time
        )
class UpgradeData:
    """Wrapper around one upgrade proto entry."""
    def __init__(self, game_data, proto):
        # game_data: back-reference used to resolve the research ability.
        self._game_data = game_data
        self._proto = proto
    def __repr__(self):
        return f"UpgradeData({self.name} - research ability: {self.research_ability}, {self.cost})"
    @property
    def name(self) -> str:
        return self._proto.name
    @property
    def research_ability(self) -> Optional[AbilityData]:
        """The ability that researches this upgrade, or None when unknown."""
        if self._proto.ability_id == 0:
            return None
        if self._proto.ability_id not in self._game_data.abilities:
            return None
        return self._game_data.abilities[self._proto.ability_id]
    @property
    def cost(self) -> "Cost":
        """Mineral/vespene/research-time cost straight from the proto."""
        return Cost(
            self._proto.mineral_cost,
            self._proto.vespene_cost,
            self._proto.research_time
        )
class Cost:
    """Mineral/vespene (and optional build-time) cost of a unit, ability or upgrade.

    Equality compares only ``minerals`` and ``vespene``; ``time`` is
    deliberately ignored so costs from different sources compare equal.
    """
    def __init__(self, minerals, vespene, time=None):
        self.minerals = minerals
        self.vespene = vespene
        self.time = time
    def __repr__(self) -> str:
        return f"Cost({self.minerals}, {self.vespene})"
    def __eq__(self, other) -> bool:
        # Return NotImplemented for foreign types instead of raising
        # AttributeError (the previous behavior), so `cost == 5` is False
        # rather than a crash.
        if not isinstance(other, Cost):
            return NotImplemented
        return self.minerals == other.minerals and self.vespene == other.vespene
    def __ne__(self, other) -> bool:
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import glob
from azurelinuxagent.common import event
from azurelinuxagent.common.protocol.wire import *
from tests.protocol.mockwiredata import *
data_with_bom = b'\xef\xbb\xbfhehe'  # payload prefixed with a UTF-8 byte-order mark
testurl = 'http://foo'  # placeholder status-blob URL shared by the tests
testtype = 'BlockBlob'  # status upload blob type shared by the tests
wireserver_url = '168.63.129.16'  # well-known Azure wireserver address
@patch("time.sleep")
@patch("azurelinuxagent.common.protocol.wire.CryptUtil")
@patch("azurelinuxagent.common.protocol.healthservice.HealthService._report")
class TestWireProtocol(AgentTestCase):
def setUp(self):
super(TestWireProtocol, self).setUp()
HostPluginProtocol.set_default_channel(False)
def _test_getters(self, test_data, __, MockCryptUtil, _):
MockCryptUtil.side_effect = test_data.mock_crypt_util
with patch.object(restutil, 'http_get', test_data.mock_http_get):
protocol = WireProtocol(wireserver_url)
protocol.detect()
protocol.get_vminfo()
protocol.get_certs()
ext_handlers, etag = protocol.get_ext_handlers()
for ext_handler in ext_handlers.extHandlers:
protocol.get_ext_handler_pkgs(ext_handler)
crt1 = os.path.join(self.tmp_dir,
'33B0ABCE4673538650971C10F7D7397E71561F35.crt')
crt2 = os.path.join(self.tmp_dir,
'4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.crt')
prv2 = os.path.join(self.tmp_dir,
'4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3.prv')
self.assertTrue(os.path.isfile(crt1))
self.assertTrue(os.path.isfile(crt2))
self.assertTrue(os.path.isfile(prv2))
self.assertEqual("1", protocol.get_incarnation())
def test_getters(self, *args):
"""Normal case"""
test_data = WireProtocolData(DATA_FILE)
self._test_getters(test_data, *args)
def test_getters_no_ext(self, *args):
"""Provision with agent is not checked"""
test_data = WireProtocolData(DATA_FILE_NO_EXT)
self._test_getters(test_data, *args)
def test_getters_ext_no_settings(self, *args):
"""Extensions without any settings"""
test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS)
self._test_getters(test_data, *args)
def test_getters_ext_no_public(self, *args):
"""Extensions without any public settings"""
test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC)
self._test_getters(test_data, *args)
@patch("azurelinuxagent.common.protocol.healthservice.HealthService.report_host_plugin_extension_artifact")
def test_getters_with_stale_goal_state(self, patch_report, *args):
test_data = WireProtocolData(DATA_FILE)
test_data.emulate_stale_goal_state = True
self._test_getters(test_data, *args)
# Ensure HostPlugin was invoked
self.assertEqual(1, test_data.call_counts["/versions"])
self.assertEqual(2, test_data.call_counts["extensionArtifact"])
# Ensure the expected number of HTTP calls were made
# -- Tracking calls to retrieve GoalState is problematic since it is
# fetched often; however, the dependent documents, such as the
# HostingEnvironmentConfig, will be retrieved the expected number
self.assertEqual(2, test_data.call_counts["hostingenvuri"])
self.assertEqual(1, patch_report.call_count)
def test_call_storage_kwargs(self, *args):
from azurelinuxagent.common.utils import restutil
with patch.object(restutil, 'http_get') as http_patch:
http_req = restutil.http_get
url = testurl
headers = {}
# no kwargs -- Default to True
WireClient.call_storage_service(http_req)
# kwargs, no use_proxy -- Default to True
WireClient.call_storage_service(http_req,
url,
headers)
# kwargs, use_proxy None -- Default to True
WireClient.call_storage_service(http_req,
url,
headers,
use_proxy=None)
# kwargs, use_proxy False -- Keep False
WireClient.call_storage_service(http_req,
url,
headers,
use_proxy=False)
# kwargs, use_proxy True -- Keep True
WireClient.call_storage_service(http_req,
url,
headers,
use_proxy=True)
# assert
self.assertTrue(http_patch.call_count == 5)
for i in range(0,5):
c = http_patch.call_args_list[i][-1]['use_proxy']
self.assertTrue(c == (True if i != 3 else False))
def test_status_blob_parsing(self, *args):
wire_protocol_client = WireProtocol(wireserver_url).client
wire_protocol_client.ext_conf = ExtensionsConfig(WireProtocolData(DATA_FILE).ext_conf)
self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob,
u'https://yuezhatest.blob.core.windows.net/vhds/test'
u'-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se'
u'=9999-01-01&sk=key1&sv=2014-02-14&sig'
u'=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D')
self.assertEqual(wire_protocol_client.ext_conf.status_upload_blob_type,
u'BlockBlob')
pass
def test_get_host_ga_plugin(self, *args):
wire_protocol_client = WireProtocol(wireserver_url).client
goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
with patch.object(WireClient, "get_goal_state", return_value = goal_state) as patch_get_goal_state:
host_plugin = wire_protocol_client.get_host_plugin()
self.assertEqual(goal_state.container_id, host_plugin.container_id)
self.assertEqual(goal_state.role_config_name, host_plugin.role_config_name)
self.assertEqual(1, patch_get_goal_state.call_count)
@patch("azurelinuxagent.common.utils.restutil.http_request", side_effect=IOError)
@patch("azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin")
@patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.get_artifact_request")
def test_download_ext_handler_pkg_fallback(self, patch_request, patch_get_host, patch_http, *args):
ext_uri = 'extension_uri'
host_uri = 'host_uri'
patch_get_host.return_value = HostPluginProtocol(host_uri, 'container_id', 'role_config')
patch_request.return_value = [host_uri, {}]
WireProtocol(wireserver_url).download_ext_handler_pkg(ext_uri)
self.assertEqual(patch_http.call_count, 2)
self.assertEqual(patch_request.call_count, 1)
self.assertEqual(patch_http.call_args_list[0][0][1], ext_uri)
self.assertEqual(patch_http.call_args_list[1][0][1], host_uri)
@patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
def test_upload_status_blob_default(self, *args):
    """
    Default status blob upload method is the host GA plugin: the direct
    StatusBlob.upload path must not be used when the plugin succeeds.
    """
    vmstatus = VMStatus(message="Ready", status="Ready")
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.status_upload_blob = testurl
    wire_protocol_client.ext_conf.status_upload_blob_type = testtype
    wire_protocol_client.status_blob.vm_status = vmstatus
    with patch.object(WireClient, "get_goal_state") as patch_get_goal_state:
        with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload:
            with patch.object(StatusBlob, "upload") as patch_default_upload:
                HostPluginProtocol.set_default_channel(False)
                wire_protocol_client.upload_status_blob()
                # do not call the direct method unless host plugin fails
                patch_default_upload.assert_not_called()
                # host plugin always fetches a goal state
                patch_get_goal_state.assert_called_once_with()
                # host plugin uploads the status blob
                patch_host_ga_plugin_upload.assert_called_once_with(ANY, testurl, 'BlockBlob')
@patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
def test_upload_status_blob_host_ga_plugin(self, *args):
    """The status blob should be uploaded via the host plugin's block-blob
    path; the direct upload path stays untouched and the default channel
    remains False afterwards."""
    vmstatus = VMStatus(message="Ready", status="Ready")
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.status_upload_blob = testurl
    wire_protocol_client.ext_conf.status_upload_blob_type = testtype
    wire_protocol_client.status_blob.vm_status = vmstatus
    goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
    with patch.object(HostPluginProtocol,
                      "ensure_initialized",
                      return_value=True):
        with patch.object(StatusBlob,
                          "upload",
                          return_value=False) as patch_default_upload:
            with patch.object(HostPluginProtocol,
                              "_put_block_blob_status") as patch_http:
                HostPluginProtocol.set_default_channel(False)
                wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
                wire_protocol_client.upload_status_blob()
                # The direct upload path must not be used at all.
                patch_default_upload.assert_not_called()
                self.assertEqual(1, wire_protocol_client.get_goal_state.call_count)
                patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob)
                self.assertFalse(HostPluginProtocol.is_default_channel())
@patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
@patch("azurelinuxagent.common.protocol.hostplugin.HostPluginProtocol.ensure_initialized")
def test_upload_status_blob_unknown_type_assumes_block(self, _, __, *args):
    """An unrecognized status blob type should be prepared as a BlockBlob
    and uploaded through the direct path."""
    vmstatus = VMStatus(message="Ready", status="Ready")
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.status_upload_blob = testurl
    # Deliberately not a legal blob type.
    wire_protocol_client.ext_conf.status_upload_blob_type = "NotALegalType"
    wire_protocol_client.status_blob.vm_status = vmstatus
    with patch.object(WireClient, "get_goal_state") as patch_get_goal_state:
        with patch.object(StatusBlob, "prepare") as patch_prepare:
            with patch.object(StatusBlob, "upload") as patch_default_upload:
                HostPluginProtocol.set_default_channel(False)
                wire_protocol_client.upload_status_blob()
                # The bogus type falls back to BlockBlob.
                patch_prepare.assert_called_once_with("BlockBlob")
                patch_default_upload.assert_called_once_with(testurl)
                patch_get_goal_state.assert_called_once_with()
@patch("azurelinuxagent.common.protocol.wire.WireClient.update_goal_state")
def test_upload_status_blob_reports_prepare_error(self, *args):
    """A failure while preparing the status blob raises ProtocolError and
    reports exactly one status event."""
    vmstatus = VMStatus(message="Ready", status="Ready")
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.status_upload_blob = testurl
    wire_protocol_client.ext_conf.status_upload_blob_type = testtype
    wire_protocol_client.status_blob.vm_status = vmstatus
    # NOTE(review): goal_state is never used in this test — candidate for removal.
    goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
    with patch.object(StatusBlob, "prepare", side_effect=Exception) as mock_prepare:
        with patch.object(WireClient, "report_status_event") as mock_event:
            self.assertRaises(ProtocolError, wire_protocol_client.upload_status_blob)
            self.assertEqual(1, mock_prepare.call_count)
            self.assertEqual(1, mock_event.call_count)
def test_get_in_vm_artifacts_profile_blob_not_available(self, *args):
    """get_artifacts_profile returns None when no blob URL is configured."""
    client = WireProtocol(wireserver_url).client
    client.ext_conf = ExtensionsConfig(None)
    # No artifacts profile blob configured at all.
    self.assertEqual(None, client.get_artifacts_profile())
    # A whitespace-only blob URL counts as unavailable too.
    client.ext_conf.artifacts_profile_blob = " "
    self.assertEqual(None, client.get_artifacts_profile())
def test_get_in_vm_artifacts_profile_response_body_not_valid(self, *args):
    """Invalid artifacts-profile response bodies must not produce a profile."""
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
    goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
    wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
    with patch.object(HostPluginProtocol, "get_artifact_request",
                      return_value = ['dummy_url', {}]) as host_plugin_get_artifact_url_and_headers:
        #Test when response body is None
        wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(None, 200))
        in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
        self.assertTrue(in_vm_artifacts_profile is None)
        #Test when response body is whitespace only
        wire_protocol_client.call_storage_service = Mock(return_value=MockResponse(' '.encode('utf-8'), 200))
        in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
        self.assertTrue(in_vm_artifacts_profile is None)
        #Test when response body is an empty json dictionary
        wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{ }'.encode('utf-8'), 200))
        in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
        self.assertEqual(dict(), in_vm_artifacts_profile.__dict__,
                         'If artifacts_profile_blob has empty json dictionary, in_vm_artifacts_profile '
                         'should contain nothing')
        host_plugin_get_artifact_url_and_headers.assert_called_with(testurl)
def test_get_in_vm_artifacts_profile_default(self, *args):
    """A valid artifacts-profile body yields a profile whose is_on_hold()
    reflects the 'onHold' value."""
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
    goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
    wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
    wire_protocol_client.call_storage_service = Mock(return_value=MockResponse('{"onHold": "true"}'.encode('utf-8'), 200))
    in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
    # The parsed JSON becomes the profile object's attributes.
    self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__)
    self.assertTrue(in_vm_artifacts_profile.is_on_hold())
def test_fetch_manifest_fallback(self, *args):
    """When fetching the manifest from the extension URI fails, the client
    retries through the host plugin channel before raising ProtocolError."""
    uri1 = ExtHandlerVersionUri()
    uri1.uri = 'ext_uri'
    uris = DataContractList(ExtHandlerVersionUri)
    uris.append(uri1)
    host_uri = 'host_uri'
    mock_host = HostPluginProtocol(host_uri,
                                   'container_id',
                                   'role_config')
    client = WireProtocol(wireserver_url).client
    # fetch always returns None, so both channels fail.
    with patch.object(WireClient,
                      "fetch",
                      return_value=None) as patch_fetch:
        with patch.object(WireClient,
                          "get_host_plugin",
                          return_value=mock_host):
            with patch.object(HostPluginProtocol,
                              "get_artifact_request",
                              return_value=[host_uri, {}]):
                HostPluginProtocol.set_default_channel(False)
                self.assertRaises(ProtocolError, client.fetch_manifest, uris)
                # First attempt hits the extension URI, the retry hits the host URI.
                self.assertEqual(patch_fetch.call_count, 2)
                self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri)
                self.assertEqual(patch_fetch.call_args_list[1][0][0], host_uri)
def test_get_in_vm_artifacts_profile_host_ga_plugin(self, *args):
    """When the direct fetch returns nothing, the artifacts profile is
    fetched through the host GA plugin channel."""
    wire_protocol_client = WireProtocol(wireserver_url).client
    wire_protocol_client.ext_conf = ExtensionsConfig(None)
    wire_protocol_client.ext_conf.artifacts_profile_blob = testurl
    goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
    wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
    # First fetch (direct) fails with None; the second (host plugin) succeeds.
    wire_protocol_client.fetch = Mock(side_effect=[None, '{"onHold": "true"}'])
    with patch.object(HostPluginProtocol,
                      "get_artifact_request",
                      return_value=['dummy_url', {}]) as artifact_request:
        in_vm_artifacts_profile = wire_protocol_client.get_artifacts_profile()
        self.assertTrue(in_vm_artifacts_profile is not None)
        self.assertEqual(dict(onHold='true'), in_vm_artifacts_profile.__dict__)
        self.assertTrue(in_vm_artifacts_profile.is_on_hold())
        artifact_request.assert_called_once_with(testurl)
@patch("socket.gethostname", return_value="hostname")
@patch("time.gmtime", return_value=time.localtime(1485543256))
def test_report_vm_status(self, *args):
    """StatusBlob.to_json should serialize the v1 VM status document exactly
    as built below (hostname and time are patched for determinism)."""
    status = 'status'
    message = 'message'
    client = WireProtocol(wireserver_url).client
    actual = StatusBlob(client=client)
    actual.set_vm_status(VMStatus(status=status, message=message))
    # time.gmtime is patched above, so this timestamp is deterministic.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    # Build the expected document bottom-up.
    formatted_msg = {
        'lang': 'en-US',
        'message': message
    }
    v1_ga_status = {
        'version': str(CURRENT_VERSION),
        'status': status,
        'formattedMessage': formatted_msg
    }
    v1_ga_guest_info = {
        'computerName': socket.gethostname(),
        'osName': DISTRO_NAME,
        'osVersion': DISTRO_VERSION,
        'version': str(CURRENT_VERSION),
    }
    v1_agg_status = {
        'guestAgentStatus': v1_ga_status,
        'handlerAggregateStatus': []
    }
    v1_vm_status = {
        'version': '1.1',
        'timestampUTC': timestamp,
        'aggregateStatus': v1_agg_status,
        'guestOSInfo' : v1_ga_guest_info
    }
    self.assertEqual(json.dumps(v1_vm_status), actual.to_json())
class MockResponse:
    """Minimal stand-in for an HTTP response.

    Exposes the two members the wire-protocol code consumes: a ``status``
    code attribute and a ``read()`` method returning the raw body.
    """

    def __init__(self, body, status_code):
        # Keep the public attribute names: callers read .body and .status.
        self.body, self.status = body, status_code

    def read(self):
        """Return the stored response body unchanged."""
        return self.body
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# -*- coding:utf-8 -*-
import os
from simpleutil.config import cfg
from simpleutil.log import log as logging
from simpleutil.utils import systemutils
from simpleutil.utils.zlibutils.excluder import Excluder
from simpleflow.api import load
from simpleflow.storage import Connection
from simpleflow.storage.middleware import LogBook
from simpleflow.engines.engine import ParallelActionEngine
from goperation.manager.rpc.agent import sqlite
from goperation.manager.rpc.agent.application.taskflow.application import AppFileUpgradeByFile
from goperation.manager.rpc.agent.application.taskflow.application import Application
from goperation.manager.rpc.agent.application.taskflow import pipe
from gogamechen1 import common
from gogamechen1.api.rpc.taskflow import GogameMiddle
from gogamechen1.api.rpc.taskflow import GogameAppFile
from gogamechen1.api.rpc.taskflow import GogameAppBackupFile
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Paths excluded when extracting a hotfix archive via shell tools:
# glob-style patterns for zip, plain directory names for tar.
SHELLZIPEXCLUDES = ['bin**', 'geology**']
SHELTAREXCLUDE = ['bin', 'geology']
class HOFIXExcluder(Excluder):
    """Excluder that supplies shell-extraction exclude lists per archive type."""

    def __call__(self, compretype, shell=False):
        """Return the exclude-list provider for *compretype* (shell mode only)."""
        if not shell:
            raise TypeError('Just for shell extract')
        provider = {'zip': HOFIXExcluder.unzip,
                    'gz': HOFIXExcluder.untar}.get(compretype)
        if provider is None:
            raise NotImplementedError('Can not extract %s file' % compretype)
        return provider

    @staticmethod
    def unzip():
        """Exclude patterns for zip extraction."""
        return SHELLZIPEXCLUDES

    @staticmethod
    def untar():
        """Exclude names for tar(.gz) extraction."""
        return SHELTAREXCLUDE
# Module-level singleton passed to upgrade tasks below.
# NOTE(review): the name looks like a typo of "hotfixexcluder"; renaming
# would touch its call sites, so it is kept as-is here.
hofixexcluer = HOFIXExcluder()
def hotfix_entitys(appendpoint,
                   objtype, appfile,
                   entitys, timeline):
    """Apply a hotfix application file to a list of entities.

    Builds a simpleflow upgrade pipeline (optionally with a backup step),
    runs it, and returns ``(middlewares, error)`` where *error* is the
    exception raised by the engine run, or ``None`` on success.

    :param appendpoint: agent endpoint owning the entities
    :param objtype: expected object type; every entity must match it
    :param appfile: dict describing the upgrade file (md5, backup,
        revertable, rollback, timeout, stream)
    :param entitys: entity ids to upgrade
    :param timeline: timestamp used to name the backup archive
    :raises ValueError: if an entity does not belong to *objtype*
    """
    backupfile = None
    download_time = 600
    upzip_timeout = 600
    md5 = appfile.get('md5')
    backup = appfile.get('backup', True)
    revertable = appfile.get('revertable', False)
    rollback = appfile.get('rollback', True)
    # Cap both stage timeouts at the caller-supplied overall timeout.
    # (The previous code compared against a possibly-missing/None value,
    # which is a TypeError on Python 3 and nonsense on Python 2.)
    timeout = appfile.get('timeout')
    if timeout is not None:
        download_time = min(download_time, timeout)
        upzip_timeout = min(upzip_timeout, timeout)
    stream = appfile.get('stream')
    # Application upgrade file.
    upgradefile = GogameAppFile(md5, objtype, rollback=rollback,
                                revertable=revertable, stream=stream)
    if backup:
        # The entity to back up is chosen at random inside flow_factory.
        outfile = os.path.join(appendpoint.endpoint_backup,
                               '%s.%s.%d.gz' % (objtype, common.APPFILE, timeline))
        # Application backup file.
        backupfile = GogameAppBackupFile(outfile, objtype)
    applications = []
    middlewares = []
    for entity in entitys:
        if objtype != appendpoint._objtype(entity):
            raise ValueError('Entity not the same objtype')
        middleware = GogameMiddle(endpoint=appendpoint, entity=entity, objtype=objtype)
        middlewares.append(middleware)
        upgradetask = AppFileUpgradeByFile(middleware, native=False, exclude=hofixexcluer,
                                           rebind=['upgradefile', 'upzip_timeout'])
        app = Application(middleware, upgradetask=upgradetask)
        applications.append(app)
    book = LogBook(name='hotfix_%s' % appendpoint.namespace)
    store = dict(download_timeout=download_time, upzip_timeout=upzip_timeout)
    taskflow_session = sqlite.get_taskflow_session()
    upgrade_flow = pipe.flow_factory(taskflow_session, book,
                                     applications=applications,
                                     upgradefile=upgradefile,
                                     backupfile=backupfile,
                                     store=store)
    connection = Connection(taskflow_session)
    engine = load(connection, upgrade_flow, store=store,
                  book=book, engine_cls=ParallelActionEngine,
                  max_workers=4)
    # Keep the failure in a separate name: on Python 3 the name bound by
    # `except ... as e` is deleted when the except block exits, so returning
    # it afterwards would raise NameError.
    error = None
    try:
        engine.run()
    except Exception as exc:
        error = exc
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Hotfix task execute fail')
        else:
            LOG.error('Hotfix task execute fail, %s %s' % (exc.__class__.__name__, str(exc)))
    finally:
        connection.destroy_logbook(book.uuid)
        if stream:
            upgradefile.clean()
    return middlewares, error
|
"""Shared test code for RAOP test cases."""
import asyncio
from typing import cast
import pytest
from pyatv import connect
from pyatv.conf import AppleTV, ManualService
from pyatv.const import Protocol
from tests.fake_device import FakeAppleTV, raop
from tests.fake_device.raop import FakeRaopUseCases
@pytest.fixture(name="raop_device")
async def raop_device_fixture(event_loop):
    """Start a fake Apple TV exposing only the RAOP service."""
    fake_atv = FakeAppleTV(event_loop, test_mode=False)
    fake_atv.add_service(Protocol.RAOP)
    await fake_atv.start()
    yield fake_atv
    # Teardown: stop the fake device once the test has finished.
    await fake_atv.stop()
@pytest.fixture(name="raop_state")
async def raop_state_fixture(raop_device):
    """Expose the fake device's RAOP protocol state."""
    yield raop_device.get_state(Protocol.RAOP)
@pytest.fixture(name="raop_usecase")
async def raop_usecase_fixture(raop_device) -> FakeRaopUseCases:
    """Expose the fake device's RAOP use-case helper."""
    yield cast(FakeRaopUseCases, raop_device.get_usecase(Protocol.RAOP))
@pytest.fixture(name="raop_conf")
def raop_conf_fixture(raop_device, raop_properties):
    """Build an AppleTV configuration pointing at the fake RAOP device."""
    service = ManualService(
        "raop_id", Protocol.RAOP, raop_device.get_port(Protocol.RAOP), raop_properties
    )
    conf = AppleTV("127.0.0.1", "Apple TV")
    conf.add_service(service)
    yield conf
@pytest.fixture(name="raop_client")
async def raop_client_fixture(raop_conf, event_loop):
    """Connect a pyatv client to the fake device and close it on teardown."""
    client = await connect(raop_conf, loop=event_loop)
    yield client
    # close() returns awaitables that must all complete before teardown ends.
    await asyncio.gather(*client.close())
|
from django.db import models
from polymorphic.base import PolymorphicModelBase
from polymorphic.models import PolymorphicModel
from .handlers import RouteViewHandler
from .managers import RouteManager
from .utils import import_from_dotted_path
from .validators import (
validate_end_in_slash,
validate_no_dotty_subpaths,
validate_no_double_slashes,
validate_no_hash_symbol,
validate_no_questionmark,
validate_start_in_slash,
)
class UnboundViewMeta(PolymorphicModelBase):
    """
    Metaclass that wraps the `view` attribute with `staticmethod`.
    This ensures that the view does not bind to the class unintentionally.
    """

    def __new__(cls, name, bases, attrs):
        """
        Create the new class.
        Ensure any `view` attribute is a staticmethod is unbound to the class.
        """
        if attrs.get('view'):
            # Re-wrap in place so the callable never becomes a bound method.
            attrs['view'] = staticmethod(attrs['view'])
        return super().__new__(cls, name, bases, attrs)
class Route(PolymorphicModel, metaclass=UnboundViewMeta):
    """A Route in a tree of url endpoints."""

    # Absolute URL of this route. The validators enforce a canonical form:
    # starts and ends with '/', no '//', no dotty subpaths, no '#' or '?'.
    # Unique and indexed for prefix lookups (see get_descendants).
    url = models.TextField(
        db_index=True,
        validators=[
            validate_end_in_slash,
            validate_start_in_slash,
            validate_no_dotty_subpaths,
            validate_no_double_slashes,
            validate_no_hash_symbol,
            validate_no_questionmark,
        ],
        unique=True,
    )

    objects = RouteManager()

    # Dotted python path of the handler class (resolved in get_handler_class).
    handler = RouteViewHandler.path()

    def __str__(self):
        """Display a Route's class and url."""
        return '{} @ {}'.format(self.__class__.__name__, self.url)

    def get_descendants(self):
        """Get all the descendants of this Route.

        Unsaved instances have no descendants; otherwise return every other
        Route whose url starts with this route's url, ordered by url.
        """
        if not self.pk:
            return Route.objects.none()
        others = Route.objects.exclude(pk=self.pk)
        descendants = others.filter(url__startswith=self.url)
        return descendants.order_by('url')

    def get_handler_class(self):
        """Import a class from the python path string in `self.handler`."""
        return import_from_dotted_path(self.handler)

    def get_handler(self):
        """
        Get an instance of the handler for this Route instance.
        Multiple calls to this method (on the same instance of Route) will
        return the same instance of handler.
        """
        try:
            return self._handler
        except AttributeError:
            # Instantiate once and cache on the instance.
            self._handler = self.get_handler_class()(self)
            return self._handler

    def handle(self, request, path):
        """
        Delegate handling the request to the handler.
        The path of this route is chopped off the url to save the handler from
        needing to deal with it. If it really needs it, it will be able to
        derive it from the route (self) that is passed to it on instantiation.
        """
        handler = self.get_handler()
        # Strip the route url from the rest of the path. The `- 1` keeps the
        # trailing '/' of self.url as the leading '/' of the remaining path
        # (self.url is validated to end in a slash).
        path = path[len(self.url) - 1:]
        # Deal with the request
        return handler.handle(request, path)
|
import argparse
import os
import sltxpkg.util as su
from sltxpkg.command_config import Arg, Commands
from sltxpkg.commands import (cmd_analyze_logfile, cmd_auto_setup, cmd_cleanse,
cmd_compile, cmd_dependency, cmd_docker,
cmd_gen_gha, cmd_raw_compile, cmd_version)
def valid_file(arg: str) -> str:
    """Validate that *arg* names an existing file and return it unchanged.

    Used as an argparse ``type=`` callable below.

    Raises:
        ValueError: if *arg* is None or blank.
        FileNotFoundError: if *arg* does not point to an existing file.
    """
    if arg is None or arg.strip() == "":
        # Fixed typo in the original message ("vas" -> "was").
        raise ValueError("arg was none or empty")
    if not os.path.isfile(arg):
        raise FileNotFoundError("\"" + arg + "\" must be an existing file")
    return arg
# Shared argparse help/metavar strings reused by several sub-commands below.
file_help = "the file(s) to load; they will be processed."
file_tx = "file.tex"
# Declarative table of sub-commands:
#   name -> ((handler function, aliases), command-level Arg, [option Args])
sub_parser = Commands({
    'analyze': ((cmd_analyze_logfile, ['log']),
                Arg(description='Analyze a logfile for errors.'),
                [
                    Arg('files', metavar='log.zip',
                        type=str, nargs='*', help=file_help)]
                ),
    'dependency': ((cmd_dependency, ['dep']),
                   Arg(description='Install dependencies on the host system.'),
                   [
                       Arg('-l', '--local', metavar='path', dest='local_path', default=None,
                           help="This will install the dependency file into the given directory. This might be useful "
                                "if the compilation should be handled by an online editor. Use '.' to use the current "
                                "directory."),
                       Arg('deps', metavar='dep.yml', type=valid_file, nargs='+',
                           help="the file(s) to load the dependencies from.")
                   ]),
    'docker': ((cmd_docker, ['do']), Arg(description='Manage the containers to compile with sltx.'), []),
    'compile': ((cmd_compile, ['cmp']), Arg(
        description="Compile documents with previously installed containers. If docker was disabled this will default "
                    "to the same behavior as \"raw-compile\" passing on the recipe."),
        [
            Arg('-p', '--profile', dest='profile',
                help="allows to override the configured docker profile. This will enable docker automatically."),
            Arg('-r', '--recipe', dest='recipe',
                help='The recipe to instruct the main compile routine.', required=False, default=None),
            Arg('--root', dest='dock_as_root', action='store_true',
                help='Run the docker container with the lithie-root setup. This may lead to permission errors '
                     'when you want to delete the caches.',
                required=False),
            Arg('-l', '--local-texmf', dest='local_texmf', action='store_true', default=False,
                help='include the local texmf tree in the docker container', required=False),
            Arg('--no-local-texmf', dest='local_texmf', action='store_false',
                help='exclude the local texmf tree in the docker container', required=False),
            Arg('-d', '--dependencies', dest='extra_dependencies', action='append', default=[],
                help='additional dependency files to download before the installation. May be supplied '
                     'multiple times.',
                required=False),
            Arg('-a', '--args', action='append', metavar='ARGUMENT(S)', dest='extra_arguments', default=[],
                help="Extra arguments. Make sure to prepend them with the appropriate '-' or '--', they will "
                     "be used as extra_arguments in the recipe."),
            Arg('files', metavar=file_tx, type=str, nargs='*',
                help=file_help)
        ]),
    # Same as 'compile' but intended to run inside a container (no docker start).
    'raw-compile': ((cmd_raw_compile, ['raw-cmp']), Arg(
        description="Compile documents using a recipe. This will not start any docker container but will be executed "
                    "inside one as well."),
        [
            Arg('-r', '--recipe', dest='recipe',
                help='The recipe to instruct the main compile routine.', required=False, default=None),
            Arg('files', metavar=file_tx, type=str, nargs='*',
                help=file_help),
            Arg('-a', '--args', action='append', metavar='ARGUMENT(S)', dest='extra_arguments', default=[],
                help="Extra arguments. Make sure to prepend them with the appropriate '-' or '--', "
                     "they will be used as extra_arguments in the recipe."),
            Arg('-d', '--dependency', dest='extra_dependencies', action='append', default=[],
                help='additional dependency files to download before the installation. May be supplied '
                     'multiple times.',
                required=False)
        ]),
    'gen-gha': ((cmd_gen_gha, ['gha']), Arg(description='Generate a GitHub workflow To automate compilation.'), []),
    'cleanse': ((cmd_cleanse, ['cls']), Arg(
        description="This will clean all additional sltx-files in the current directory (like \"sltx-log-*\" files). "
                    "It may clean more, if you pass the corresponding flags. Please note, that cleanse will only read "
                    "the current config. If you've changed some configurations they will be used."),
        [
            Arg('-C', '--cache', dest='cleanse_cache', action='store_true',
                help="If set, sltx will clean the caches."),
            Arg('--all', dest='cleanse_all', action='store_true',
                help="If set, sltx will clean the texmf-tree (sltx) and the complete cache as-well."),
            Arg('-e', '--exclude', action='append', metavar='pattern', dest='exclude_patterns',
                help="Exclude all files/directories matching this pattern. May be supplied multiple times."),
            Arg('-i', '--include', action='append', metavar='pattern', dest='include_patterns',
                help="Include *only* files/directories matching this pattern. May be supplied multiple times.")
        ]),
    'auto-setup': (
        (cmd_auto_setup, []), Arg(
            description='Setup a basic version of sltx (this requires docker to be setup).'),
        [
            Arg('-d', '--dependencies', dest='auto_deps', action='store_true',
                help='This will install the recommended dependencies on your host system. This is helpful if you have '
                     'texlive installed and want your editor to recognize the libraries as well.')
        ]),
    'version': ((cmd_version, []), Arg(description='Show the version-info for sltx.'), [])
})
# Top-level argument parser for the sltx CLI; sub-commands are attached below.
parser = argparse.ArgumentParser(
    description="sltx, a Simple LaTeX utility", epilog="sltx Version: " + su.get_version())
# commands, parser.add_mutually_exclusive_group()
parser.add_argument('-c', '--config', dest='config', metavar='config.yml',
                    required=False, type=valid_file,
                    help="the file to load the configuration from.")
parser.add_argument('-t', '--threads', metavar='N', dest='threads', type=int,
                    help="number of threads to run the installation. Default is 1. This number will only affect some "
                         "routines.",
                    default=-1)
parser.add_argument('-q', '--quiet', dest='quiet',
                    required=False, action='store_true',
                    help="Set the flag if output is to be reduced")
parser.add_argument('-v', '--verbose', dest='verbose',
                    required=False, action='store_true',
                    help="Output in verbose mode -> more information from sltx")
parser.add_argument('--log', dest='log',
                    required=False, action='store_true',
                    help="Write to logfile. This is experimental.")
# TODO: Format support with mlatexformat?
# Register every command (and its aliases) from the declarative table above.
cmd_parser = parser.add_subparsers(title='command', description="Select the command for sltx", metavar=set(sub_parser.cmds.keys()),
                                   help="Help for the specific command. You may use shortcuts for them: " +
                                        str([k[1] for k in sub_parser.helper_values() if len(
                                            k[1]) != 0]),
                                   dest='command')
sub_parser.generate(cmd_parser)
|
"""Tests for the 'ihate' plugin"""
from _common import unittest
from beets import importer
from beets.library import Item
from beetsplug.ihate import IHatePlugin
class IHatePluginTest(unittest.TestCase):

    def test_hate(self):
        """Exercise IHatePlugin.do_i_hate_this against a table of queries."""
        test_item = Item(
            genre='TestGenre',
            album=u'TestAlbum',
            artist=u'TestArtist')
        task = importer.SingletonImportTask(None, test_item)

        cases = [
            # (match_pattern, should_hate, why)
            ({}, False, 'empty query should let it pass'),
            (["artist:bad_artist", "artist:TestArtist"],
             True, '1 query match'),
            (["album:test", "artist:testartist"],
             True, '2 query matches, either should trigger'),
            (["album:notthis genre:testgenre"],
             False, 'query is blocked by AND clause'),
            (["album:notthis genre:testgenre",
              "artist:testartist album:notthis"],
             False, 'both queries blocked by AND clause with unmatched condition'),
            (["album:testalbum genre:testgenre",
              "artist:testartist album:notthis"],
             True, 'only one query should fire'),
        ]
        for match_pattern, should_hate, why in cases:
            result = IHatePlugin.do_i_hate_this(task, match_pattern)
            if should_hate:
                self.assertTrue(result, why)
            else:
                self.assertFalse(result, why)
def suite():
    """Return this module's tests as a unittest suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Run via the suite() factory so direct invocation mirrors the test runner.
    unittest.main(defaultTest='suite')
|
#
# Base solver class
#
import casadi
import copy
import pybamm
import numbers
import numpy as np
import sys
import itertools
import multiprocessing as mp
import warnings
class BaseSolver(object):
"""Solve a discretised model.
Parameters
----------
method : str, optional
The method to use for integration, specific to each solver
rtol : float, optional
The relative tolerance for the solver (default is 1e-6).
atol : float, optional
The absolute tolerance for the solver (default is 1e-6).
root_method : str or pybamm algebraic solver class, optional
The method to use to find initial conditions (for DAE solvers).
If a solver class, must be an algebraic solver class.
If "casadi",
the solver uses casadi's Newton rootfinding algorithm to find initial
conditions. Otherwise, the solver uses 'scipy.optimize.root' with method
specified by 'root_method' (e.g. "lm", "hybr", ...)
root_tol : float, optional
The tolerance for the initial-condition solver (default is 1e-6).
extrap_tol : float, optional
The tolerance to assert whether extrapolation occurs or not. Default is 0.
"""
def __init__(
    self,
    method=None,
    rtol=1e-6,
    atol=1e-6,
    root_method=None,
    root_tol=1e-6,
    extrap_tol=0,
    max_steps="deprecated",
):
    """See the class docstring for parameter descriptions."""
    self._method = method
    self._rtol = rtol
    self._atol = atol
    # root_tol must be assigned before root_method: the root_method setter
    # reads self.root_tol when building an algebraic solver.
    self.root_tol = root_tol
    self.root_method = root_method
    self.extrap_tol = extrap_tol
    # max_steps was removed; fail loudly if a caller still passes it.
    if max_steps != "deprecated":
        raise ValueError(
            "max_steps has been deprecated, and should be set using the "
            "solver-specific extra-options dictionaries instead"
        )
    # Bookkeeping of models already processed by set_up (cleared by copy()).
    self.models_set_up = {}
    # Defaults, can be overwritten by specific solver
    self.name = "Base solver"
    self.ode_solver = False
    self.algebraic_solver = False
@property
def method(self):
    """Integration method used by the solver (solver-specific string)."""
    return self._method

@method.setter
def method(self, value):
    self._method = value

@property
def rtol(self):
    """Relative tolerance for the solver."""
    return self._rtol

@rtol.setter
def rtol(self, value):
    self._rtol = value

@property
def atol(self):
    """Absolute tolerance for the solver."""
    return self._atol

@atol.setter
def atol(self, value):
    self._atol = value
@property
def root_method(self):
    """Algebraic solver used to find consistent initial conditions."""
    return self._root_method

@root_method.setter
def root_method(self, method):
    # "casadi" selects CasADi's Newton rootfinder; any other string is
    # forwarded to pybamm.AlgebraicSolver (e.g. "lm", "hybr"). An already
    # constructed algebraic solver instance, or None, passes through.
    if method == "casadi":
        method = pybamm.CasadiAlgebraicSolver(self.root_tol)
    elif isinstance(method, str):
        method = pybamm.AlgebraicSolver(method, self.root_tol)
    elif not (
        method is None
        or (
            isinstance(method, pybamm.BaseSolver)
            and method.algebraic_solver is True
        )
    ):
        raise pybamm.SolverError("Root method must be an algebraic solver")
    self._root_method = method
@property
def root_tol(self):
    """Tolerance for the initial-condition (root-finding) solver."""
    return self._root_tol

@root_tol.setter
def root_tol(self, tol):
    self._root_tol = tol
def copy(self):
    """Return a shallow copy of this solver with its set-up cache cleared."""
    duplicate = copy.copy(self)
    # A fresh copy has not set up any models yet.
    duplicate.models_set_up = {}
    return duplicate
def set_up(self, model, inputs=None, t_eval=None):
"""Unpack model, perform checks, and calculate jacobian.
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate. Must have attributes rhs and
initial_conditions
inputs_dict : dict, optional
Any input parameters to pass to the model when solving
t_eval : numeric type, optional
The times (in seconds) at which to compute the solution
"""
pybamm.logger.info("Start solver set-up")
# Check model.algebraic for ode solvers
if self.ode_solver is True and len(model.algebraic) > 0:
raise pybamm.SolverError(
"Cannot use ODE solver '{}' to solve DAE model".format(self.name)
)
# Check model.rhs for algebraic solvers
if self.algebraic_solver is True and len(model.rhs) > 0:
raise pybamm.SolverError(
"""Cannot use algebraic solver to solve model with time derivatives"""
)
# casadi solver won't allow solving algebraic model so we have to raise an
# error here
if isinstance(self, pybamm.CasadiSolver) and len(model.rhs) == 0:
raise pybamm.SolverError(
"Cannot use CasadiSolver to solve algebraic model, "
"use CasadiAlgebraicSolver instead"
)
# Discretise model if it isn't already discretised
# This only works with purely 0D models, as otherwise the mesh and spatial
# method should be specified by the user
if model.is_discretised is False:
try:
disc = pybamm.Discretisation()
disc.process_model(model)
except pybamm.DiscretisationError as e:
raise pybamm.DiscretisationError(
"Cannot automatically discretise model, "
"model should be discretised before solving ({})".format(e)
)
inputs = inputs or {}
# Set model timescale
model.timescale_eval = model.timescale.evaluate(inputs=inputs)
# Set model lengthscales
model.length_scales_eval = {
domain: scale.evaluate(inputs=inputs)
for domain, scale in model.length_scales.items()
}
if (
isinstance(self, (pybamm.CasadiSolver, pybamm.CasadiAlgebraicSolver))
) and model.convert_to_format != "casadi":
pybamm.logger.warning(
"Converting {} to CasADi for solving with CasADi solver".format(
model.name
)
)
model.convert_to_format = "casadi"
if (
isinstance(self.root_method, pybamm.CasadiAlgebraicSolver)
and model.convert_to_format != "casadi"
):
pybamm.logger.warning(
"Converting {} to CasADi for calculating ICs with CasADi".format(
model.name
)
)
model.convert_to_format = "casadi"
if model.convert_to_format != "casadi":
# Create Jacobian from concatenated rhs and algebraic
y = pybamm.StateVector(slice(0, model.concatenated_initial_conditions.size))
# set up Jacobian object, for re-use of dict
jacobian = pybamm.Jacobian()
else:
# Convert model attributes to casadi
t_casadi = casadi.MX.sym("t")
y_diff = casadi.MX.sym("y_diff", model.concatenated_rhs.size)
y_alg = casadi.MX.sym("y_alg", model.concatenated_algebraic.size)
y_casadi = casadi.vertcat(y_diff, y_alg)
p_casadi = {}
for name, value in inputs.items():
if isinstance(value, numbers.Number):
p_casadi[name] = casadi.MX.sym(name)
else:
p_casadi[name] = casadi.MX.sym(name, value.shape[0])
p_casadi_stacked = casadi.vertcat(*[p for p in p_casadi.values()])
        def process(func, name, use_jacobian=None):
            """Convert a pybamm expression tree `func` into a callable in the
            format requested by ``model.convert_to_format`` ("python", "jax"
            or "casadi").

            Returns a tuple ``(func, func_call, jac_call)`` where ``func`` is
            the converted function, ``func_call`` is a SolverCallable (or
            Residuals) wrapper around it, and ``jac_call`` wraps the jacobian
            with respect to the state vector (None if no jacobian is used).
            """
            def report(string):
                # don't log event conversion
                if "event" not in string:
                    pybamm.logger.verbose(string)
            if use_jacobian is None:
                # fall back to the model's own preference
                use_jacobian = model.use_jacobian
            if model.convert_to_format != "casadi":
                # Process with pybamm functions ("python" or "jax" path);
                # `jacobian` and `y` were created earlier in set_up for this path
                if model.convert_to_format == "jax":
                    report(f"Converting {name} to jax")
                    jax_func = pybamm.EvaluatorJax(func)
                if use_jacobian:
                    report(f"Calculating jacobian for {name}")
                    jac = jacobian.jac(func, y)
                    if model.convert_to_format == "python":
                        report(f"Converting jacobian for {name} to python")
                        jac = pybamm.EvaluatorPython(jac)
                    elif model.convert_to_format == "jax":
                        report(f"Converting jacobian for {name} to jax")
                        jac = jax_func.get_jacobian()
                    jac = jac.evaluate
                else:
                    jac = None
                if model.convert_to_format == "python":
                    report(f"Converting {name} to python")
                    func = pybamm.EvaluatorPython(func)
                if model.convert_to_format == "jax":
                    report(f"Converting {name} to jax")
                    func = jax_func
                func = func.evaluate
            else:
                # Process with CasADi: build casadi.Function objects taking
                # (t, y, stacked inputs)
                report(f"Converting {name} to CasADi")
                func = func.to_casadi(t_casadi, y_casadi, inputs=p_casadi)
                if use_jacobian:
                    report(f"Calculating jacobian for {name} using CasADi")
                    jac_casadi = casadi.jacobian(func, y_casadi)
                    jac = casadi.Function(
                        name, [t_casadi, y_casadi, p_casadi_stacked], [jac_casadi]
                    )
                else:
                    jac = None
                func = casadi.Function(
                    name, [t_casadi, y_casadi, p_casadi_stacked], [func]
                )
            if name == "residuals":
                # residuals get a wrapper that also subtracts M @ ydot
                func_call = Residuals(func, name, model)
            else:
                func_call = SolverCallable(func, name, model)
            if jac is not None:
                jac_call = SolverCallable(jac, name + "_jac", model)
            else:
                jac_call = None
            return func, func_call, jac_call
# Check for heaviside and modulo functions in rhs and algebraic and add
# discontinuity events if these exist.
# Note: only checks for the case of t < X, t <= X, X < t, or X <= t, but also
# accounts for the fact that t might be dimensional
# Only do this for DAE models as ODE models can deal with discontinuities fine
if len(model.algebraic) > 0:
for symbol in itertools.chain(
model.concatenated_rhs.pre_order(),
model.concatenated_algebraic.pre_order(),
):
if isinstance(symbol, pybamm.Heaviside):
found_t = False
# Dimensionless
if symbol.right.id == pybamm.t.id:
expr = symbol.left
found_t = True
elif symbol.left.id == pybamm.t.id:
expr = symbol.right
found_t = True
# Dimensional
elif symbol.right.id == (pybamm.t * model.timescale_eval).id:
expr = symbol.left.new_copy() / symbol.right.right.new_copy()
found_t = True
elif symbol.left.id == (pybamm.t * model.timescale_eval).id:
expr = symbol.right.new_copy() / symbol.left.right.new_copy()
found_t = True
# Update the events if the heaviside function depended on t
if found_t:
model.events.append(
pybamm.Event(
str(symbol),
expr.new_copy(),
pybamm.EventType.DISCONTINUITY,
)
)
elif isinstance(symbol, pybamm.Modulo):
found_t = False
# Dimensionless
if symbol.left.id == pybamm.t.id:
expr = symbol.right
found_t = True
# Dimensional
elif symbol.left.id == (pybamm.t * model.timescale_eval).id:
expr = symbol.right.new_copy() / symbol.left.right.new_copy()
found_t = True
# Update the events if the modulo function depended on t
if found_t:
if t_eval is None:
N_events = 200
else:
N_events = t_eval[-1] // expr.value
for i in np.arange(N_events):
model.events.append(
pybamm.Event(
str(symbol),
expr.new_copy() * pybamm.Scalar(i + 1),
pybamm.EventType.DISCONTINUITY,
)
)
# Process initial conditions
initial_conditions = process(
model.concatenated_initial_conditions,
"initial_conditions",
use_jacobian=False,
)[0]
init_eval = InitialConditions(initial_conditions, model)
# Process rhs, algebraic and event expressions
rhs, rhs_eval, jac_rhs = process(model.concatenated_rhs, "RHS")
algebraic, algebraic_eval, jac_algebraic = process(
model.concatenated_algebraic, "algebraic"
)
terminate_events_eval = [
process(event.expression, "event", use_jacobian=False)[1]
for event in model.events
if event.event_type == pybamm.EventType.TERMINATION
]
interpolant_extrapolation_events_eval = [
process(event.expression, "event", use_jacobian=False)[1]
for event in model.events
if event.event_type == pybamm.EventType.INTERPOLANT_EXTRAPOLATION
]
# discontinuity events are evaluated before the solver is called, so don't need
# to process them
discontinuity_events_eval = [
event
for event in model.events
if event.event_type == pybamm.EventType.DISCONTINUITY
]
# Add the solver attributes
model.init_eval = init_eval
model.rhs_eval = rhs_eval
model.algebraic_eval = algebraic_eval
model.jac_algebraic_eval = jac_algebraic
model.terminate_events_eval = terminate_events_eval
model.discontinuity_events_eval = discontinuity_events_eval
model.interpolant_extrapolation_events_eval = (
interpolant_extrapolation_events_eval
)
# Calculate initial conditions
model.y0 = init_eval(inputs)
# Save CasADi functions for the CasADi solver
# Note: when we pass to casadi the ode part of the problem must be in explicit
# form so we pre-multiply by the inverse of the mass matrix
if isinstance(self.root_method, pybamm.CasadiAlgebraicSolver) or isinstance(
self, (pybamm.CasadiSolver, pybamm.CasadiAlgebraicSolver)
):
# can use DAE solver to solve model with algebraic equations only
if len(model.rhs) > 0:
mass_matrix_inv = casadi.MX(model.mass_matrix_inv.entries)
explicit_rhs = mass_matrix_inv @ rhs(
t_casadi, y_casadi, p_casadi_stacked
)
model.casadi_rhs = casadi.Function(
"rhs", [t_casadi, y_casadi, p_casadi_stacked], [explicit_rhs]
)
model.casadi_algebraic = algebraic
if len(model.rhs) == 0:
# No rhs equations: residuals is algebraic only
model.residuals_eval = Residuals(algebraic, "residuals", model)
model.jacobian_eval = jac_algebraic
elif len(model.algebraic) == 0:
# No algebraic equations: residuals is rhs only
model.residuals_eval = Residuals(rhs, "residuals", model)
model.jacobian_eval = jac_rhs
# Calculate consistent initial conditions for the algebraic equations
else:
all_states = pybamm.NumpyConcatenation(
model.concatenated_rhs, model.concatenated_algebraic
)
# Process again, uses caching so should be quick
residuals_eval, jacobian_eval = process(all_states, "residuals")[1:]
model.residuals_eval = residuals_eval
model.jacobian_eval = jacobian_eval
pybamm.logger.info("Finish solver set-up")
    def _set_initial_conditions(self, model, inputs, update_rhs):
        """
        Set initial conditions for the model. This is skipped if the solver is an
        algebraic solver (since this would make the algebraic solver redundant), and if
        the model doesn't have any algebraic equations (since there are no initial
        conditions to be calculated in this case).

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model for which to calculate initial conditions.
        inputs : dict
            Any input parameters to pass to the model when solving
        update_rhs : bool
            Whether to update the rhs. True for 'solve', False for 'step'.
        """
        if self.algebraic_solver is True:
            # Don't update model.y0
            return None
        elif len(model.algebraic) == 0:
            if update_rhs is True:
                # Recalculate initial conditions for the rhs equations
                model.y0 = model.init_eval(inputs)
            else:
                # Don't update model.y0
                return None
        else:
            if update_rhs is True:
                # Recalculate initial conditions for the rhs equations
                y0_from_inputs = model.init_eval(inputs)
                # Reuse old solution for algebraic equations
                y0_from_model = model.y0
                len_rhs = model.concatenated_rhs.size
                # update model.y0, which is used for initialising the algebraic solver
                if len_rhs == 0:
                    model.y0 = y0_from_model
                else:
                    # differential states come from the (possibly new) inputs;
                    # algebraic states keep their previous values as the
                    # initial guess for root-finding below
                    model.y0 = casadi.vertcat(
                        y0_from_inputs[:len_rhs], y0_from_model[len_rhs:]
                    )
            model.y0 = self.calculate_consistent_state(model, 0, inputs)
    def calculate_consistent_state(self, model, time=0, inputs=None):
        """
        Calculate consistent state for the algebraic equations through
        root-finding. model.y0 is used as the initial guess for rootfinding

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model for which to calculate initial conditions.
        time : float
            The time at which to calculate the states
        inputs_dict : dict, optional
            Any input parameters to pass to the model when solving

        Returns
        -------
        y0_consistent : array-like, same shape as y0_guess
            Initial conditions that are consistent with the algebraic equations (roots
            of the algebraic equations). If self.root_method == None then returns
            model.y0.
        """
        pybamm.logger.debug("Start calculating consistent states")
        if self.root_method is None:
            # no root-finder configured; trust the current initial conditions
            return model.y0
        try:
            # the root method is itself a solver; integrate at a single time point
            root_sol = self.root_method._integrate(model, [time], inputs)
        except pybamm.SolverError as e:
            # re-raise with context so the user knows root-finding failed
            raise pybamm.SolverError(
                "Could not find consistent states: {}".format(e.args[0])
            )
        pybamm.logger.debug("Found consistent states")
        y0 = root_sol.all_ys[0]
        if isinstance(y0, np.ndarray):
            # flatten to 1D for use as an initial state vector
            y0 = y0.flatten()
        return y0
    def solve(
        self,
        model,
        t_eval=None,
        external_variables=None,
        inputs=None,
        initial_conditions=None,
        nproc=None,
    ):
        """
        Execute the solver setup and calculate the solution of the model at
        specified times.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        t_eval : numeric type
            The times (in seconds) at which to compute the solution
        external_variables : dict
            A dictionary of external variables and their corresponding
            values at the current time
        inputs : dict or list, optional
            A dictionary or list of dictionaries describing any input parameters to
            pass to the model when solving
        initial_conditions : :class:`pybamm.Symbol`, optional
            Initial conditions to use when solving the model. If None (default),
            `model.concatenated_initial_conditions` is used. Otherwise, must be a symbol
            of size `len(model.rhs) + len(model.algebraic)`.
        nproc : int, optional
            Number of processes to use when solving for more than one set of input
            parameters. Defaults to value returned by "os.cpu_count()".

        Returns
        -------
        :class:`pybamm.Solution` or list of :class:`pybamm.Solution` objects.
             If type of `inputs` is `list`, return a list of corresponding
             :class:`pybamm.Solution` objects.

        Raises
        ------
        :class:`pybamm.ModelError`
            If an empty model is passed (`model.rhs = {}` and `model.algebraic={}` and
            `model.variables = {}`)
        """
        pybamm.logger.info("Start solving {} with {}".format(model.name, self.name))
        # Make sure model isn't empty
        if len(model.rhs) == 0 and len(model.algebraic) == 0:
            if not isinstance(self, pybamm.DummySolver):
                raise pybamm.ModelError(
                    "Cannot solve empty model, use `pybamm.DummySolver` instead"
                )
        # t_eval can only be None if the solver is an algebraic solver. In that case
        # set it to 0
        if t_eval is None:
            if self.algebraic_solver is True:
                t_eval = np.array([0])
            else:
                raise ValueError("t_eval cannot be None")
        # If t_eval is provided as [t0, tf] return the solution at 100 points
        elif isinstance(t_eval, list):
            if len(t_eval) == 1 and self.algebraic_solver is True:
                pass
            elif len(t_eval) != 2:
                raise pybamm.SolverError(
                    "'t_eval' can be provided as an array of times at which to "
                    "return the solution, or as a list [t0, tf] where t0 is the "
                    "initial time and tf is the final time, but has been provided "
                    "as a list of length {}.".format(len(t_eval))
                )
            else:
                t_eval = np.linspace(t_eval[0], t_eval[-1], 100)
        # Make sure t_eval is monotonic
        if (np.diff(t_eval) < 0).any():
            raise pybamm.SolverError("t_eval must increase monotonically")
        # Set up external variables and inputs
        #
        # Argument "inputs" can be either a list of input dicts or
        # a single dict. The remaining of this function is only working
        # with variable "input_list", which is a list of dictionaries.
        # If "inputs" is a single dict, "inputs_list" is a list of only one dict.
        inputs_list = inputs if isinstance(inputs, list) else [inputs]
        ext_and_inputs_list = [
            self._set_up_ext_and_inputs(model, external_variables, inputs)
            for inputs in inputs_list
        ]
        # Cannot use multiprocessing with model in "jax" format
        if (len(inputs_list) > 1) and model.convert_to_format == "jax":
            raise pybamm.SolverError(
                "Cannot solve list of inputs with multiprocessing "
                'when model in format "jax".'
            )
        # Set up (if not done already)
        timer = pybamm.Timer()
        if model not in self.models_set_up:
            # It is assumed that when len(inputs_list) > 1, model set
            # up (initial condition, time-scale and length-scale) does
            # not depend on input parameters. Therefore only `ext_and_inputs[0]`
            # is passed to `set_up`.
            # See https://github.com/pybamm-team/PyBaMM/pull/1261
            self.set_up(model, ext_and_inputs_list[0], t_eval)
            self.models_set_up.update(
                {model: {"initial conditions": model.concatenated_initial_conditions}}
            )
        else:
            ics_set_up = self.models_set_up[model]["initial conditions"]
            # Check that initial conditions have not been updated
            if ics_set_up.id != model.concatenated_initial_conditions.id:
                # If the new initial conditions are different, set up again
                # Doing the whole setup again might be slow, but no need to prematurely
                # optimize this
                self.set_up(model, ext_and_inputs_list[0], t_eval)
                self.models_set_up[model][
                    "initial conditions"
                ] = model.concatenated_initial_conditions
        set_up_time = timer.time()
        timer.reset()
        # (Re-)calculate consistent initial conditions
        # Assuming initial conditions do not depend on input parameters
        # when len(inputs_list) > 1, only `ext_and_inputs_list[0]`
        # is passed to `_set_initial_conditions`.
        # See https://github.com/pybamm-team/PyBaMM/pull/1261
        if len(inputs_list) > 1:
            all_inputs_names = set(
                itertools.chain.from_iterable(
                    [ext_and_inputs.keys() for ext_and_inputs in ext_and_inputs_list]
                )
            )
            initial_conditions_node_names = set(
                [it.name for it in model.concatenated_initial_conditions.pre_order()]
            )
            if all_inputs_names.issubset(initial_conditions_node_names):
                raise pybamm.SolverError(
                    "Input parameters cannot appear in expression "
                    "for initial conditions."
                )
        self._set_initial_conditions(model, ext_and_inputs_list[0], update_rhs=True)
        # Non-dimensionalise time
        t_eval_dimensionless = t_eval / model.timescale_eval
        # Calculate discontinuities
        discontinuities = [
            # Assuming that discontinuities do not depend on
            # input parameters when len(input_list) > 1, only
            # `input_list[0]` is passed to `evaluate`.
            # See https://github.com/pybamm-team/PyBaMM/pull/1261
            event.expression.evaluate(inputs=inputs_list[0])
            for event in model.discontinuity_events_eval
        ]
        # make sure they are increasing in time
        discontinuities = sorted(discontinuities)
        # remove any identical discontinuities (keep only the last of each run of
        # equal values) and any non-positive times
        discontinuities = [
            v
            for i, v in enumerate(discontinuities)
            if (
                i == len(discontinuities) - 1
                or discontinuities[i] < discontinuities[i + 1]
            )
            and v > 0
        ]
        # remove any discontinuities after end of t_eval
        discontinuities = [v for v in discontinuities if v < t_eval_dimensionless[-1]]
        if len(discontinuities) > 0:
            pybamm.logger.verbose(
                "Discontinuity events found at t = {}".format(discontinuities)
            )
            if isinstance(inputs, list):
                raise pybamm.SolverError(
                    "Cannot solve for a list of input parameters"
                    " sets with discontinuities"
                )
        else:
            pybamm.logger.verbose("No discontinuity events found")
        # insert time points around discontinuities in t_eval
        # keep track of sub sections to integrate by storing start and end indices
        start_indices = [0]
        end_indices = []
        eps = sys.float_info.epsilon
        for dtime in discontinuities:
            dindex = np.searchsorted(t_eval_dimensionless, dtime, side="left")
            end_indices.append(dindex + 1)
            start_indices.append(dindex + 1)
            if dtime - eps < t_eval_dimensionless[dindex] < dtime + eps:
                # an existing time point coincides with the discontinuity: nudge it
                # to just after, and insert a point just before
                t_eval_dimensionless[dindex] += eps
                t_eval_dimensionless = np.insert(
                    t_eval_dimensionless, dindex, dtime - eps
                )
            else:
                # bracket the discontinuity with two new time points
                t_eval_dimensionless = np.insert(
                    t_eval_dimensionless, dindex, [dtime - eps, dtime + eps]
                )
        end_indices.append(len(t_eval_dimensionless))
        # Integrate separately over each time segment and accumulate into the solution
        # object, restarting the solver at each discontinuity (and recalculating a
        # consistent state afterwards if a DAE)
        old_y0 = model.y0
        solutions = None
        for start_index, end_index in zip(start_indices, end_indices):
            pybamm.logger.verbose(
                "Calling solver for {} < t < {}".format(
                    t_eval_dimensionless[start_index] * model.timescale_eval,
                    t_eval_dimensionless[end_index - 1] * model.timescale_eval,
                )
            )
            ninputs = len(ext_and_inputs_list)
            if ninputs == 1:
                new_solution = self._integrate(
                    model,
                    t_eval_dimensionless[start_index:end_index],
                    ext_and_inputs_list[0],
                )
                new_solutions = [new_solution]
            else:
                # one process per set of inputs
                with mp.Pool(processes=nproc) as p:
                    new_solutions = p.starmap(
                        self._integrate,
                        zip(
                            [model] * ninputs,
                            [t_eval_dimensionless[start_index:end_index]] * ninputs,
                            ext_and_inputs_list,
                        ),
                    )
                    p.close()
                    p.join()
            # Setting the solve time for each segment.
            # pybamm.Solution.__add__ assumes attribute solve_time.
            solve_time = timer.time()
            for sol in new_solutions:
                sol.solve_time = solve_time
            if start_index == start_indices[0]:
                solutions = [sol for sol in new_solutions]
            else:
                for i, new_solution in enumerate(new_solutions):
                    solutions[i] = solutions[i] + new_solution
            if solutions[0].termination != "final time":
                # an event terminated the solve; stop integrating further segments
                break
            if end_index != len(t_eval_dimensionless):
                # setup for next integration subsection
                last_state = solutions[0].y[:, -1]
                # update y0 (for DAE solvers, this updates the initial guess for the
                # rootfinder)
                model.y0 = last_state
                if len(model.algebraic) > 0:
                    model.y0 = self.calculate_consistent_state(
                        model, t_eval_dimensionless[end_index], ext_and_inputs_list[0]
                    )
        solve_time = timer.time()
        # `solution` below is the last loop variable; after the loop it refers to
        # the final entry of `solutions`
        for i, solution in enumerate(solutions):
            # Check if extrapolation occurred
            extrapolation = self.check_extrapolation(solution, model.events)
            if extrapolation:
                warnings.warn(
                    "While solving {} extrapolation occurred for {}".format(
                        model.name, extrapolation
                    ),
                    pybamm.SolverWarning,
                )
            # Identify the event that caused termination and update the solution to
            # include the event time and state
            solutions[i], termination = self.get_termination_reason(
                solution, model.events
            )
            # Assign times
            solutions[i].set_up_time = set_up_time
            # all solutions get the same solve time, but their integration time
            # will be different (see https://github.com/pybamm-team/PyBaMM/pull/1261)
            solutions[i].solve_time = solve_time
        # Restore old y0
        model.y0 = old_y0
        # Report times
        if len(solutions) == 1:
            pybamm.logger.info("Finish solving {} ({})".format(model.name, termination))
            pybamm.logger.info(
                (
                    "Set-up time: {}, Solve time: {} (of which integration time: {}), "
                    "Total time: {}"
                ).format(
                    solutions[0].set_up_time,
                    solutions[0].solve_time,
                    solutions[0].integration_time,
                    solutions[0].total_time,
                )
            )
        else:
            pybamm.logger.info("Finish solving {} for all inputs".format(model.name))
            pybamm.logger.info(
                ("Set-up time: {}, Solve time: {}, Total time: {}").format(
                    solutions[0].set_up_time,
                    solutions[0].solve_time,
                    solutions[0].total_time,
                )
            )
        # Raise error if solutions[0] only contains one timestep (except for algebraic
        # solvers, where we may only expect one time in the solution)
        if (
            self.algebraic_solver is False
            and len(solution.all_ts) == 1
            and len(solution.all_ts[0]) == 1
        ):
            raise pybamm.SolverError(
                "Solution time vector has length 1. "
                "Check whether simulation terminated too early."
            )
        # Return solution(s)
        if ninputs == 1:
            return solutions[0]
        else:
            return solutions
    def step(
        self,
        old_solution,
        model,
        dt,
        npts=2,
        external_variables=None,
        inputs=None,
        save=True,
    ):
        """
        Step the solution of the model forward by a given time increment. The
        first time this method is called it executes the necessary setup by
        calling `self.set_up(model)`.

        Parameters
        ----------
        old_solution : :class:`pybamm.Solution` or None
            The previous solution to be added to. If `None`, a new solution is created.
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        dt : numeric type
            The timestep (in seconds) over which to step the solution
        npts : int, optional
            The number of points at which the solution will be returned during
            the step dt. default is 2 (returns the solution at t0 and t0 + dt).
        external_variables : dict
            A dictionary of external variables and their corresponding
            values at the current time
        inputs_dict : dict, optional
            Any input parameters to pass to the model when solving
        save : bool
            Turn on to store the solution of all previous timesteps

        Returns
        -------
        :class:`pybamm.Solution`
            The (possibly accumulated) solution after this step.

        Raises
        ------
        :class:`pybamm.ModelError`
            If an empty model is passed (`model.rhs = {}` and `model.algebraic = {}` and
            `model.variables = {}`)
        """
        if old_solution is not None and not (
            old_solution.termination == "final time"
            or "[experiment]" in old_solution.termination
        ):
            # Return same solution as an event has already been triggered
            # With hack to allow stepping past experiment current / voltage cut-off
            return old_solution
        # Make sure model isn't empty
        if len(model.rhs) == 0 and len(model.algebraic) == 0:
            if not isinstance(self, pybamm.DummySolver):
                raise pybamm.ModelError(
                    "Cannot step empty model, use `pybamm.DummySolver` instead"
                )
        # Make sure dt is positive
        if dt <= 0:
            raise pybamm.SolverError("Step time must be positive")
        # Set timer
        timer = pybamm.Timer()
        # Set up external variables and inputs
        external_variables = external_variables or {}
        inputs = inputs or {}
        ext_and_inputs = {**external_variables, **inputs}
        # Check that any inputs that may affect the scaling have not changed
        # Set model timescale
        temp_timescale_eval = model.timescale.evaluate(inputs=inputs)
        # Set model lengthscales
        temp_length_scales_eval = {
            domain: scale.evaluate(inputs=inputs)
            for domain, scale in model.length_scales.items()
        }
        if old_solution is not None:
            if temp_timescale_eval != old_solution.timescale_eval:
                # a changed timescale invalidates the non-dimensionalisation: error
                raise pybamm.SolverError(
                    "The model timescale is a function of an input parameter "
                    "and the value has changed between steps!"
                )
            for domain in temp_length_scales_eval.keys():
                old_dom_eval = old_solution.length_scales_eval[domain]
                if temp_length_scales_eval[domain] != old_dom_eval:
                    # a changed lengthscale is only logged, not raised
                    pybamm.logger.error(
                        "The {} domain lengthscale is a function of an input "
                        "parameter and the value has changed between "
                        "steps!".format(domain)
                    )
        # Run set up on first step
        if old_solution is None:
            pybamm.logger.verbose(
                "Start stepping {} with {}".format(model.name, self.name)
            )
            self.set_up(model, ext_and_inputs)
            t = 0.0
        else:
            # initialize with old solution
            t = old_solution.all_ts[-1][-1]
            model.y0 = old_solution.all_ys[-1][:, -1]
        set_up_time = timer.time()
        # (Re-)calculate consistent initial conditions
        self._set_initial_conditions(model, ext_and_inputs, update_rhs=False)
        # Non-dimensionalise dt
        dt_dimensionless = dt / model.timescale_eval
        # Step
        t_eval = np.linspace(t, t + dt_dimensionless, npts)
        pybamm.logger.verbose(
            "Stepping for {:.0f} < t < {:.0f}".format(
                t * model.timescale_eval,
                (t + dt_dimensionless) * model.timescale_eval,
            )
        )
        timer.reset()
        solution = self._integrate(model, t_eval, ext_and_inputs)
        solution.solve_time = timer.time()
        # Check if extrapolation occurred
        extrapolation = self.check_extrapolation(solution, model.events)
        if extrapolation:
            warnings.warn(
                "While solving {} extrapolation occurred for {}".format(
                    model.name, extrapolation
                ),
                pybamm.SolverWarning,
            )
        # Identify the event that caused termination and update the solution to
        # include the event time and state
        solution, termination = self.get_termination_reason(solution, model.events)
        # Assign setup time
        solution.set_up_time = set_up_time
        # Report times
        pybamm.logger.verbose("Finish stepping {} ({})".format(model.name, termination))
        pybamm.logger.verbose(
            (
                "Set-up time: {}, Step time: {} (of which integration time: {}), "
                "Total time: {}"
            ).format(
                solution.set_up_time,
                solution.solve_time,
                solution.integration_time,
                solution.total_time,
            )
        )
        # Return solution
        if save is False or old_solution is None:
            return solution
        else:
            return old_solution + solution
    def get_termination_reason(self, solution, events):
        """
        Identify the cause for termination. In particular, if the solver terminated
        due to an event, (try to) pinpoint which event was responsible. If an event
        occurs the event time and state are added to the solution object.

        Note that the current approach (evaluating all the events and then finding which
        one is smallest at the final timestep) is pretty crude, but is the easiest one
        that works for all the different solvers.

        Parameters
        ----------
        solution : :class:`pybamm.Solution`
            The solution object
        events : dict
            Dictionary of events

        Returns
        -------
        tuple
            ``(solution, termination_reason_string)``.
        """
        if solution.termination == "final time":
            return (
                solution,
                "the solver successfully reached the end of the integration interval",
            )
        elif solution.termination == "event":
            # Get final event value
            final_event_values = {}
            for event in events:
                if event.event_type == pybamm.EventType.TERMINATION:
                    final_event_values[event.name] = abs(
                        event.expression.evaluate(
                            solution.t_event,
                            solution.y_event,
                            inputs=solution.all_inputs[-1],
                        )
                    )
            # the event whose expression is closest to zero at the final time is
            # taken to be the one that triggered termination
            termination_event = min(final_event_values, key=final_event_values.get)
            # Add the event to the solution object
            solution.termination = "event: {}".format(termination_event)
            # Update t, y and inputs to include event time and state
            # Note: if the final entry of t is equal to the event time we skip
            # this (having duplicate entries causes an error later in ProcessedVariable)
            if solution.t_event != solution.all_ts[-1][-1]:
                event_sol = pybamm.Solution(
                    solution.t_event,
                    solution.y_event,
                    solution.model,
                    solution.all_inputs[-1],
                    solution.t_event,
                    solution.y_event,
                    solution.termination,
                )
                event_sol.solve_time = 0
                event_sol.integration_time = 0
                solution = solution + event_sol
            return solution, solution.termination
        elif solution.termination == "success":
            return solution, solution.termination
        # NOTE(review): any other termination string falls through and returns
        # None implicitly -- confirm whether such values are ever produced
    def check_extrapolation(self, solution, events):
        """
        Check if extrapolation occurred for any of the interpolants. Note that with the
        current approach (evaluating all the events at the solution times) some
        extrapolations might not be found if they only occurred for a small period of
        time.

        Parameters
        ----------
        solution : :class:`pybamm.Solution`
            The solution object
        events : dict
            Dictionary of events

        Returns
        -------
        list
            Names of the events for which extrapolation was detected.
        """
        extrap_events = {}
        for event in events:
            if event.event_type == pybamm.EventType.INTERPOLANT_EXTRAPOLATION:
                # First set to False, then loop through and change to True if any
                # events extrapolate
                extrap_events[event.name] = False
                # This might be a little bit slow but is ok for now
                for ts, ys, inputs in zip(
                    solution.all_ts, solution.all_ys, solution.all_inputs
                ):
                    for inner_idx, t in enumerate(ts):
                        y = ys[:, inner_idx]
                        if isinstance(y, casadi.DM):
                            # casadi matrices need converting to numpy arrays
                            y = y.full()
                        if (
                            event.expression.evaluate(t, y, inputs=inputs)
                            < self.extrap_tol
                        ):
                            extrap_events[event.name] = True
        # Add the event dictionary to the solution object
        solution.extrap_events = extrap_events
        return [k for k, v in extrap_events.items() if v]
def _set_up_ext_and_inputs(self, model, external_variables, inputs):
"""Set up external variables and input parameters"""
inputs = inputs or {}
# Go through all input parameters that can be found in the model
# If any of them are *not* provided by "inputs", a symbolic input parameter is
# created, with appropriate size
for input_param in model.input_parameters:
name = input_param.name
if name not in inputs:
# Only allow symbolic inputs for CasadiSolver and CasadiAlgebraicSolver
if not isinstance(
self, (pybamm.CasadiSolver, pybamm.CasadiAlgebraicSolver)
):
raise pybamm.SolverError(
"Only CasadiSolver and CasadiAlgebraicSolver "
"can have symbolic inputs"
)
inputs[name] = casadi.MX.sym(name, input_param._expected_size)
external_variables = external_variables or {}
ext_and_inputs = {**external_variables, **inputs}
return ext_and_inputs
class SolverCallable:
    """A class that will be called by the solver when integrating"""

    def __init__(self, function, name, model):
        self._function = function
        # CasADi functions are called positionally; anything else is treated
        # as a pybamm evaluator with a python-style interface
        self.form = "casadi" if isinstance(function, casadi.Function) else "python"
        self.name = name
        self.model = model
        self.timescale = self.model.timescale_eval

    def __call__(self, t, y, inputs):
        # RHS/algebraic/residual evaluations are logged and flattened to 1D;
        # everything else (e.g. jacobians) is passed through untouched
        if self.name in ["RHS", "algebraic", "residuals"]:
            pybamm.logger.debug(
                "Evaluating {} for {} at t={}".format(
                    self.name, self.model.name, t * self.timescale
                )
            )
            return self.function(t, y, inputs).flatten()
        return self.function(t, y, inputs)

    def function(self, t, y, inputs):
        """Evaluate the wrapped function at (t, y, inputs)."""
        if self.form != "casadi":
            return self._function(t, y, inputs=inputs, known_evals={})[0]
        states_eval = self._function(t, y, inputs)
        if self.name in ["RHS", "algebraic", "residuals", "event"]:
            # densify casadi output into a numpy array
            return states_eval.full()
        # keep jacobians sparse
        return states_eval
class Residuals(SolverCallable):
    """Returns information about residuals at time t and state y"""

    def __init__(self, function, name, model):
        super().__init__(function, name, model)
        if model.mass_matrix is not None:
            self.mass_matrix = model.mass_matrix.entries

    def __call__(self, t, y, ydot, inputs):
        # residual = f(t, y) - M @ ydot
        return super().__call__(t, y, inputs) - self.mass_matrix @ ydot
class InitialConditions(SolverCallable):
    """Returns initial conditions given inputs"""

    def __init__(self, function, model):
        super().__init__(function, "initial conditions", model)
        # dummy state vector: initial conditions are evaluated at t=0 and
        # never depend on y, but the wrapped function still expects one
        self.y_dummy = np.zeros(model.concatenated_initial_conditions.shape)

    def __call__(self, inputs):
        if self.form != "casadi":
            return self._function(0, self.y_dummy, inputs=inputs).flatten()
        if isinstance(inputs, dict):
            # stack the dict values into a single casadi vector
            inputs = casadi.vertcat(*[x for x in inputs.values()])
        return self._function(0, self.y_dummy, inputs)
|
#!/usr/bin/env python
# This file is part of the OpenMV project.
# Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
# This work is licensed under the MIT license, see the file LICENSE for
# details.
"""This module implements enough functionality to program the STM32F4xx over
DFU, without requiring dfu-util.
See app note AN3156 for a description of the DFU protocol.
See document UM0391 for a dscription of the DFuse file.
"""
from __future__ import print_function
import argparse
import collections
import inspect
import re
import struct
import sys
import usb.core
import usb.util
import zlib
# VID/PID of the target DFU bootloader (per the module docstring, an STM32F4xx)
__VID = 0x0483
__PID = 0xDF11
# USB request __TIMEOUT (milliseconds, as used by PyUSB ctrl_transfer)
__TIMEOUT = 4000
# DFU commands (bRequest values, see AN3156)
__DFU_DETACH = 0
__DFU_DNLOAD = 1
__DFU_UPLOAD = 2
__DFU_GETSTATUS = 3
__DFU_CLRSTATUS = 4
__DFU_GETSTATE = 5
__DFU_ABORT = 6
# DFU states (bState values reported by GETSTATUS)
__DFU_STATE_APP_IDLE = 0x00
__DFU_STATE_APP_DETACH = 0x01
__DFU_STATE_DFU_IDLE = 0x02
__DFU_STATE_DFU_DOWNLOAD_SYNC = 0x03
__DFU_STATE_DFU_DOWNLOAD_BUSY = 0x04
__DFU_STATE_DFU_DOWNLOAD_IDLE = 0x05
__DFU_STATE_DFU_MANIFEST_SYNC = 0x06
__DFU_STATE_DFU_MANIFEST = 0x07
__DFU_STATE_DFU_MANIFEST_WAIT_RESET = 0x08
__DFU_STATE_DFU_UPLOAD_IDLE = 0x09
__DFU_STATE_DFU_ERROR = 0x0A
# bDescriptorType of the DFU functional descriptor
_DFU_DESCRIPTOR_TYPE = 0x21
# Human-readable names for the DFU states above
__DFU_STATUS_STR = {
    __DFU_STATE_APP_IDLE: "STATE_APP_IDLE",
    __DFU_STATE_APP_DETACH: "STATE_APP_DETACH",
    __DFU_STATE_DFU_IDLE: "STATE_DFU_IDLE",
    __DFU_STATE_DFU_DOWNLOAD_SYNC: "STATE_DFU_DOWNLOAD_SYNC",
    __DFU_STATE_DFU_DOWNLOAD_BUSY: "STATE_DFU_DOWNLOAD_BUSY",
    __DFU_STATE_DFU_DOWNLOAD_IDLE: "STATE_DFU_DOWNLOAD_IDLE",
    __DFU_STATE_DFU_MANIFEST_SYNC: "STATE_DFU_MANIFEST_SYNC",
    __DFU_STATE_DFU_MANIFEST: "STATE_DFU_MANIFEST",
    __DFU_STATE_DFU_MANIFEST_WAIT_RESET: "STATE_DFU_MANIFEST_WAIT_RESET",
    __DFU_STATE_DFU_UPLOAD_IDLE: "STATE_DFU_UPLOAD_IDLE",
    __DFU_STATE_DFU_ERROR: "STATE_DFU_ERROR",
}
# USB device handle
__dev = None
# Configuration descriptor of the device
__cfg_descr = None
# Verbosity flag, set by the command-line parser elsewhere in this module
__verbose = None
# USB DFU interface
__DFU_INTERFACE = 0
# Python 3 deprecated getargspec in favour of getfullargspec, but
# Python 2 doesn't have the latter, so detect which one to use
getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
if "length" in getargspec(usb.util.get_string).args:
    # PyUSB 1.0.0.b1 has the length argument
    def get_string(dev, index):
        return usb.util.get_string(dev, 255, index)
else:
    # PyUSB 1.0.0.b2 dropped the length argument
    def get_string(dev, index):
        return usb.util.get_string(dev, index)
def find_dfu_cfg_descr(descr):
    """Parse *descr* as a DFU functional descriptor.

    Returns a named tuple of the descriptor fields when *descr* is a valid
    9-byte DFU functional descriptor, otherwise None.
    """
    # A DFU functional descriptor is exactly 9 bytes, with bLength == 9 and
    # bDescriptorType == _DFU_DESCRIPTOR_TYPE
    if len(descr) != 9 or descr[0] != 9 or descr[1] != _DFU_DESCRIPTOR_TYPE:
        return None
    CfgDescr = collections.namedtuple(
        "CfgDescr",
        [
            "bLength",
            "bDescriptorType",
            "bmAttributes",
            "wDetachTimeOut",
            "wTransferSize",
            "bcdDFUVersion",
        ],
    )
    fields = struct.unpack("<BBBHHH", bytearray(descr))
    return CfgDescr(*fields)
def init():
    """Initializes the found DFU device so that we can program it.

    Requires exactly one DFU device (matching __VID/__PID) to be attached;
    stores the device handle and its DFU configuration descriptor in the
    module globals, then drives the device into the DFU idle state.
    """
    global __dev, __cfg_descr
    devices = get_dfu_devices(idVendor=__VID, idProduct=__PID)
    if not devices:
        raise ValueError("No DFU device found")
    if len(devices) > 1:
        raise ValueError("Multiple DFU devices found")
    __dev = devices[0]
    __dev.set_configuration()
    # Claim DFU interface
    usb.util.claim_interface(__dev, __DFU_INTERFACE)
    # Find the DFU configuration descriptor, either in the device or interfaces
    __cfg_descr = None
    for cfg in __dev.configurations():
        __cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)
        if __cfg_descr:
            break
        for itf in cfg.interfaces():
            __cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)
            if __cfg_descr:
                # NOTE(review): this only exits the interface loop; the outer
                # configuration loop continues and may overwrite __cfg_descr
                # on multi-configuration devices -- confirm intent
                break
    # Get device into idle state, retrying a few times: abort pending
    # transfers, or clear any error status, as appropriate
    for attempt in range(4):
        status = get_status()
        if status == __DFU_STATE_DFU_IDLE:
            break
        elif status == __DFU_STATE_DFU_DOWNLOAD_IDLE or status == __DFU_STATE_DFU_UPLOAD_IDLE:
            abort_request()
        else:
            clr_status()
def abort_request():
    """Sends an abort request (DFU_ABORT) to return the device to idle."""
    __dev.ctrl_transfer(0x21, __DFU_ABORT, 0, __DFU_INTERFACE, None, __TIMEOUT)
def clr_status():
    """Clears any error status (perhaps left over from a previous session)."""
    __dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE, None, __TIMEOUT)
def get_status():
    """Get the status of the last operation.

    Returns the bState byte of the 6-byte DFU_GETSTATUS response.
    """
    # uses a hard-coded 20 s timeout instead of __TIMEOUT -- presumably to
    # accommodate slow operations such as erase; confirm before changing
    stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE, 6, 20000)
    # firmware can provide an optional string for any error
    if stat[5]:
        message = get_string(__dev, stat[5])
        if message:
            print(message)
    return stat[4]
def check_status(stage, expected):
    """Polls the device status and aborts the program if it differs.

    `stage` is only used in the error message (e.g. "erase", "write memory").
    """
    status = get_status()
    if status != expected:
        # Fall back to the raw status number when it has no friendly name.
        raise SystemExit("DFU: %s failed (%s)" % (stage, __DFU_STATUS_STR.get(status, status)))
def mass_erase():
    """Performs a MASS erase (i.e. erases the entire device)."""
    # Send DNLOAD with first byte=0x41 (the ST DFU "erase" command with no
    # address = full-chip erase).
    # NOTE(review): the payload is a str, not bytes — presumably PyUSB
    # converts it; confirm on the PyUSB version in use.
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT)
    # Execute last command (GETSTATUS triggers execution; expect busy first)
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def page_erase(addr):
    """Erases a single page.

    `addr` must be page-aligned for the target device's flash layout.
    """
    if __verbose:
        print("Erasing page: 0x%x..." % (addr))
    # Send DNLOAD with first byte=0x41 and page address (little-endian u32)
    buf = struct.pack("<BI", 0x41, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def set_address(addr):
    """Sets the address for the next operation (write or jump)."""
    # Send DNLOAD with first byte=0x21 (ST "set address pointer") and address
    buf = struct.pack("<BI", 0x21, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("set address", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("set address", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
    """Writes a buffer into memory. This routine assumes that memory has
    already been erased.

    Args:
        addr: start address to write to.
        buf: the bytes to write.
        progress: optional callback ``progress(addr, offset, size)``
            invoked periodically (every other transfer).
        progress_addr/progress_size: base address and total size reported
            to the progress callback (may span several elements).
    """
    xfer_count = 0
    xfer_bytes = 0
    xfer_total = len(buf)
    xfer_base = addr
    while xfer_bytes < xfer_total:
        if __verbose and xfer_count % 512 == 0:
            print(
                "Addr 0x%x %dKBs/%dKBs..."
                % (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)
            )
        if progress and xfer_count % 2 == 0:
            progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size)
        # Set mem write address
        set_address(xfer_base + xfer_bytes)
        # Send DNLOAD with fw data, capped at the device's advertised
        # wTransferSize (from the DFU functional descriptor).
        chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
        # wValue=2 selects the data phase (block number) per the ST protocol.
        __dev.ctrl_transfer(
            0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes : xfer_bytes + chunk], __TIMEOUT
        )
        # Execute last command
        check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
        # Check command state
        check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
        xfer_count += 1
        xfer_bytes += chunk
def write_page(buf, xfer_offset):
    """Writes a single page. This routine assumes that memory has already
    been erased.

    `xfer_offset` is relative to the start of internal flash (0x08000000).
    """
    # Base of internal flash on STM32 devices.
    xfer_base = 0x08000000
    # Set mem write address
    set_address(xfer_base + xfer_offset)
    # Send DNLOAD with fw data (wValue=2 selects the data phase)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)
    # Execute last command
    check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
    # Check command state
    check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
    if __verbose:
        print("Write: 0x%x " % (xfer_base + xfer_offset))
def exit_dfu():
    """Exit DFU mode, and start running the program.

    Sets the jump address to the start of flash, issues a zero-length
    DNLOAD (the DFU "leave" command), then best-effort checks the
    manifest state and releases the USB device.  Errors in the final
    handshake are ignored because the device typically re-enumerates
    (and drops off the bus) while it reboots.
    """
    # Set jump address
    set_address(0x08000000)
    # Send DNLOAD with 0 length to exit DFU
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT)
    try:
        # Execute last command
        if get_status() != __DFU_STATE_DFU_MANIFEST:
            print("Failed to reset device")
        # Release device
        usb.util.dispose_resources(__dev)
    except Exception:
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.  USB errors here are expected
        # (the device disconnects mid-handshake), so ignore only those.
        pass
def named(values, names):
    """Build a dict whose keys are the whitespace-separated `names` and
    whose values are taken, in order, from `values`."""
    field_names = names.split()
    return dict(zip(field_names, values))
def consume(fmt, data, names):
    """Unpack one `fmt` struct from the front of `data`.

    Returns a dict mapping the whitespace-separated `names` to the
    unpacked fields, together with the remaining (unconsumed) data.
    """
    size = struct.calcsize(fmt)
    fields = struct.unpack(fmt, data[:size])
    parsed = dict(zip(names.split(), fields))
    return parsed, data[size:]
def cstring(string):
    """Decode a byte array as UTF-8 and return the text up to (but not
    including) the first NUL character, or all of it if none is present."""
    text = string.decode("utf-8")
    nul = text.find("\0")
    return text if nul < 0 else text[:nul]
def compute_crc(data):
    """Compute the 32-bit DFU checksum of `data`.

    This is the bitwise complement of the standard CRC32, masked to
    32 bits (``~crc32`` is identical to ``-crc32 - 1``).
    """
    return ~zlib.crc32(data) & 0xFFFFFFFF
def read_dfu_file(filename):
    """Reads a DFU file, and parses the individual elements from the file.
    Returns an array of elements. Each element is a dictionary with the
    following keys:
        num     - The element index.
        address - The address that the element data should be written to.
        size    - The size of the element data.
        data    - The element data.
    If an error occurs while parsing the file, then None is returned.
    """
    print("File: {}".format(filename))
    with open(filename, "rb") as fin:
        data = fin.read()
    # The CRC in the suffix covers everything except its own 4 bytes.
    crc = compute_crc(data[:-4])
    elements = []
    # Decode the DFU Prefix
    #
    # <5sBIB
    # < little endian           Endianness
    # 5s char[5]     signature  "DfuSe"
    # B  uint8_t     version    1
    # I  uint32_t    size       Size of the DFU file (without suffix)
    # B  uint8_t     targets    Number of targets
    dfu_prefix, data = consume("<5sBIB", data, "signature version size targets")
    print(
        "    %(signature)s v%(version)d, image size: %(size)d, "
        "targets: %(targets)d" % dfu_prefix
    )
    for target_idx in range(dfu_prefix["targets"]):
        # Decode the Image Prefix
        #
        # <6sBI255s2I
        # < little endian           Endianness
        # 6s char[6]     signature  "Target"
        # B  uint8_t     altsetting
        # I  uint32_t    named      Bool indicating if a name was used
        # 255s char[255] name       Name of the target
        # I  uint32_t    size       Size of image (without prefix)
        # I  uint32_t    elements   Number of elements in the image
        img_prefix, data = consume(
            "<6sBI255s2I", data, "signature altsetting named name " "size elements"
        )
        img_prefix["num"] = target_idx
        if img_prefix["named"]:
            img_prefix["name"] = cstring(img_prefix["name"])
        else:
            img_prefix["name"] = ""
        print(
            "    %(signature)s %(num)d, alt setting: %(altsetting)s, "
            'name: "%(name)s", size: %(size)d, elements: %(elements)d' % img_prefix
        )
        # Each target's element data is a self-contained sub-buffer.
        target_size = img_prefix["size"]
        target_data = data[:target_size]
        data = data[target_size:]
        for elem_idx in range(img_prefix["elements"]):
            # Decode target prefix
            #
            # <2I
            # < little endian           Endianness
            # I  uint32_t    element    Address
            # I  uint32_t    element    Size
            elem_prefix, target_data = consume("<2I", target_data, "addr size")
            elem_prefix["num"] = elem_idx
            print("      %(num)d, address: 0x%(addr)08x, size: %(size)d" % elem_prefix)
            elem_size = elem_prefix["size"]
            elem_data = target_data[:elem_size]
            target_data = target_data[elem_size:]
            elem_prefix["data"] = elem_data
            elements.append(elem_prefix)
        # Anything left over means the declared sizes were inconsistent.
        if len(target_data):
            print("target %d PARSE ERROR" % target_idx)
    # Decode DFU Suffix
    #
    # <4H3sBI
    # < little endian           Endianness
    # H  uint16_t    device     Firmware version
    # H  uint16_t    product
    # H  uint16_t    vendor
    # H  uint16_t    dfu        0x11a   (DFU file format version)
    # 3s char[3]     ufd        "UFD"
    # B  uint8_t     len        16
    # I  uint32_t    crc32      Checksum
    dfu_suffix = named(
        struct.unpack("<4H3sBI", data[:16]), "device product vendor dfu ufd len crc"
    )
    print(
        "    usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, "
        "dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x" % dfu_suffix
    )
    if crc != dfu_suffix["crc"]:
        print("CRC ERROR: computed crc32 is 0x%08x" % crc)
        return
    data = data[16:]
    # After the suffix there must be no trailing bytes.
    if data:
        print("PARSE ERROR")
        return
    return elements
class FilterDFU(object):
    """Class for filtering USB devices to identify devices which are in DFU
    mode.

    Instances are used as a PyUSB ``custom_match`` predicate.
    """
    def __call__(self, device):
        # Returns True when the interface is Application Specific
        # (class 0xFE) with subclass 1 (Device Firmware Upgrade).
        # NOTE(review): the unconditional `return` means only the first
        # interface of the first configuration is inspected — confirm
        # that is sufficient for the targeted devices.
        for cfg in device:
            for intf in cfg:
                return intf.bInterfaceClass == 0xFE and intf.bInterfaceSubClass == 1
def get_dfu_devices(*args, **kwargs):
    """Return a list of USB devices currently in DFU mode.

    Additional filters (like idProduct and idVendor) can be passed in
    to refine the search.
    """
    # find_all=True yields a generator on newer PyUSB; materialize it so
    # callers can take len() and index into the result.
    matches = usb.core.find(*args, find_all=True, custom_match=FilterDFU(), **kwargs)
    return list(matches)
def get_memory_layout(device):
    """Returns an array which identifies the memory layout. Each entry
    of the array will contain a dictionary with the following keys:
        addr      - Address of this memory segment.
        last_addr - Last address contained within the memory segment.
        size      - Size of the segment, in bytes.
        num_pages - Number of pages in the segment.
        page_size - Size of each page, in bytes.
    """
    cfg = device[0]
    intf = cfg[(0, 0)]
    # The DFU bootloader encodes the flash layout in the interface name
    # string, e.g. "@Internal Flash  /0x08000000/04*016Kg,...".
    mem_layout_str = get_string(device, intf.iInterface)
    mem_layout = mem_layout_str.split("/")
    result = []
    # After the name, entries come in (start-address, segment-list) pairs.
    for mem_layout_index in range(1, len(mem_layout), 2):
        addr = int(mem_layout[mem_layout_index], 0)
        segments = mem_layout[mem_layout_index + 1].split(",")
        # Each segment looks like "<count>*<size><multiplier><access>".
        seg_re = re.compile(r"(\d+)\*(\d+)(.)(.)")
        for segment in segments:
            seg_match = seg_re.match(segment)
            num_pages = int(seg_match.groups()[0], 10)
            page_size = int(seg_match.groups()[1], 10)
            multiplier = seg_match.groups()[2]
            if multiplier == "K":
                page_size *= 1024
            if multiplier == "M":
                page_size *= 1024 * 1024
            size = num_pages * page_size
            last_addr = addr + size - 1
            result.append(
                named(
                    (addr, last_addr, size, num_pages, page_size),
                    "addr last_addr size num_pages page_size",
                )
            )
            # Consecutive segments are laid out back-to-back.
            addr += size
    return result
def list_dfu_devices(*args, **kwargs):
    """Prints a list of devices detected in DFU mode, with their bus
    position, VID:PID and parsed flash memory layout.

    Raises:
        SystemExit: if no DFU-capable device is found.
    """
    devices = get_dfu_devices(*args, **kwargs)
    if not devices:
        raise SystemExit("No DFU capable devices found")
    for device in devices:
        print(
            "Bus {} Device {:03d}: ID {:04x}:{:04x}".format(
                device.bus, device.address, device.idVendor, device.idProduct
            )
        )
        layout = get_memory_layout(device)
        print("Memory Layout")
        for entry in layout:
            print(
                "    0x{:x} {:2d} pages of {:3d}K bytes".format(
                    entry["addr"], entry["num_pages"], entry["page_size"] // 1024
                )
            )
def write_elements(elements, mass_erase_used, progress=None):
    """Writes the indicated elements into the target memory,
    erasing as needed.

    Args:
        elements: list of dicts as returned by read_dfu_file
            (keys: addr, size, data, ...).
        mass_erase_used: when True, skip per-page erases (the whole chip
            was already erased).
        progress: optional callback ``progress(addr, offset, size)``.
    """
    mem_layout = get_memory_layout(__dev)
    for elem in elements:
        addr = elem["addr"]
        size = elem["size"]
        data = elem["data"]
        elem_size = size
        elem_addr = addr
        if progress:
            progress(elem_addr, 0, elem_size)
        while size > 0:
            write_size = size
            if not mass_erase_used:
                for segment in mem_layout:
                    if addr >= segment["addr"] and addr <= segment["last_addr"]:
                        # We found the page containing the address we want to
                        # write, erase it
                        page_size = segment["page_size"]
                        # Round down to the page boundary (page_size is a
                        # power of two in the device's layout string).
                        page_addr = addr & ~(page_size - 1)
                        # Clamp the write so it never crosses into the next
                        # (not-yet-erased) page.
                        if addr + write_size > page_addr + page_size:
                            write_size = page_addr + page_size - addr
                        page_erase(page_addr)
                        break
            write_memory(addr, data[:write_size], progress, elem_addr, elem_size)
            data = data[write_size:]
            addr += write_size
            size -= write_size
            if progress:
                progress(elem_addr, addr - elem_addr, elem_size)
def cli_progress(addr, offset, size):
    """Prints a progress report suitable for use on the command line.

    Renders an in-place (carriage-return) progress bar for the write at
    `addr`, `offset` bytes into a region of `size` bytes total.
    """
    width = 25
    done = offset * width // size
    print(
        "\r0x{:08x} {:7d} [{}{}] {:3d}% ".format(
            addr, size, "=" * done, " " * (width - done), offset * 100 // size
        ),
        end="",
    )
    try:
        sys.stdout.flush()
    except OSError:
        pass  # Ignore Windows CLI "WinError 87" on Python 3.6
    # Terminate the in-place line once the region is fully written.
    if offset == size:
        print("")
def main():
    """Test program for verifying this files functionality.

    Command-line driver: list devices, mass-erase, write a .dfu file,
    and/or exit DFU mode, according to the parsed arguments.
    """
    global __verbose
    global __VID
    global __PID
    # Parse CMD args
    parser = argparse.ArgumentParser(description="DFU Python Util")
    parser.add_argument(
        "-l", "--list", help="list available DFU devices", action="store_true", default=False
    )
    # int(x, 0) accepts hex (0x...), octal and decimal notations.
    parser.add_argument("--vid", help="USB Vendor ID", type=lambda x: int(x, 0), default=__VID)
    parser.add_argument("--pid", help="USB Product ID", type=lambda x: int(x, 0), default=__PID)
    parser.add_argument(
        "-m", "--mass-erase", help="mass erase device", action="store_true", default=False
    )
    # NOTE(review): despite the "-u/--upload" flag name and help text, the
    # path is used below to *download* the file into the device — confirm.
    parser.add_argument(
        "-u", "--upload", help="read file from DFU device", dest="path", default=False
    )
    parser.add_argument("-x", "--exit", help="Exit DFU", action="store_true", default=False)
    parser.add_argument(
        "-v", "--verbose", help="increase output verbosity", action="store_true", default=False
    )
    args = parser.parse_args()
    __verbose = args.verbose
    __VID = args.vid
    __PID = args.pid
    if args.list:
        list_dfu_devices(idVendor=__VID, idProduct=__PID)
        return
    init()
    command_run = False
    if args.mass_erase:
        print("Mass erase...")
        mass_erase()
        command_run = True
    if args.path:
        elements = read_dfu_file(args.path)
        if not elements:
            print("No data in dfu file")
            return
        print("Writing memory...")
        # Skip per-page erases when a mass erase was already performed.
        write_elements(elements, args.mass_erase, progress=cli_progress)
        print("Exiting DFU...")
        exit_dfu()
        command_run = True
    if args.exit:
        print("Exiting DFU...")
        exit_dfu()
        command_run = True
    if command_run:
        print("Finished")
    else:
        print("No command specified")
# Run the CLI entry point when executed as a script.
if __name__ == "__main__":
    main()
|
from django.shortcuts import render
import requests
import json
from django.http import JsonResponse, HttpResponse, Http404
import json
from django.views.decorators.csrf import csrf_exempt
import json
import time
import logging
from .mist_smtp.mist_smtp import Mist_SMTP
from .lib.__req import Req
from .lib.psks import Psk
from .lib.wlans import Wlan
from .lib.sites import Sites
# Load the optional disclaimer/footer settings: prefer the project's
# .config module, fall back to environment variables (e.g. Docker deploys).
try:
    from .config import disclaimer_config
    disclaimer_config = {
        "disclaimer": disclaimer_config.get("disclaimer", None),
        "github_url": disclaimer_config.get("github_url", None),
        "docker_url": disclaimer_config.get("docker_url", None)
    }
except:
    # NOTE(review): bare `except:` — presumably meant to catch the
    # ImportError when .config is absent, but it also hides real errors
    # inside config.py; consider narrowing.
    import os
    disclaimer_config = {
        "disclaimer": os.environ.get("APP_DISCLAIMER", None),
        "github_url": os.environ.get("APP_GITHUB_URL", None),
        "docker_url": os.environ.get("APP_DOCKER_URL", None)
    }
#############################################
#### SMTP CONFIG
# Load SMTP settings from the project's .config module when present,
# otherwise from environment variables.  `smtp_config` may end up None,
# which means SMTP (PSK emailing) is disabled.
try:
    from .config import smtp_config
    smtp_config["host"] = smtp_config.get("host", None)
    smtp_config["port"] = smtp_config.get("port", 587)
    smtp_config["use_ssl"] = smtp_config.get("use_ssl", True)
    smtp_config["username"] = smtp_config.get("username", None)
    smtp_config["password"] = smtp_config.get("password", None)
    smtp_config["from_name"] = smtp_config.get("from_name", "Wi-Fi Access")
    smtp_config["from_email"] = smtp_config.get("from_email", None)
    smtp_config["logo_url"] = smtp_config.get(
        "logo_url", "https://cdn.mist.com/wp-content/uploads/logo.png")
    smtp_config["enable_qrcode"] = smtp_config.get("enable_qrcode", True)
except:
    import os
    # NOTE(review): environment variables are strings, so
    # MIST_SMTP_ENABLED="false" is still truthy here — confirm intent.
    smtp_enabled = os.environ.get("MIST_SMTP_ENABLED", default=False)
    if smtp_enabled:
        smtp_config = {
            "host": os.environ.get("MIST_SMTP_HOST", default=None),
            "port": os.environ.get("MIST_SMTP_PORT", default=587),
            "use_ssl": os.environ.get("MIST_SMTP_SSL", default=True),
            "username": os.environ.get("MIST_SMTP_USER", default=None),
            "password": os.environ.get("MIST_SMTP_PASSWORD", default=None),
            "from_name": os.environ.get("MIST_SMTP_FROM_NAME", default="Wi-Fi Access"),
            "from_email": os.environ.get("MIST_SMTP_FROM_EMAIL", default=None),
            "logo_url": os.environ.get("MIST_SMTP_LOGO_URL", default="https://cdn.mist.com/wp-content/uploads/logo.png"),
            "enable_qrcode": os.environ.get("MIST_SMTP_QRCODE", default=True)
        }
    else:
        smtp_config = None
finally:
    mist_smtp = Mist_SMTP(smtp_config)
    print("".ljust(80, "-"))
    print(" SMTP CONFIG ".center(80))
    print("")
    # Fix: these prints previously dereferenced smtp_config unconditionally
    # and crashed with a TypeError at import time when SMTP was disabled
    # (smtp_config is None).
    if smtp_config:
        print("host          : {0}".format(smtp_config["host"]))
        print("port          : {0}".format(smtp_config["port"]))
        print("use_ssl       : {0}".format(smtp_config["use_ssl"]))
        print("username      : {0}".format(smtp_config["username"]))
        print("from_name     : {0}".format(smtp_config["from_name"]))
        print("from_email    : {0}".format(smtp_config["from_email"]))
        print("logo_url      : {0}".format(smtp_config["logo_url"]))
        print("enable_qrcode : {0}".format(smtp_config["enable_qrcode"]))
    else:
        print("SMTP disabled")
    print("")
#############################################
#### PSK CONFIG
# Load PSK generation settings (salt, length, default expiry) from
# .config when present, otherwise from environment variables.
try:
    from .config import psk_config
    psk_config["salt"] = psk_config.get(
        "salt", "$2b$12$SIGWr574/7OggDO4BBJ1D.")
    psk_config["length"] = psk_config.get("length", 12)
    psk_config["default_expire_time"] = psk_config.get(
        "default_expire_time", 0)
except:
    # Fix: `os` was only imported inside the *other* config fallbacks, so
    # it could be undefined here when .config exists but lacks psk_config;
    # import it locally to make this branch self-contained.
    import os
    psk_config = {
        "default_expire_time": int(os.environ.get("MIST_PSK_DEFAULT_EXPIRE_TIME", default=0)),
        "salt": os.environ.get("MIST_PSK_SALT", default="$2b$12$SIGWr574/7OggDO4BBJ1D."),
        "length": int(os.environ.get("MIST_PSK_LENGTH", default=12))
    }
finally:
    print("".ljust(80, "-"))
    print(" PSK CONFIG ".center(80))
    print("")
    print("default expire time: {0}".format(psk_config["default_expire_time"]))
    print("length             : {0}".format(psk_config["length"]))
    print("")
    # bcrypt APIs want the salt as bytes.
    psk_config["salt"] = str.encode(psk_config["salt"])
#############################################
#### VIEWS
##########
# PSK CONFIG
@csrf_exempt
def pskConfig(request):
    """Return the PSK generation settings (length, default expiry).

    GET only; any other method results in a 404.
    """
    if request.method == "GET":
        response = {
            "psk_length": psk_config["length"],
            "default_expire_time": psk_config["default_expire_time"]
        }
        return JsonResponse(status=200, data=response)
    else:
        # Fix: `return Http404` returned the exception *class* (not an
        # HttpResponse), which Django reports as a server error; Http404
        # must be raised to produce a 404 response.
        raise Http404
##########
# PSK
@csrf_exempt
def psks(request):
    """List PSKs for the requested org/site (POST only)."""
    if request.method == "POST":
        response = Psk().pull(request.body)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
@csrf_exempt
def createPsk(request):
    """Create a new PSK using the module-level psk_config (POST only)."""
    if request.method == "POST":
        response = Psk().push(request.body, psk_config)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
@csrf_exempt
def deletePsk(request):
    """Delete an existing PSK (POST only)."""
    if request.method == "POST":
        response = Psk().delete(request.body)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
##########
# Sites
@csrf_exempt
def sites(request):
    """List the sites visible to the authenticated account (POST only)."""
    if request.method == 'POST':
        response = Sites().pull(request.body)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
##########
# WLANS
@csrf_exempt
def wlans(request):
    """List the WLANs for the requested org/site (POST only)."""
    if request.method == 'POST':
        response = Wlan().pull(request.body)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
##########
# VLAN
@csrf_exempt
def vlans(request):
    """Change the VLAN assigned to a WLAN (POST only)."""
    if request.method == "POST":
        response = Wlan().change_vlan(request.body)
        return JsonResponse(status=response["status"], data=response["data"])
    else:
        # Fix: raise Http404 instead of returning the exception class.
        raise Http404
##########
# LOGIN
def _get_self(request, host, method, headers=None, cookies=None):
    """Call the Mist /api/v1/self endpoint and relay the result.

    Args:
        request: originating Django request (unused; kept for signature
            parity with the view helpers).
        host: Mist API host name.
        method: authentication method label echoed back to the caller
            ("token" or "username").
        headers: optional HTTP headers to forward to the API.
        cookies: optional cookie jar obtained from a prior login call.

    Returns:
        JsonResponse with the host, /self payload, method, headers and
        a plain-dict copy of the cookies.
    """
    # Fix: avoid a mutable default argument for `headers` (shared dict
    # across calls); default to a fresh empty dict instead.
    if headers is None:
        headers = {}
    if cookies == None:
        cookies_dict = None
    else:
        cookies_dict = cookies.get_dict()
    url = "https://%s/api/v1/self" % (host)
    resp = requests.get(url, headers=headers, cookies=cookies)
    data = resp.json()
    return JsonResponse({"host": host, "data": data, "method": method, "headers": headers, "cookies": cookies_dict})
@csrf_exempt
def login(request):
    """Authenticate against the Mist cloud and relay /api/v1/self.

    POST body (JSON): "host" plus either "token", or "email"+"password"
    (optionally "two_factor").  Non-POST requests get a 400.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        if "host" in body:
            if "token" in body:
                headers = {"Authorization": "Token " +
                           body["token"], 'Content-Type': "application/json"}
                return _get_self(request, body["host"], "token", headers=headers)
            elif "email" in body and "password" in body:
                url = "https://%s/api/v1/login" % (body["host"])
                data = {"email": body["email"], "password": body["password"]}
                if "two_factor" in body:
                    data["two_factor"] = body["two_factor"]
                headers = {'Content-Type': "application/json"}
                # NOTE(review): `headers` is built but headers={} is passed
                # here; requests sets the JSON Content-Type itself via
                # `json=`, so this works — confirm which was intended.
                resp = requests.post(url, json=data, headers={})
                if resp.status_code == 200:
                    # Forward the session cookies from login to /self.
                    cookies = resp.cookies
                    return _get_self(request, body["host"], "username", headers=headers, cookies=cookies)
                else:
                    return JsonResponse(status=400, data={"message": "authentication failed"})
            elif "email" in body:
                # Email without password (or vice versa) → incomplete creds.
                return JsonResponse(status=401, data={"message": "authentication information are missing"})
            elif "password" in body:
                return JsonResponse(status=401, data={"message": "authentication information are missing"})
            else:
                return JsonResponse(status=500, data={"message": "authentication information are missing"})
        else:
            return JsonResponse(status=500, data={"message": "host missing"})
    else:
        return JsonResponse(status=400, data={"message": "not allowed"})
##########
# EMAIL
@csrf_exempt
def emailPsk(request):
    """Email a PSK (with SSID and optional expiry) to a user (POST only).

    POST body (JSON) must contain: name, user_email, ssid, psk.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode("utf-8")
        body = json.loads(body_unicode)
        if "name" in body and "user_email" in body and "ssid" in body and "psk" in body:
            resp = mist_smtp.send_psk(
                body["psk"], body["ssid"], body["name"], body["user_email"], body.get("expire_time", None))
            return JsonResponse({"result": resp})
        else:
            # Fix: typo "parametesr" in the client-facing error message.
            return JsonResponse(status=500, data={"message": "missing parameters"})
    else:
        # Fix: non-POST requests previously fell through and returned None,
        # which Django reports as a server error; return a proper 404.
        raise Http404
@csrf_exempt
def disclaimer(request):
    """Return the configured disclaimer/footer links (GET only)."""
    if request.method == "GET":
        return JsonResponse(disclaimer_config)
    else:
        # Fix: typo "parametesr" in the client-facing error message.
        return JsonResponse(status=500, data={"message": "missing parameters"})
|
import copy
import math
import os
import pickle as pkl
import sys
import time
import numpy as np
import dmc2gym
import hydra
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
from logger import Logger
from replay_buffer import ReplayBuffer
from video import VideoRecorder
# Let cuDNN auto-tune convolution algorithms; input shapes are fixed here.
torch.backends.cudnn.benchmark = True
def make_env(cfg):
    """Helper function to create dm_control environment.

    Maps cfg.env ("<domain>_<task>") to a pixel-based dmc2gym env with
    frame stacking, and normalizes actions into [-1, 1].
    """
    # Two env names don't follow the "<domain>_<task>" split convention.
    if cfg.env == 'ball_in_cup_catch':
        domain_name = 'ball_in_cup'
        task_name = 'catch'
    elif cfg.env == 'point_mass_easy':
        domain_name = 'point_mass'
        task_name = 'easy'
    else:
        domain_name = cfg.env.split('_')[0]
        task_name = '_'.join(cfg.env.split('_')[1:])
    # per dreamer: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/wrappers.py#L26
    camera_id = 2 if domain_name == 'quadruped' else 0
    env = dmc2gym.make(domain_name=domain_name,
                       task_name=task_name,
                       seed=cfg.seed,
                       visualize_reward=False,
                       from_pixels=True,
                       height=cfg.image_size,
                       width=cfg.image_size,
                       frame_skip=cfg.action_repeat,
                       camera_id=camera_id)
    env = utils.FrameStack(env, k=cfg.frame_stack)
    env.seed(cfg.seed)
    # The agent assumes actions already normalized to [-1, 1].
    assert env.action_space.low.min() >= -1
    assert env.action_space.high.max() <= 1
    return env
class Workspace(object):
    """Owns one training run: environment, agent, replay buffer, logging
    and video recording, plus the train/eval loops."""
    def __init__(self, cfg):
        self.work_dir = os.getcwd()
        print(f'workspace: {self.work_dir}')
        self.cfg = cfg
        # Log directory name encodes env/augmentation/seed so runs don't collide.
        self.logger = Logger(self.work_dir+"_"+self.cfg.env+"_eval2k_effective_{}_seed_{}".format(self.cfg.effective_aug, self.cfg.seed),
                             save_tb=cfg.log_save_tb,
                             log_frequency=cfg.log_frequency_step,
                             agent=cfg.agent.name,
                             action_repeat=cfg.action_repeat)
        self.effective_aug = self.cfg.effective_aug
        utils.set_seed_everywhere(cfg.seed)
        self.device = torch.device(cfg.device)
        self.env = make_env(cfg)
        # Fill in the env-dependent agent parameters before hydra
        # instantiates the agent from the config.
        cfg.agent.params.obs_shape = self.env.observation_space.shape
        cfg.agent.params.action_shape = self.env.action_space.shape
        cfg.agent.params.action_range = [
            float(self.env.action_space.low.min()),
            float(self.env.action_space.high.max())
        ]
        self.agent = hydra.utils.instantiate(cfg.agent)
        self.replay_buffer = ReplayBuffer(self.env.observation_space.shape,
                                          self.env.action_space.shape,
                                          cfg.replay_buffer_capacity,
                                          self.cfg.image_pad, self.device, self.effective_aug)
        self.video_recorder = VideoRecorder(
            self.work_dir if cfg.save_video else None)
        # Global environment-step counter for the whole run.
        self.step = 0
    def evaluate(self):
        """Run num_eval_episodes greedy episodes and log the mean return."""
        average_episode_reward = 0
        for episode in range(self.cfg.num_eval_episodes):
            obs = self.env.reset()
            # Only the first evaluation episode is recorded to video.
            self.video_recorder.init(enabled=(episode == 0))
            done = False
            episode_reward = 0
            episode_step = 0
            while not done:
                with utils.eval_mode(self.agent):
                    # sample=False → deterministic (greedy) action.
                    action = self.agent.act(obs, sample=False)
                obs, reward, done, info = self.env.step(action)
                self.video_recorder.record(self.env)
                episode_reward += reward
                episode_step += 1
            average_episode_reward += episode_reward
            self.video_recorder.save(f'{self.step}.mp4')
        average_episode_reward /= self.cfg.num_eval_episodes
        self.logger.log('eval/episode_reward', average_episode_reward,
                        self.step)
        self.logger.dump(self.step)
    def run(self):
        """Main loop: random seed steps, then act/update until num_train_steps."""
        episode, episode_reward, episode_step, done = 0, 0, 1, True
        start_time = time.time()
        while self.step < self.cfg.num_train_steps:
            if done:
                if self.step > 0:
                    self.logger.log('train/duration',
                                    time.time() - start_time, self.step)
                    start_time = time.time()
                    self.logger.dump(
                        self.step, save=(self.step > self.cfg.num_seed_steps))
                # evaluate agent periodically
                if self.step % self.cfg.eval_frequency == 0:
                    self.logger.log('eval/episode', episode, self.step)
                    self.evaluate()
                self.logger.log('train/episode_reward', episode_reward,
                                self.step)
                obs = self.env.reset()
                done = False
                episode_reward = 0
                episode_step = 0
                episode += 1
                self.logger.log('train/episode', episode, self.step)
            # sample action for data collection
            if self.step < self.cfg.num_seed_steps:
                action = self.env.action_space.sample()
            else:
                with utils.eval_mode(self.agent):
                    action = self.agent.act(obs, sample=True)
            # run training update
            if self.step >= self.cfg.num_seed_steps:
                for _ in range(self.cfg.num_train_iters):
                    self.agent.update(self.replay_buffer, self.logger,
                                      self.step)
            next_obs, reward, done, info = self.env.step(action)
            # allow infinite bootstrap: mask out terminations caused purely
            # by the time limit so the critic can still bootstrap there.
            done = float(done)
            done_no_max = 0 if episode_step + 1 == self.env._max_episode_steps else done
            episode_reward += reward
            self.replay_buffer.add(obs, action, reward, next_obs, done,
                                   done_no_max)
            obs = next_obs
            episode_step += 1
            self.step += 1
@hydra.main(config_path='config.yaml', strict=True)
def main(cfg):
    """Hydra entry point: build a Workspace from the config and train."""
    # Import by module name so hydra resolves the same class object.
    # NOTE(review): assumes this file is named train.py — confirm.
    from train import Workspace as W
    workspace = W(cfg)
    workspace.run()
# Launch training when executed as a script (hydra parses the CLI).
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# coding: utf-8
# import necessary libraries
import os
import sys
import unittest
#allow the script to be run directly
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#import function to test
from youtube_dl.utils import formatSeconds
#Unit test designed to test the seconds formatting function found in the util.py script
#funtion takes in a value in seconds and formats the output
#Unit test designed to test the seconds formatting function found in the util.py script
#funtion takes in a value in seconds and formats the output
class test_format_seconds(unittest.TestCase):
    """Tests for youtube_dl.utils.formatSeconds."""

    #test a standard sub one minute input
    def test_format_seconds_under_minute(self):
        self.assertEqual(formatSeconds(30), '30')

    #test an edge case of exactly 60 seconds
    def test_format_seconds_one_minute(self):
        self.assertEqual(formatSeconds(60), '60')

    #test minute formatting
    # Fix: this method was also named test_format_seconds_one_minute,
    # which redefined (shadowed) the 60-second test above so that test
    # silently never ran; give it a distinct name.
    def test_format_seconds_over_minute(self):
        self.assertEqual(formatSeconds(61), '1:01')

    def test_format_seconds_ten_minutes(self):
        #test double digit minute formatting
        self.assertEqual(formatSeconds(600), '10:00')

    #test extreme case if 24 hours
    def test_format_second_24_hours(self):
        self.assertEqual(formatSeconds(86400), '24:00:00')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import files.prime as prime
# Precompute the primes up to 1000 using the project's prime helper.
p = prime.all_primes(1000)
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import numpy
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Dataset location and geometry for the MNIST tutorial.
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000  # Size of the validation set.
SEED = 66478  # Set to None for random seed.
# Training/evaluation hyper-parameters.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100  # Number of steps between evaluations.
# --self_test runs a short fake-data smoke test instead of real training.
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
    """Download the data from Yann's website, unless it's already here.

    Returns the local path of the (possibly freshly downloaded) file.
    """
    if not tf.gfile.Exists(WORK_DIRECTORY):
        tf.gfile.MakeDirs(WORK_DIRECTORY)
    filepath = os.path.join(WORK_DIRECTORY, filename)
    if not tf.gfile.Exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        with tf.gfile.GFile(filepath) as f:
            # Fix: tf.gfile.GFile exposes `size()` (lowercase); `f.Size()`
            # raises AttributeError on a fresh download.
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
def extract_data(filename, num_images):
    """Extract the images into a 4D tensor [image index, y, x, channels].

    Values are rescaled from [0, 255] down to [-0.5, 0.5].
    """
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the 16-byte IDX image-file header
        raw = stream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
        pixels = numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.float32)
        pixels = (pixels - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
        return pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
def extract_labels(filename, num_images):
    """Extract the labels into a vector of int64 label IDs."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the 8-byte IDX label-file header
        raw = stream.read(1 * num_images)
        return numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.int64)
def fake_data(num_images):
    """Generate a fake dataset that matches the dimensions of MNIST.

    Image `i` is a constant image of value (i % 2) - 0.5, labelled i % 2.
    """
    shape = (num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
    data = numpy.ndarray(shape=shape, dtype=numpy.float32)
    labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
    for idx in xrange(num_images):
        lbl = idx % 2
        data[idx, :, :, 0] = lbl - 0.5
        labels[idx] = lbl
    return data, labels
def error_rate(predictions, labels):
    """Return the error rate (percent) given dense predictions and sparse labels."""
    predicted_classes = numpy.argmax(predictions, 1)
    correct = numpy.sum(predicted_classes == labels)
    total = predictions.shape[0]
    return 100.0 - 100.0 * correct / total
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
conv1_weights = tf.Variable(
tf.truncated_normal([7, 7, NUM_CHANNELS, 64],
stddev=0.01,
seed=SEED),
name = 'conv1w' )
conv1_biases = tf.Variable(tf.zeros([64]), name='conv1b')
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 64, 256], # 5x5 filter, depth 32.
stddev=0.01,
seed=SEED),
name = 'conv2w' )
conv2_biases = tf.Variable(tf.zeros([256]), name='conv2b')
conv3_weights = tf.Variable(
tf.truncated_normal([3, 3, 256, 512], # 5x5 filter, depth 32.
stddev=0.01,
seed=SEED),
name = 'conv2w' )
conv3_biases = tf.Variable(tf.zeros([512]), name='conv2b')
conv4_weights = tf.Variable(
tf.truncated_normal([3, 3, 512, 1024], # 5x5 filter, depth 32.
stddev=0.01,
seed=SEED),
name = 'conv2w' )
conv4_biases = tf.Variable(tf.zeros([1024]), name='conv2b')
conv5_weights = tf.Variable(
tf.truncated_normal([3, 3, 1024, 512], # 5x5 filter, depth 32.
stddev=0.01,
seed=SEED),
name = 'conv2w' )
conv5_biases = tf.Variable(tf.zeros([512]), name='conv2b')
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[2 * 2 * 512, 4096],
stddev=0.01,
seed=SEED), name='fc1w')
fc1_biases = tf.Variable(tf.constant(0.01, shape=[4096]), name='fc1b')
fc2_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[4096, 4096],
stddev=0.01,
seed=SEED), name='fc2w')
fc2_biases = tf.Variable(tf.constant(0.01, shape=[4096]), name='fc2b')
fc_out_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[4096, 10],
stddev=0.01,
seed=SEED), name='fcoutw')
fc_out_biases = tf.Variable(tf.constant(0.1, shape=[10]), name='fc1outb')
def model(data, train=False):
    """The Model definition.

    Forward pass: conv1 (7x7) + pool, conv2 (5x5) + pool, conv3/conv4 (3x3)
    + pool, then one fully connected hidden layer (with dropout while
    training) and a linear output layer.

    Args:
        data: float32 image batch, (batch, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS).
        train: when True, apply dropout to the fully connected layer.

    Returns:
        Unscaled logits tensor of shape (batch, 10).
    """
    conv1 = tf.nn.conv2d(data,
                         conv1_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    conv1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # Overlapping 3x3 pooling with stride 2 halves spatial resolution.
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
    conv2 = tf.nn.conv2d(pool1,
                         conv2_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    conv2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    pool2 = tf.nn.max_pool(conv2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
    conv3 = tf.nn.conv2d(pool2,
                         conv3_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    conv3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
    conv4 = tf.nn.conv2d(conv3,
                         conv4_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    conv4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
    # NOTE(review): the fifth conv layer is disabled, so conv5_weights and
    # conv5_biases defined above are currently unused.
    #conv5 = tf.nn.conv2d(conv4,
    #                     conv5_weights,
    #                     strides=[1, 1, 1, 1],
    #                     padding='SAME')
    #conv5 = tf.nn.relu(tf.nn.bias_add(conv5, conv5_biases))
    pool5 = tf.nn.max_pool(conv4,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
    # Flatten (batch, h, w, c) -> (batch, h*w*c) for the dense layers.
    pool5_shape = pool5.get_shape().as_list()
    reshape = tf.reshape(pool5, [pool5_shape[0], np.prod(pool5_shape[1:])])
    fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    if train:
        # Dropout only while training; evaluation uses the full network.
        fc1 = tf.nn.dropout(fc1, 0.5, seed=SEED)
    # NOTE(review): the second fully connected layer is disabled as well, so
    # fc2_weights/fc2_biases are unused while this stays commented out.
    #fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
    #if train:
    #    fc2 = tf.nn.dropout(fc2, 0.5, seed=SEED)
    fc_out = tf.matmul(fc1, fc_out_weights) + fc_out_biases
    return fc_out
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits, train_labels_node))
# L2 regularization for the fully connected parameters (currently disabled).
#regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
#                tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
#loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, trainable=False)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
    0.01,                # Base learning rate.
    batch * BATCH_SIZE,  # Current index into the dataset.
    train_size,          # Decay step.
    0.95,                # Decay rate.
    staircase=True)
# Use simple momentum for the optimization; minimize() also increments
# `batch` via global_step so the decay schedule advances.
optimizer = tf.train.MomentumOptimizer(learning_rate,
                                       0.9).minimize(loss,
                                                     global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
    """Get all predictions for a dataset by running it in small batches.

    Args:
        data: the full evaluation set; data.shape[0] is the number of samples.
        sess: an active TensorFlow session.

    Returns:
        numpy float32 array of shape (size, NUM_LABELS) with the softmax
        predictions for every sample.

    Raises:
        ValueError: if the dataset holds fewer than EVAL_BATCH_SIZE samples.
    """
    size = data.shape[0]
    if size < EVAL_BATCH_SIZE:
        raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
        end = begin + EVAL_BATCH_SIZE
        if end <= size:
            predictions[begin:end, :] = sess.run(
                eval_prediction,
                feed_dict={eval_data: data[begin:end, ...]})
        else:
            # Final partial batch: rerun on the last EVAL_BATCH_SIZE rows and
            # keep only the tail that has not been filled in yet.
            batch_predictions = sess.run(
                eval_prediction,
                feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
            predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
        # Compute the offset of the current minibatch in the data.
        # Note that we could use better randomization across epochs.
        offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
        batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
        batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
        # This dictionary maps the batch data (as a numpy array) to the
        # node in the graph it should be fed to.
        feed_dict = {train_data_node: batch_data,
                     train_labels_node: batch_labels}
        # Run the graph and fetch some of the nodes.
        _, l, lr, predictions = sess.run(
            [optimizer, loss, learning_rate, train_prediction],
            feed_dict=feed_dict)
        # NOTE(review): TV is assigned every step but never read afterwards.
        TV = tf.trainable_variables()
        if step % EVAL_FREQUENCY == 0:
            # Periodic progress report: timing, loss, LR and error rates.
            elapsed_time = time.time() - start_time
            start_time = time.time()
            print('Step %d (epoch %.2f), %.1f ms' %
                  (step, float(step) * BATCH_SIZE / train_size,
                   1000 * elapsed_time / EVAL_FREQUENCY))
            print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
            print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
            print('Validation error: %.1f%%' % error_rate(
                eval_in_batches(validation_data, sess), validation_labels))
            sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
        print('test_error', test_error)
        assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
            test_error,)
# Standard TF launcher: parses flags and dispatches into the app entry point.
if __name__ == '__main__':
    tf.app.run()
|
from Blackjack.src.deck import Deck
import unittest
class TestDeck(unittest.TestCase):
    """Smoke tests for the Blackjack Deck class."""

    def setUp(self):
        """Create a fresh deck before every test."""
        self.deck = Deck()

    def test_loop_draw(self):
        """Drawing 100 cards in a row should not raise."""
        remaining = 100
        while remaining:
            self.deck.draw_card()
            remaining -= 1
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from io import StringIO
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from synapse.api.errors import Codes, RedirectException, SynapseError
from synapse.config.server import parse_listener_def
from synapse.http.server import DirectServeHtmlResource, JsonResource, OptionsResource
from synapse.http.site import SynapseSite, logger
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock
from tests import unittest
from tests.server import (
FakeTransport,
ThreadedMemoryReactorClock,
make_request,
render,
setup_test_homeserver,
)
class JsonResourceTests(unittest.TestCase):
    """Tests for JsonResource: URL-arg decoding and error translation."""

    def setUp(self):
        # Deterministic reactor/clock pair backing a throwaway homeserver.
        self.reactor = ThreadedMemoryReactorClock()
        self.hs_clock = Clock(self.reactor)
        self.homeserver = setup_test_homeserver(
            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
        )

    def test_handler_for_request(self):
        """
        JsonResource.handler_for_request gives correctly decoded URL args to
        the callback, while Twisted will give the raw bytes of URL query
        arguments.
        """
        got_kwargs = {}

        def _callback(request, **kwargs):
            got_kwargs.update(kwargs)
            return 200, kwargs

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET",
            [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")],
            _callback,
            "test_servlet",
        )
        # %E2%98%83 is the UTF-8 percent-encoding of the snowman character.
        request, channel = make_request(
            self.reactor, b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83"
        )
        render(request, res, self.reactor)

        # Query args stay raw bytes; the path group is decoded to text.
        self.assertEqual(request.args, {b"a": ["\N{SNOWMAN}".encode("utf8")]})
        self.assertEqual(got_kwargs, {"room_id": "\N{SNOWMAN}"})

    def test_callback_direct_exception(self):
        """
        If the web callback raises an uncaught exception, it will be translated
        into a 500.
        """

        def _callback(request, **kwargs):
            raise Exception("boo")

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
        )
        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)
        self.assertEqual(channel.result["code"], b"500")

    def test_callback_indirect_exception(self):
        """
        If the web callback raises an uncaught exception in a Deferred, it will
        be translated into a 500.
        """

        def _throw(*args):
            raise Exception("boo")

        def _callback(request, **kwargs):
            # Fail asynchronously: the Deferred fires later on the clock.
            d = Deferred()
            d.addCallback(_throw)
            self.reactor.callLater(1, d.callback, True)
            return make_deferred_yieldable(d)

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
        )
        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)
        self.assertEqual(channel.result["code"], b"500")

    def test_callback_synapseerror(self):
        """
        If the web callback raises a SynapseError, it returns the appropriate
        status code and message set in it.
        """

        def _callback(request, **kwargs):
            raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
        )
        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)
        self.assertEqual(channel.result["code"], b"403")
        self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
        self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")

    def test_no_handler(self):
        """
        If there is no handler to process the request, Synapse will return 400.
        """

        def _callback(request, **kwargs):
            """
            Not ever actually called!
            """
            self.fail("shouldn't ever get here")

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
        )
        # Requested path does not match the registered pattern.
        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar")
        render(request, res, self.reactor)
        self.assertEqual(channel.result["code"], b"400")
        self.assertEqual(channel.json_body["error"], "Unrecognized request")
        self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
class OptionsResourceTests(unittest.TestCase):
    """Tests for OptionsResource: CORS handling of OPTIONS requests and
    pass-through of everything else."""

    def setUp(self):
        self.reactor = ThreadedMemoryReactorClock()

        class DummyResource(Resource):
            isLeaf = True

            def render(self, request):
                # Echo the request path so tests can confirm dispatch.
                return request.path

        # Setup a resource with some children.
        self.resource = OptionsResource()
        self.resource.putChild(b"res", DummyResource())

    def _make_request(self, method, path):
        """Create a request from the method/path and return a channel with the response."""
        request, channel = make_request(self.reactor, method, path, shorthand=False)
        request.prepath = []  # This doesn't get set properly by make_request.

        # Create a site and query for the resource.
        site = SynapseSite(
            "test",
            "site_tag",
            parse_listener_def({"type": "http", "port": 0}),
            self.resource,
            "1.0",
        )
        request.site = site
        resource = site.getResourceFor(request)

        # Finally, render the resource and return the channel.
        render(request, resource, self.reactor)
        return channel

    def test_unknown_options_request(self):
        """An OPTIONS requests to an unknown URL still returns 200 OK."""
        channel = self._make_request(b"OPTIONS", b"/foo/")
        self.assertEqual(channel.result["code"], b"200")
        self.assertEqual(channel.result["body"], b"{}")

        # Ensure the correct CORS headers have been added
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Origin"),
            "has CORS Origin header",
        )
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Methods"),
            "has CORS Methods header",
        )
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Headers"),
            "has CORS Headers header",
        )

    def test_known_options_request(self):
        """An OPTIONS requests to an known URL still returns 200 OK."""
        channel = self._make_request(b"OPTIONS", b"/res/")
        self.assertEqual(channel.result["code"], b"200")
        self.assertEqual(channel.result["body"], b"{}")

        # Ensure the correct CORS headers have been added
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Origin"),
            "has CORS Origin header",
        )
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Methods"),
            "has CORS Methods header",
        )
        self.assertTrue(
            channel.headers.hasHeader(b"Access-Control-Allow-Headers"),
            "has CORS Headers header",
        )

    def test_unknown_request(self):
        """A non-OPTIONS request to an unknown URL should 404."""
        channel = self._make_request(b"GET", b"/foo/")
        self.assertEqual(channel.result["code"], b"404")

    def test_known_request(self):
        """A non-OPTIONS request to an known URL should query the proper resource."""
        channel = self._make_request(b"GET", b"/res/")
        self.assertEqual(channel.result["code"], b"200")
        # DummyResource echoes the path it was dispatched with.
        self.assertEqual(channel.result["body"], b"/res/")
class WrapHtmlRequestHandlerTests(unittest.TestCase):
    """Tests for DirectServeHtmlResource's wrapping of async render methods."""

    class TestResource(DirectServeHtmlResource):
        # Per-test callback injected by each test before rendering.
        callback = None

        async def _async_render_GET(self, request):
            await self.callback(request)

    def setUp(self):
        self.reactor = ThreadedMemoryReactorClock()

    def test_good_response(self):
        """A callback that writes a body and finishes yields a plain 200."""

        def callback(request):
            request.write(b"response")
            request.finish()

        res = WrapHtmlRequestHandlerTests.TestResource()
        res.callback = callback

        request, channel = make_request(self.reactor, b"GET", b"/path")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b"200")
        body = channel.result["body"]
        self.assertEqual(body, b"response")

    def test_redirect_exception(self):
        """
        If the callback raises a RedirectException, it is turned into a 30x
        with the right location.
        """

        def callback(request, **kwargs):
            raise RedirectException(b"/look/an/eagle", 301)

        res = WrapHtmlRequestHandlerTests.TestResource()
        res.callback = callback

        request, channel = make_request(self.reactor, b"GET", b"/path")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b"301")
        headers = channel.result["headers"]
        location_headers = [v for k, v in headers if k == b"Location"]
        self.assertEqual(location_headers, [b"/look/an/eagle"])

    def test_redirect_exception_with_cookie(self):
        """
        If the callback raises a RedirectException which sets a cookie, that is
        returned too
        """

        def callback(request, **kwargs):
            e = RedirectException(b"/no/over/there", 304)
            e.cookies.append(b"session=yespls")
            raise e

        res = WrapHtmlRequestHandlerTests.TestResource()
        res.callback = callback

        request, channel = make_request(self.reactor, b"GET", b"/path")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b"304")
        headers = channel.result["headers"]
        location_headers = [v for k, v in headers if k == b"Location"]
        self.assertEqual(location_headers, [b"/no/over/there"])
        cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
        self.assertEqual(cookies_headers, [b"session=yespls"])
class SiteTestCase(unittest.HomeserverTestCase):
    """Tests for SynapseSite's request logging."""

    def test_lose_connection(self):
        """
        We log the URI correctly redacted when we lose the connection.
        """

        class HangingResource(Resource):
            """
            A Resource that strategically hangs, as if it were processing an
            answer.
            """

            def render(self, request):
                return NOT_DONE_YET

        # Set up a logging handler that we can inspect afterwards
        output = StringIO()
        handler = logging.StreamHandler(output)
        logger.addHandler(handler)
        old_level = logger.level
        logger.setLevel(10)  # DEBUG, so request/failure lines are captured
        self.addCleanup(logger.setLevel, old_level)
        self.addCleanup(logger.removeHandler, handler)

        # Make a resource and a Site, the resource will hang and allow us to
        # time out the request while it's 'processing'
        base_resource = Resource()
        base_resource.putChild(b"", HangingResource())
        site = SynapseSite(
            "test", "site_tag", self.hs.config.listeners[0], base_resource, "1.0"
        )

        # Wire a fake client and server protocol directly to each other.
        server = site.buildProtocol(None)
        client = AccumulatingProtocol()
        client.makeConnection(FakeTransport(server, self.reactor))
        server.makeConnection(FakeTransport(client, self.reactor))

        # Send a request with an access token that will get redacted
        server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n")
        self.pump()

        # Lose the connection
        e = Failure(Exception("Failed123"))
        server.connectionLost(e)
        handler.flush()

        # Our access token is redacted and the failure reason is logged.
        self.assertIn("/?access_token=<redacted>", output.getvalue())
        self.assertIn("Failed123", output.getvalue())
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import codecs
import doctest
import os
import re
import shutil
import sys
import tempfile
import time
import traceback
from extra.beep.beep import beep
from lib.controller.controller import start
from lib.core.common import checkIntegrity
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getUnicode
from lib.core.common import randomStr
from lib.core.common import readXmlFile
from lib.core.data import conf
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.log import LOGGER_HANDLER
from lib.core.option import init
from lib.core.option import initOptions
from lib.core.option import setVerbosity
from lib.core.optiondict import optDict
from lib.core.settings import UNICODE_ENCODING
from lib.parse.cmdline import cmdLineParser
class Failures(object):
    """Mutable record of what went wrong during a single test case."""
    # Parse items (literal strings or r'...' regexes) not found in the output.
    failedItems = None
    # The console output the failed parsing was attempted against.
    failedParseOn = None
    # Formatted traceback captured when the case raised an exception.
    failedTraceBack = None

# Module-level singleton; reset by initCase() before every test case.
_failures = Failures()
def smokeTest():
    """
    Runs the basic smoke testing of a program

    Imports every first-party .py module under SQLMAP_ROOT_PATH (skipping
    "thirdparty" and "extra") and runs its doctests. Returns True when all
    modules import cleanly and all doctests pass.
    """
    retVal = True
    count, length = 0, 0

    if not checkIntegrity():
        retVal = False

    # First pass only counts the candidate modules so progress can be shown.
    for root, _, files in os.walk(paths.SQLMAP_ROOT_PATH):
        if any(_ in root for _ in ("thirdparty", "extra")):
            continue
        for filename in files:
            if os.path.splitext(filename)[1].lower() == ".py" and filename != "__init__.py":
                length += 1

    # Second pass imports each module and runs its doctests.
    for root, _, files in os.walk(paths.SQLMAP_ROOT_PATH):
        if any(_ in root for _ in ("thirdparty", "extra")):
            continue
        for filename in files:
            if os.path.splitext(filename)[1].lower() == ".py" and filename != "__init__.py":
                # Turn the file path into a dotted module path for __import__.
                path = os.path.join(root, os.path.splitext(filename)[0])
                path = path.replace(paths.SQLMAP_ROOT_PATH, '.')
                path = path.replace(os.sep, '.').lstrip('.')
                try:
                    __import__(path)
                    module = sys.modules[path]
                except Exception as ex:
                    retVal = False
                    dataToStdout("\r")
                    errMsg = "smoke test failed at importing module '%s' (%s):\n%s" % (path, os.path.join(root, filename), ex)
                    logger.error(errMsg)
                else:
                    # Run doc tests
                    # Reference: http://docs.python.org/library/doctest.html
                    (failure_count, test_count) = doctest.testmod(module)
                    if failure_count > 0:
                        retVal = False

                count += 1
                status = '%d/%d (%d%%) ' % (count, length, round(100.0 * count / length))
                dataToStdout("\r[%s] [INFO] complete: %s" % (time.strftime("%X"), status))

    clearConsoleLine()
    if retVal:
        logger.info("smoke test final result: PASSED")
    else:
        logger.error("smoke test final result: FAILED")

    return retVal
def adjustValueType(tagName, value):
    """Coerce the raw string *value* to the type registered for option
    *tagName* in optDict; unknown tags are returned unchanged."""
    casts = {
        "boolean": lambda raw: raw == "True",
        "integer": int,
        "float": float,
    }
    for family in optDict.keys():
        for name, declared in optDict[family].items():
            if isinstance(declared, tuple):
                declared = declared[0]  # (type, ...) tuples store the type first
            if tagName == name:
                cast = casts.get(declared)
                if cast is not None:
                    value = cast(value)
                break
    return value
def liveTest():
    """
    Runs the test of a program against the live testing environment

    Reads test cases from LIVE_TESTS_XML, runs each one via runCase() and
    reports a combined PASSED/FAILED verdict. Returns True only when every
    executed case passed.
    """
    retVal = True
    count = 0
    global_ = {}
    vars_ = {}

    livetests = readXmlFile(paths.LIVE_TESTS_XML)
    length = len(livetests.getElementsByTagName("case"))

    # <global> switches apply to every case unless overridden per case.
    element = livetests.getElementsByTagName("global")
    if element:
        for item in element:
            for child in item.childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    global_[child.tagName] = adjustValueType(child.tagName, child.getAttribute("value"))

    # <vars> entries define ${name} substitutions; "random" draws a fresh value.
    element = livetests.getElementsByTagName("vars")
    if element:
        for item in element:
            for child in item.childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    var = child.getAttribute("value")
                    vars_[child.tagName] = randomStr(6) if var == "random" else var

    for case in livetests.getElementsByTagName("case"):
        parse_from_console_output = False
        count += 1
        name = None
        parse = []
        switches = dict(global_)
        value = ""
        vulnerable = True
        result = None

        if case.hasAttribute("name"):
            name = case.getAttribute("name")

        # conf.runCase may select cases by ordinal or by name regex.
        if conf.runCase and ((conf.runCase.isdigit() and conf.runCase != count) or not re.search(conf.runCase, name, re.DOTALL)):
            continue

        if case.getElementsByTagName("switches"):
            for child in case.getElementsByTagName("switches")[0].childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    value = replaceVars(child.getAttribute("value"), vars_)
                    switches[child.tagName] = adjustValueType(child.tagName, value)

        if case.getElementsByTagName("parse"):
            for item in case.getElementsByTagName("parse")[0].getElementsByTagName("item"):
                if item.hasAttribute("value"):
                    value = replaceVars(item.getAttribute("value"), vars_)
                if item.hasAttribute("console_output"):
                    parse_from_console_output = bool(item.getAttribute("console_output"))
                parse.append((value, parse_from_console_output))

        conf.verbose = global_.get("verbose", 1)
        setVerbosity()

        msg = "running live test case: %s (%d/%d)" % (name, count, length)
        logger.info(msg)
        initCase(switches, count)

        # Record the case name (and a failure message, if any) in the scan dir.
        test_case_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "test_case"), "wb", UNICODE_ENCODING)
        test_case_fd.write("%s\n" % name)

        try:
            result = runCase(parse)
        except SqlmapNotVulnerableException:
            vulnerable = False
        finally:
            # Restore verbosity that the scan may have changed.
            conf.verbose = global_.get("verbose", 1)
            setVerbosity()

        if result is True:
            logger.info("test passed")
            cleanCase()
        else:
            errMsg = "test failed"

            if _failures.failedItems:
                errMsg += " at parsing items: %s" % ", ".join(i for i in _failures.failedItems)

            errMsg += " - scan folder: %s" % paths.SQLMAP_OUTPUT_PATH
            errMsg += " - traceback: %s" % bool(_failures.failedTraceBack)

            if not vulnerable:
                errMsg += " - SQL injection not detected"

            logger.error(errMsg)
            test_case_fd.write("%s\n" % errMsg)

            # Keep the console output and traceback next to the scan results
            # so failures can be inspected afterwards.
            if _failures.failedParseOn:
                console_output_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "console_output"), "wb", UNICODE_ENCODING)
                console_output_fd.write(_failures.failedParseOn)
                console_output_fd.close()

            if _failures.failedTraceBack:
                traceback_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "traceback"), "wb", UNICODE_ENCODING)
                traceback_fd.write(_failures.failedTraceBack)
                traceback_fd.close()

            beep()

            # --stop-fail: abort the whole run on the first failing case.
            if conf.stopFail is True:
                return retVal

        test_case_fd.close()
        retVal &= bool(result)

    dataToStdout("\n")

    if retVal:
        logger.info("live test final result: PASSED")
    else:
        logger.error("live test final result: FAILED")

    return retVal
def initCase(switches, count):
    """Prepare global state for one live test case.

    Resets the failure record, points the output paths at a fresh temp
    directory, redirects stdout/logging so runCase() can parse the console
    output, and (re)initializes sqlmap options from *switches*.
    """
    _failures.failedItems = []
    _failures.failedParseOn = None
    _failures.failedTraceBack = None

    paths.SQLMAP_OUTPUT_PATH = tempfile.mkdtemp(prefix="%s%d-" % (MKSTEMP_PREFIX.TESTING, count))
    paths.SQLMAP_DUMP_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "dump")
    paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "files")

    logger.debug("using output directory '%s' for this test case" % paths.SQLMAP_OUTPUT_PATH)

    # Capture everything written to stdout/log in a spooled temp file; it is
    # read back (and stdout restored) by runCase().
    LOGGER_HANDLER.stream = sys.stdout = tempfile.SpooledTemporaryFile(max_size=0, mode="w+b", prefix="sqlmapstdout-")

    cmdLineOptions = cmdLineParser()

    # Overlay the case's switches on top of the default command-line options.
    if switches:
        for key, value in switches.items():
            if key in cmdLineOptions.__dict__:
                cmdLineOptions.__dict__[key] = value

    initOptions(cmdLineOptions, True)
    init()
def cleanCase():
    """Remove the temporary output directory of the current test case."""
    # ignore_errors keeps cleanup best-effort, as before.
    shutil.rmtree(paths.SQLMAP_OUTPUT_PATH, ignore_errors=True)
def runCase(parse):
    """Run a single sqlmap scan and verify its output against *parse* items.

    Args:
        parse: list of (value, from_console) pairs; value is either a literal
            substring or an "r'...'"-wrapped regex to look for in the dump
            file or (when from_console is true) the captured console output.

    Returns:
        True on success, False when no injection was detected, None when an
        exception was raised or a parse item was not found.
    """
    retVal = True
    handled_exception = None
    unhandled_exception = None
    result = False
    console = ""

    try:
        result = start()
    except KeyboardInterrupt:
        pass
    except SqlmapBaseException as ex:
        handled_exception = ex
    except Exception as ex:
        unhandled_exception = ex
    finally:
        # initCase() redirected stdout into a temp file; read it back and
        # restore the real stdout/logging stream.
        sys.stdout.seek(0)
        console = sys.stdout.read()
        LOGGER_HANDLER.stream = sys.stdout = sys.__stdout__

    if unhandled_exception:
        _failures.failedTraceBack = "unhandled exception: %s" % str(traceback.format_exc())
        retVal = None
    elif handled_exception:
        _failures.failedTraceBack = "handled exception: %s" % str(traceback.format_exc())
        retVal = None
    elif result is False:  # this means no SQL injection has been detected - if None, ignore
        retVal = False

    console = getUnicode(console, encoding=sys.stdin.encoding)

    if parse and retVal:
        with codecs.open(conf.dumper.getOutputFile(), "rb", UNICODE_ENCODING) as f:
            content = f.read()

        for item, parse_from_console_output in parse:
            parse_on = console if parse_from_console_output else content

            if item.startswith("r'") and item.endswith("'"):
                # Regex item: strip the r'...' wrapper and search the output.
                if not re.search(item[2:-1], parse_on, re.DOTALL):
                    retVal = None
                    _failures.failedItems.append(item)

            elif item not in parse_on:
                retVal = None
                _failures.failedItems.append(item)

        if _failures.failedItems:
            _failures.failedParseOn = console

    elif retVal is False:
        _failures.failedParseOn = console

    return retVal
def replaceVars(item, vars_):
    """Substitute ``${name}`` placeholders in *item* with values from *vars_*.

    Placeholders with no matching entry in *vars_* are left untouched;
    *item* is returned unchanged when either argument is empty/None.
    """
    retVal = item
    if item and vars_:
        # Raw string: "\$" in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python).
        for var in re.findall(r"\$\{([^}]+)\}", item):
            if var in vars_:
                retVal = retVal.replace("${%s}" % var, vars_[var])
    return retVal
|
#!/usr/bin/env python
#-*- coding: utf8 -*-
from pygame.locals import *
import pygame
import sys
class EventManager (object):
    """Thin wrapper around the pygame event queue.

    Terminates the process on quit events (window close, Meta/Cmd+Q),
    swallows pause inputs (ESC/PAUSE keys, joystick button 5) while setting
    a "stopped" flag, and passes all remaining events through to the caller.
    """
    def __init__(self):
        self._stopped = False
        self._toggle_full_screen = False
    def get_events(self):
        """Drain the event queue, returning the events not handled here."""
        ret = []
        self._stopped = False
        self._toggle_full_screen = False
        # Check modifier state once for the whole batch of queued events.
        mods = pygame.key.get_mods()
        metaPressed = mods & pygame.KMOD_META
        for event in pygame.event.get():
            if self._is_quit(event, metaPressed):
                pygame.quit()
                sys.exit(0)
            # Pause inputs set the stopped flag and are not passed through.
            if event.type == pygame.KEYDOWN and event.key in [K_ESCAPE, K_PAUSE] or \
               event.type == pygame.JOYBUTTONDOWN and event.button == 5:
                self._stopped = True
                continue
            ret.append(event)
        return ret
    def game_stopped(self):
        # True when the last get_events() call saw a pause/stop input.
        return self._stopped
    def toggled_full_screen(self):
        # NOTE(review): _toggle_full_screen is never set to True anywhere in
        # this class, so this always reports False after get_events().
        return self._toggle_full_screen
    def wait(self):
        """Block until the next event; exits the process on a quit event."""
        event = pygame.event.wait()
        if self._is_quit(event):
            pygame.quit()
            sys.exit(0)
        return event
    def _is_quit(self, event, metaPressed = None):
        """Return True for window-close or Meta/Cmd+Q key events."""
        if event.type == pygame.QUIT:
            return True
        if metaPressed is None:
            # Caller did not supply modifier state; query it now.
            mods = pygame.key.get_mods()
            metaPressed = mods & pygame.KMOD_META
        if event.type == pygame.KEYDOWN and event.key == pygame.K_q and metaPressed:
            return True
        return False
|
from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
# Rows are read as strings; numeric fields are cast inside graphwerk().
ad = genfromtxt(f"../financial_data/MEG.csv", delimiter=",", dtype=str)
# Drop the first 1500 rows, keeping only the more recent history.
ad = ad[1500:]
def convolve_sma(array, period):
    """Simple moving average of *array* over a *period*-sample window.

    Returns only the fully-overlapping part of the convolution, i.e. an
    output of length ``len(array) - period + 1``.
    """
    kernel = np.ones((period,)) / period
    return np.convolve(array, kernel, mode="valid")
def graphwerk(start, finish):
    """Render one candlestick chart for bars [start, finish) of the global
    `pd` array and save it as a labelled training image.

    The buy/sell label looks at the bars after the charted window: if any
    high in that forecast window exceeds the last charted close by 6%, the
    image goes to `buy_dir`, otherwise to `sell_dir`.

    NOTE(review): reads globals `pd`, `buy_dir`, `sell_dir`; with the
    standard 18-bar call it indexes up to start + 29, so callers must leave
    enough bars at the end of the series.
    """
    # NOTE(review): `open` shadows the builtin of the same name here.
    open = []
    high = []
    low = []
    close = []
    volume = []
    # decision = []
    date = []
    # Forecast window: c_start = start + 18 == finish for 18-bar calls, so
    # these lists hold the (finish - start - 6) bars following the chart.
    c_open = []
    c_high = []
    c_low = []
    c_close = []
    c_volume = []
    c_date = []
    c_start = start + 18
    for x in range(finish - start - 6):
        c_open.append(float(pd[c_start][1]))
        c_high.append(float(pd[c_start][2]))
        c_low.append(float(pd[c_start][3]))
        c_close.append(float(pd[c_start][4]))
        c_volume.append(float(pd[c_start][5]))
        c_date.append(pd[c_start][0])
        c_start = c_start + 1
    for x in range(finish - start):
        # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out
        # what means open, high and close in their respective order.
        open.append(float(pd[start][1]))
        high.append(float(pd[start][2]))
        low.append(float(pd[start][3]))
        close.append(float(pd[start][4]))
        volume.append(float(pd[start][5]))
        # decision.append(str(pd[start][6]))
        date.append(pd[start][0])
        start = start + 1
    # Label: "buy" when the forecast window's high beats the last close by 6%.
    decision = "sell"
    min_forecast = min(c_low)
    max_forecast = max(c_high)
    if close[-1] * 1.06 < max_forecast:
        decision = "buy"
    # for z in all_prices:
    #     if close[-1] * 1.03 < z:
    #         decision = "buy"
    # 5-bar simple moving average, linearly extrapolated to the window length
    # (currently only computed; the plot line below is commented out).
    sma = convolve_sma(close, 5)
    smb = list(sma)
    diff = sma[-1] - sma[-2]
    for x in range(len(close) - len(smb)):
        smb.append(smb[-1] + diff)
    # num=1 reuses a single figure across calls; cleared again at the end.
    fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k")
    dx = fig.add_subplot(111)
    # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)
    mpl_finance.candlestick2_ochl(
        dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5
    )
    plt.autoscale()
    # plt.plot(smb, color="blue", linewidth=10, alpha=0.5)
    # Axes hidden: the saved image feeds a model, not a human reader.
    plt.axis("off")
    if decision == "sell":
        print("last value: " + str(close[-1]))
        print(
            "range of values in next 13 bars: "
            + str(min_forecast)
            + "-"
            + str(max_forecast)
        )
        print("sell")
        plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
    else:
        print("last value: " + str(close[-1]))
        print(
            "range of values in next 13 bars: "
            + str(min_forecast)
            + "-"
            + str(max_forecast)
        )
        print("buy")
        plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
    # Historical labelling variant kept for reference:
    # if close[-1] >= close_next:
    #     print('previous value is bigger')
    #     print('last value: ' + str(close[-1]))
    #     print('next value: ' + str(close_next))
    #     print('sell')
    #     plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')
    # else:
    #     print('previous value is smaller')
    #     print('last value: '+ str(close[-1]))
    #     print('next value: ' + str(close_next))
    #     print('buy')
    #     plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')
    # plt.show()
    # Reset list/figure state before the next call.
    open.clear()
    close.clear()
    volume.clear()
    high.clear()
    low.clear()
    plt.cla()
    plt.clf()
# output = []
# with open("STOCKbluechip.csv") as f:
#     output = [str(s) for line in f.readlines() for s in line[:-1].split(",")]
# for stock in output:
pd = ad  # full price history; graphwerk() indexes into this global
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
# NOTE(review): `iter` shadows the builtin of the same name.
iter = 0
# Slide an 18-bar window over the series, advancing 2 bars per image.
# NOTE(review): graphwerk() reads up to index iter + 29 for its forecast
# window, so late iterations of this loop will raise IndexError near the
# end of the series — confirm the intended range before a full pass.
for x in range(len(pd)):
    graphwerk(iter, iter + 18)
    iter = iter + 2
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 21 20:05:06 2017
@author: pd
"""
from IPython import get_ipython
get_ipython().magic('reset -sf')  # clear the interactive namespace (-s silent, -f force)
from sklearn import datasets
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

# Synthetic, reproducible binary-classification dataset.
X, y = make_classification(1000, n_features=5,
                           random_state=5)
# 'balanced' replaces the long-deprecated (and since removed) 'auto' mode:
# class weights are set inversely proportional to class frequencies.
lr = LogisticRegression(class_weight='balanced')
#'penalty': ['l1', 'l2'],
# 'C': [1, 2, 3, 4, 5]
|
from django.db import models
# Create your models here.
class Post(models.Model):
    """A minimal blog post with automatic created/modified timestamps."""

    title = models.CharField(max_length=30)
    content = models.TextField()
    # auto_now: refreshed on every save(); auto_now_add: set once at creation.
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)

    def __str__(self):
        """Use the post title as the human-readable representation."""
        return str(self.title)
|
import matplotlib.pyplot as plt

# Sample data points for a simple line chart.
xs = [2, 6, 9, 1]
ys = [8, 3, 7, 1]

plt.plot(xs, ys)
plt.title('line')
plt.xlabel('x')
plt.ylabel('y')
plt.grid(axis='both')
plt.show()
|
import json
import os
import pickle
import re
import shutil
from pathlib import Path
def read_file(filepath, encoding="utf-8"):
    """Return the full text content of *filepath*.

    Raises:
        ValueError: if the file does not exist.
    """
    try:
        handle = open(filepath, encoding=encoding)
    except FileNotFoundError as err:
        raise ValueError(f"File '{filepath}' does not exist.") from err
    with handle:
        return handle.read()
def read_json_file(filepath):
    """Read and parse a JSON document from *filepath*.

    Raises:
        ValueError: if the file content is not valid JSON
            (json.JSONDecodeError subclasses ValueError).
    """
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        return data
    except ValueError as e:
        abspath = os.path.abspath(filepath)
        # Fixed: the message was missing its closing quote.
        raise ValueError(f"Failed to read json from '{abspath}'") from e
def json_to_string(js_dic, **kwargs):
    """Serialize *js_dic* to a JSON string.

    Defaults to a 2-space indent with non-ASCII characters kept verbatim;
    both can be overridden via keyword arguments, and any extra kwargs are
    forwarded to json.dumps.
    """
    options = {
        "indent": kwargs.pop("indent", 2),
        "ensure_ascii": kwargs.pop("ensure_ascii", False),
    }
    options.update(kwargs)
    return json.dumps(js_dic, **options)
def write_text_file(content, filepath, encoding='utf-8', append=False):
    """Writes text to a file.

    Args:
        content: The content to write.
        filepath: The path to which the content should be written.
        encoding: The encoding which should be used.
        append: Whether to append to the file or to truncate the file.
    """
    with open(filepath, "a" if append else "w", encoding=encoding) as sink:
        sink.write(content)
def write_json_file(js_obj, filepath, **kwargs):
    """Serialize *js_obj* to JSON (via json_to_string) and write it to *filepath*."""
    write_text_file(json_to_string(js_obj, **kwargs), filepath)
def load_pickle(filepath):
    """Load and return a pickled object from *filepath*.

    Raises:
        ValueError: if the file does not exist.
    """
    try:
        handle = open(filepath, "rb")
    except FileNotFoundError as err:
        raise ValueError(f"File '{filepath}' does not exist.") from err
    with handle:
        return pickle.load(handle)
def save_pickle(filepath, obj):
    """Serialize *obj* into the file *filepath* using pickle."""
    with open(filepath, "wb") as sink:
        pickle.dump(obj, sink)
def delete_directory_tree(filepath):
    """Recursively delete the directory *filepath* and everything inside it."""
    shutil.rmtree(filepath)
def _ext_regex(ext):
return re.compile(f".*(\.{ext})")
def _is_match(regex, string):
return regex.match(string) is not None
def _any_match(regex_list, string):
matches = [_is_match(regex, string) for regex in regex_list]
return any(matches)
def listdir_with_exclusions(path, names=None, ext=None):
    """List directory entries, dropping any whose name matches one of the
    *names* regexes or whose extension is in *ext*.

    Raises:
        ValueError: If *path* is not a directory.
    """
    directory = Path(path)
    if not directory.is_dir():
        raise ValueError('path must be a directory.')
    name_patterns = [re.compile(expr) for expr in (names or [])]
    ext_patterns = [_ext_regex(e) for e in (ext or [])]
    return [
        entry for entry in directory.iterdir()
        if not _any_match(name_patterns, entry.name)
        and not _any_match(ext_patterns, entry.name)
    ]
def listdir_with_only(path, names=None, ext=None):
    """List directory entries whose name matches one of the *names* regexes
    or whose extension is in *ext*.

    Raises:
        ValueError: If *path* is not a directory.
    """
    directory = Path(path)
    if not directory.is_dir():
        raise ValueError('path must be a directory.')
    name_patterns = [re.compile(expr) for expr in (names or [])]
    ext_patterns = [_ext_regex(e) for e in (ext or [])]
    # A set keeps entries matched by both criteria from appearing twice.
    selected = {
        entry for entry in directory.iterdir()
        if _any_match(name_patterns, entry.name)
        or _any_match(ext_patterns, entry.name)
    }
    return list(selected)
|
from creator.cli import cli
# Re-export the CLI entry point as this package's public API.
__all__ = ['cli']
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateAnnouncementSubscriptionDetails(object):
    """Request model carrying the details needed to create a new
    announcement subscription."""

    def __init__(self, **kwargs):
        """Initialize a new CreateAnnouncementSubscriptionDetails.

        Supported keyword arguments (each corresponds to a property of the
        same name on this class):

        :param display_name: (str) User-friendly, changeable name.
        :param description: (str) Free-form description.
        :param compartment_id: (str) OCID of the target compartment.
        :param ons_topic_id: (str) OCID of the Notifications service topic.
        :param filter_groups: (dict(str, FilterGroupDetails)) Filter groups.
        :param freeform_tags: (dict(str, str)) Free-form tags.
        :param defined_tags: (dict(str, dict(str, object))) Defined tags.
        """
        self.swagger_types = {
            'display_name': 'str',
            'description': 'str',
            'compartment_id': 'str',
            'ons_topic_id': 'str',
            'filter_groups': 'dict(str, FilterGroupDetails)',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }
        self.attribute_map = {
            'display_name': 'displayName',
            'description': 'description',
            'compartment_id': 'compartmentId',
            'ons_topic_id': 'onsTopicId',
            'filter_groups': 'filterGroups',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }
        # Every backing field starts unset; the class decorator populates
        # them from the keyword arguments via the property setters.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)

    @property
    def display_name(self):
        """**[Required]** A user-friendly name for the announcement
        subscription. Does not have to be unique, and it's changeable.
        Avoid entering confidential information.

        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        self._display_name = display_name

    @property
    def description(self):
        """A description of the announcement subscription. Avoid entering
        confidential information.

        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        self._description = description

    @property
    def compartment_id(self):
        """**[Required]** The `OCID`__ of the compartment where you want to
        create the announcement subscription.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :rtype: str
        """
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        self._compartment_id = compartment_id

    @property
    def ons_topic_id(self):
        """**[Required]** The OCID of the Notifications service topic that is
        the target for publishing announcements that match the configured
        announcement subscription. The caller of the operation needs the
        ONS_TOPIC_PUBLISH permission for the targeted topic. See
        `Details for Notifications`__.

        __ https://docs.cloud.oracle.com/Content/Identity/policyreference/notificationpolicyreference.htm

        :rtype: str
        """
        return self._ons_topic_id

    @ons_topic_id.setter
    def ons_topic_id(self, ons_topic_id):
        self._ons_topic_id = ons_topic_id

    @property
    def filter_groups(self):
        """A list of filter groups for the announcement subscription. A filter
        group combines one or more filters that the Announcements service
        applies to announcements for matching purposes.

        :rtype: dict(str, FilterGroupDetails)
        """
        return self._filter_groups

    @filter_groups.setter
    def filter_groups(self, filter_groups):
        self._filter_groups = filter_groups

    @property
    def freeform_tags(self):
        """Simple key-value pair that is applied without any predefined name,
        type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`

        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """Defined tags for this resource. Each key is predefined and scoped
        to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        self._defined_tags = defined_tags

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never compares equal; otherwise compare full instance state.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
|
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import ddt
import webob
import webob.exc
from manila.api import common
from manila import test
from manila.tests.api import fakes
from manila.tests.db import fakes as db_fakes
class LimiterTest(test.TestCase):
    """Unit tests for the `manila.api.common.limited` method.

    `limited` takes a list of items and, depending on the request's
    'offset' and 'limit' GET parameters, returns a subset or the
    complete set of the given items.
    """
    def setUp(self):
        """Create fixture lists of several sizes before each test."""
        super(LimiterTest, self).setUp()
        self.tiny = list(range(1))
        self.small = list(range(10))
        self.medium = list(range(1000))
        self.large = list(range(10000))

    def _assert_limited(self, req, expectations, **kwargs):
        """Check `limited` against (input, expected) pairs for one request."""
        for items, expected in expectations:
            self.assertEqual(expected, common.limited(items, req, **kwargs))

    def test_limiter_offset_zero(self):
        """An explicit offset of 0 behaves like no offset at all."""
        req = webob.Request.blank('/?offset=0')
        self._assert_limited(req, [
            (self.tiny, self.tiny),
            (self.small, self.small),
            (self.medium, self.medium),
            (self.large, self.large[:1000]),
        ])

    def test_limiter_offset_medium(self):
        """A medium offset skips that many leading items."""
        req = webob.Request.blank('/?offset=10')
        self._assert_limited(req, [
            (self.tiny, []),
            (self.small, self.small[10:]),
            (self.medium, self.medium[10:]),
            (self.large, self.large[10:1010]),
        ])

    def test_limiter_offset_over_max(self):
        """An offset beyond the default max_limit of 1000 still works."""
        req = webob.Request.blank('/?offset=1001')
        self._assert_limited(req, [
            (self.tiny, []),
            (self.small, []),
            (self.medium, []),
            (self.large, self.large[1001:2001]),
        ])

    def test_limiter_offset_blank(self):
        """A blank offset is rejected with HTTPBadRequest."""
        req = webob.Request.blank('/?offset=')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_offset_bad(self):
        """A non-numeric offset is rejected with HTTPBadRequest."""
        req = webob.Request.blank(u'/?offset=\u0020aa')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_nothing(self):
        """With neither offset nor limit, only max_limit caps the result."""
        req = webob.Request.blank('/')
        self._assert_limited(req, [
            (self.tiny, self.tiny),
            (self.small, self.small),
            (self.medium, self.medium),
            (self.large, self.large[:1000]),
        ])

    def test_limiter_limit_zero(self):
        """A limit of zero falls back to the default max_limit."""
        req = webob.Request.blank('/?limit=0')
        self._assert_limited(req, [
            (self.tiny, self.tiny),
            (self.small, self.small),
            (self.medium, self.medium),
            (self.large, self.large[:1000]),
        ])

    def test_limiter_limit_medium(self):
        """A limit of 10 truncates larger collections."""
        req = webob.Request.blank('/?limit=10')
        self._assert_limited(req, [
            (self.tiny, self.tiny),
            (self.small, self.small),
            (self.medium, self.medium[:10]),
            (self.large, self.large[:10]),
        ])

    def test_limiter_limit_over_max(self):
        """A limit above max_limit is capped at max_limit."""
        req = webob.Request.blank('/?limit=3000')
        self._assert_limited(req, [
            (self.tiny, self.tiny),
            (self.small, self.small),
            (self.medium, self.medium),
            (self.large, self.large[:1000]),
        ])

    def test_limiter_limit_and_offset(self):
        """Limit and offset combine, with limit capped at the default max."""
        items = list(range(2000))
        for query, expected in (
                ('/?offset=1&limit=3', items[1:4]),
                ('/?offset=3&limit=0', items[3:1003]),
                ('/?offset=3&limit=1500', items[3:1003]),
                ('/?offset=3000&limit=10', [])):
            req = webob.Request.blank(query)
            self.assertEqual(expected, common.limited(items, req))

    def test_limiter_custom_max_limit(self):
        """A max_limit other than the default 1000 is honored."""
        items = list(range(2000))
        for query, expected in (
                ('/?offset=1&limit=3', items[1:4]),
                ('/?offset=3&limit=0', items[3:]),
                ('/?offset=3&limit=2500', items[3:]),
                ('/?offset=3000&limit=10', [])):
            req = webob.Request.blank(query)
            self.assertEqual(
                expected, common.limited(items, req, max_limit=2000))

    def test_limiter_negative_limit(self):
        """A negative limit is rejected with HTTPBadRequest."""
        req = webob.Request.blank('/?limit=-3000')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_negative_offset(self):
        """A negative offset is rejected with HTTPBadRequest."""
        req = webob.Request.blank('/?offset=-30')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
    """Unit tests for `manila.api.common.get_pagination_params`.

    The helper reads the 'marker' and 'limit' GET parameters off a
    request object and returns them as a dict.
    """
    def test_no_params(self):
        """A request without parameters yields an empty dict."""
        req = webob.Request.blank('/')
        self.assertEqual({}, common.get_pagination_params(req))

    def test_valid_marker(self):
        """A well-formed marker is passed through untouched."""
        req = webob.Request.blank(
            '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
        expected = {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}
        self.assertEqual(expected, common.get_pagination_params(req))

    def test_valid_limit(self):
        """A positive integer limit is parsed to an int."""
        req = webob.Request.blank('/?limit=10')
        self.assertEqual({'limit': 10}, common.get_pagination_params(req))

    def test_invalid_limit(self):
        """A negative limit is rejected with HTTPBadRequest."""
        req = webob.Request.blank('/?limit=-2')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.get_pagination_params, req)

    def test_valid_limit_and_marker(self):
        """Limit and marker can be supplied together."""
        marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
        req = webob.Request.blank('/?limit=20&marker=%s' % marker)
        self.assertEqual({'marker': marker, 'limit': 20},
                         common.get_pagination_params(req))
@ddt.ddt
class MiscFunctionsTest(test.TestCase):
    """Tests for assorted helpers in `manila.api.common`."""
    @ddt.data(
        ('http://manila.example.com/v2/b2d18606-2673-4965-885a-4f5a8b955b9b/',
         'http://manila.example.com/b2d18606-2673-4965-885a-4f5a8b955b9b/'),
        ('http://manila.example.com/v1/',
         'http://manila.example.com/'),
        ('http://manila.example.com/share/v2.22/',
         'http://manila.example.com/share/'),
        ('http://manila.example.com/share/v1/'
         'b2d18606-2673-4965-885a-4f5a8b955b9b/',
         'http://manila.example.com/share/'
         'b2d18606-2673-4965-885a-4f5a8b955b9b/'),
        ('http://10.10.10.10:3366/v1/',
         'http://10.10.10.10:3366/'),
        ('http://10.10.10.10:3366/v2/b2d18606-2673-4965-885a-4f5a8b955b9b/',
         'http://10.10.10.10:3366/b2d18606-2673-4965-885a-4f5a8b955b9b/'),
        ('http://manila.example.com:3366/v1.1/',
         'http://manila.example.com:3366/'),
        ('http://manila.example.com:3366/v2/'
         'b2d18606-2673-4965-885a-4f5a8b955b9b/',
         'http://manila.example.com:3366/'
         'b2d18606-2673-4965-885a-4f5a8b955b9b/'))
    @ddt.unpack
    def test_remove_version_from_href(self, fixture, expected):
        """The version segment is stripped from hrefs of every shape."""
        self.assertEqual(expected, common.remove_version_from_href(fixture))

    @ddt.data('http://manila.example.com/1.1/shares',
              'http://manila.example.com/v/shares',
              'http://manila.example.com/v1.1shares')
    def test_remove_version_from_href_bad_request(self, fixture):
        """Hrefs without a recognizable version segment raise ValueError."""
        self.assertRaises(ValueError,
                          common.remove_version_from_href,
                          fixture)

    def test_validate_cephx_id_invalid_with_period(self):
        """A cephx id containing a period is rejected."""
        self.assertRaises(webob.exc.HTTPBadRequest,
                          common.validate_cephx_id,
                          "client.manila")

    def test_validate_cephx_id_invalid_with_non_ascii(self):
        """A cephx id with non-ASCII characters is rejected."""
        self.assertRaises(webob.exc.HTTPBadRequest,
                          common.validate_cephx_id,
                          u"bj\u00F6rn")

    @ddt.data("alice", "alice_bob", "alice bob")
    def test_validate_cephx_id_valid(self, test_id):
        """Plain ASCII ids with underscores or spaces are accepted."""
        common.validate_cephx_id(test_id)

    @ddt.data(['ip', '1.1.1.1', False, False], ['user', 'alice', False, False],
              ['cert', 'alice', False, False], ['cephx', 'alice', True, False],
              ['user', 'alice$', False, False],
              ['user', 'test group name', False, False],
              ['user', 'group$.-_\'`{}', False, False],
              ['ip', '172.24.41.0/24', False, False],
              ['ip', '1001::1001', False, True],
              ['ip', '1001::1000/120', False, True])
    @ddt.unpack
    def test_validate_access(self, access_type, access_to, ceph, enable_ipv6):
        """Valid access rules of every supported type pass validation."""
        common.validate_access(access_type=access_type, access_to=access_to,
                               enable_ceph=ceph, enable_ipv6=enable_ipv6)

    @ddt.data(['ip', 'alice', False], ['ip', '1.1.1.0/10/12', False],
              ['ip', '255.255.255.265', False], ['ip', '1.1.1.0/34', False],
              ['cert', '', False], ['cephx', 'client.alice', True],
              ['group', 'alice', True], ['cephx', 'alice', False],
              ['cephx', '', True], ['user', 'bob/', False],
              ['user', 'group<>', False], ['user', '+=*?group', False],
              ['ip', '1001::1001/256', False],
              ['ip', '1001:1001/256', False],)
    @ddt.unpack
    def test_validate_access_exception(self, access_type, access_to, ceph):
        """Malformed or disallowed access rules raise HTTPBadRequest."""
        self.assertRaises(webob.exc.HTTPBadRequest, common.validate_access,
                          access_type=access_type, access_to=access_to,
                          enable_ceph=ceph)
@ddt.ddt
class ViewBuilderTest(test.TestCase):
    """Tests for version-aware view building helpers."""
    def setUp(self):
        """Build a fake resource and view builder before each test."""
        super(ViewBuilderTest, self).setUp()
        self.expected_resource_dict = {
            'id': 'fake_resource_id',
            'foo': 'quz',
            'fred': 'bob',
            'alice': 'waldo',
            'spoon': 'spam',
            'xyzzy': 'qwerty',
        }
        self.fake_resource = db_fakes.FakeModel(self.expected_resource_dict)
        self.view_builder = fakes.FakeResourceViewBuilder()

    @ddt.data('1.0', '1.40')
    def test_versioned_method_no_updates(self, version):
        """Versions without view updates expose only the base key set."""
        req = fakes.HTTPRequest.blank('/my_resource', version=version)
        view = self.view_builder.view(req, self.fake_resource)
        self.assertEqual({'id', 'foo', 'fred', 'alice'}, set(view.keys()))

    @ddt.data(True, False)
    def test_versioned_method_v1_6(self, is_admin):
        """Version 1.6 adds an admin-only key for admin contexts."""
        req = fakes.HTTPRequest.blank('/my_resource', version='1.6',
                                      use_admin_context=is_admin)
        expected_keys = {'id', 'foo', 'fred', 'alice'}
        if is_admin:
            expected_keys.add('spoon')
        view = self.view_builder.view(req, self.fake_resource)
        self.assertEqual(expected_keys, set(view.keys()))

    @ddt.unpack
    @ddt.data({'is_admin': True, 'version': '3.14'},
              {'is_admin': False, 'version': '3.14'},
              {'is_admin': False, 'version': '6.2'},
              {'is_admin': True, 'version': '6.2'})
    def test_versioned_method_all_match(self, is_admin, version):
        """Newer versions swap in the replacement key set."""
        req = fakes.HTTPRequest.blank(
            '/my_resource', version=version, use_admin_context=is_admin)
        expected_keys = {'id', 'fred', 'xyzzy', 'alice'}
        if is_admin:
            expected_keys.add('spoon')
        view = self.view_builder.view(req, self.fake_resource)
        self.assertEqual(expected_keys, set(view.keys()))
|
def partition(arr, beg, end):
    """Partition arr[beg:end+1] around the last element.

    Elements greater than the pivot are moved to the left, so the overall
    sort produced by `quicksort` is in descending order. Returns the final
    index of the pivot.
    """
    boundary = beg - 1
    pivot = arr[end]
    for cursor in range(beg, end):
        if arr[cursor] > pivot:
            boundary += 1
            arr[boundary], arr[cursor] = arr[cursor], arr[boundary]
    arr[boundary + 1], arr[end] = arr[end], arr[boundary + 1]
    return boundary + 1

def quicksort(arr, beg, end):
    """Sort arr[beg:end+1] in place, in descending order."""
    # Single-element lists are returned unchanged (kept for compatibility).
    if len(arr) == 1:
        return arr
    if beg < end:
        split = partition(arr, beg, end)
        quicksort(arr, beg, split - 1)
        quicksort(arr, split + 1, end)

myList = [54, 26, 93, 17, 77, 31, 44, 55, 20]
n = len(myList)
quicksort(myList, 0, n - 1)
print(myList)
|
"""
Variáveis de mensagens em português brasileiro.
"""
import discord
# Variáveis
teste = "teste - Locales - PT_BR"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.