| content (stringlengths 0–1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import json
from src.api_reader import get_members
from src.intellisense import IntellisenseSchema
from src.version import schema_version, sdk_go_version
if __name__ == '__main__':
model_dir = os.path.join(os.path.dirname(__file__), "model")
api_file = os.path.join(model_dir, "api.json")
    with open(api_file) as f:
        api = json.load(f)
    doc_file = os.path.join(model_dir, "docs.json")
    with open(doc_file) as f:
        doc = json.load(f)
operation = 'RegisterTaskDefinitionRequest'
if operation not in api['shapes']:
sys.exit('Operation "{op}" not found under "shapes"'.format(op=operation))
reference, required = get_members(api, operation)
intellisense = IntellisenseSchema(api, doc, schema_version, sdk_go_version)
schema = intellisense.build(reference, required, operation)
schema_dir = os.path.join(model_dir, "schema")
intellisense.write(schema_dir, schema)
| nilq/baby-python | python |
from setuptools import setup
setup(
    name='imagesimilarity',
    version='0.1.2',
    packages=[''],
    url='https://github.com/marvinferber/imagesimilarity',
    license='Apache License 2.0',
    author='Marvin Ferber',
    author_email='ferbhome@freenet.de',
    description='Find and display images that are similar.',
    install_requires=[
        'wxPython>=4',
        'Pillow>=7',
        'tensorflow==2.0.2',
        'tensorflow_hub',
        'annoy>=1.17',
        'setuptools==44',
        'pyinstaller @ https://github.com/pyinstaller/pyinstaller/archive/develop.tar.gz'
    ]
)
| nilq/baby-python | python |
from bytecodemanipulation import (
CodeOptimiser,
Emulator,
InstructionMatchers,
MutableCodeObject,
OptimiserAnnotations,
)
from bytecodemanipulation.TransformationHelper import BytecodePatchHelper
from bytecodemanipulation.Transformers import TransformationHandler
from bytecodemanipulation.util import Opcodes
| nilq/baby-python | python |
# This program prints the multiplication table of the number entered by the user.
n = int(input('Enter a number to see its multiplication table: '))
print('-=' * 10)
for i in range(1, 11):
    print("{} x {:2} = {}".format(n, i, n * i))
print('-=' * 10)
| nilq/baby-python | python |
# project/server/models.py
from flask import current_app
from project.server import db, bcrypt
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
admin = db.Column(db.Boolean, nullable=False, default=False)
projects = db.relationship('Project', backref='users', lazy=True)
def __init__(self, email, password, admin=False):
self.email = email
self.password = bcrypt.generate_password_hash(
password, current_app.config.get('BCRYPT_LOG_ROUNDS')
).decode('utf-8')
self.admin = admin
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User {0}>'.format(self.email)
class Project(db.Model):
__tablename__ = 'projects'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(120), nullable=False)
url = db.Column(db.String, nullable=False)
status = db.Column(db.Boolean, nullable=False, default=False)
user_id = db.Column(
db.Integer,
db.ForeignKey('users.id'),
nullable=False
)
builds = db.relationship('Build', backref='builds', lazy=True)
def __init__(self, user_id, name, url, status=False):
self.user_id = user_id
self.name = name
self.url = url
self.status = status
class Build(db.Model):
__tablename__ = 'builds'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
status = db.Column(db.Boolean, nullable=False)
datetime = db.Column(db.DateTime, nullable=False)
project_id = db.Column(
db.Integer,
db.ForeignKey('projects.id'),
nullable=False
)
def __init__(self, project_id, status, datetime):
self.project_id = project_id
self.status = status
self.datetime = datetime
def to_json(self):
return {
'id': self.id,
'project_id': self.project_id,
'status': self.status,
'datetime': self.datetime
}
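# A minimal usage sketch (assumes an application context and an initialized db):
#   user = User(email='a@example.com', password='secret')
#   db.session.add(user)
#   db.session.commit()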
| nilq/baby-python | python |
# Copyright (C) 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
"""Entry point of tests.common.*.
"""
from .base import (
MaybeModT, Base
)
from .constants import (
TESTS_DIR, TESTS_RES_DIR, RULES_DIR,
)
from .testcases import (
RuleTestCase, CliTestCase
)
__all__ = [
'TESTS_DIR', 'TESTS_RES_DIR', 'RULES_DIR',
'MaybeModT', 'Base', 'RuleTestCase', 'CliTestCase',
]
| nilq/baby-python | python |
import logging
logging.basicConfig(filename="log.txt", filemode="w")
log = logging.getLogger()
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
def pytest_configure(config):
print("pytest_configure")
logging.info("pytest_configure")
# d("configure")
# if not hasattr(config, 'slaveinput'):
# d("slave input")
def pytest_sessionstart(session):
logging.info("pytest_sessionstart")
print("pytest_sessionstart")
# d("session start")
def pytest_runtest_setup(item):
# called for running each test in 'a' directory
print ("setting up", item)
| nilq/baby-python | python |
from os import listdir
import core.log as log
async def main(message, client, serverdata):
#Part 1
commandfiles = listdir("./core/commands")
commandList = []
#Check if Command is a file
for commands in commandfiles:
if commands.endswith('.py'):
commandList.append(commands.replace(".py", ""))
#Get Variables
    messageContentList = message.content.split()
    if not messageContentList:
        # Nothing to parse; avoids an IndexError from pop() on empty messages
        return
    command = messageContentList.pop(0).replace("-","").lower()
    args = messageContentList
#Execute Command
if command in commandList:
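        # __import__ with a non-empty fromlist returns the named submodule
        # itself rather than the top-level package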
commandexecute = __import__('core.commands.{}'.format(command), fromlist=[None])
await commandexecute.main(message, args, client, serverdata)
await log.command.main(message, serverdata)
else:
if str(message.guild.id) in serverdata:
commandfiles = listdir("./core/commands/special")
commandList = []
#Check if Command is a file
for commands in commandfiles:
if commands.endswith('.py'):
commandList.append(commands.replace(".py", ""))
#Get Variables
messageContentList = message.content.split()
command = messageContentList.pop(0).replace("-","").lower()
args = messageContentList
#Execute Command
if command not in commandList:
return
commandexecute = __import__('core.commands.special.{}'.format(command), fromlist=[None])
await commandexecute.main(message, args, client, serverdata)
await log.command.main(message, serverdata)
| nilq/baby-python | python |
"""
=====================
Fitting a light curve
=====================
This example shows how to fit the parameters of a SALT2 model to photometric
light curve data.
First, we'll load an example of some photometric data.
"""
import sncosmo
data = sncosmo.load_example_data()
print(data)
#####################################################################
# An important additional note: a table of photometric data has a
# ``band`` column and a ``zpsys`` column that use strings to identify
# the bandpass (e.g., ``'sdssg'``) and zeropoint system (``'ab'``) of
# each observation. If the bandpass and zeropoint systems in your data
# are *not* built-ins known to sncosmo, you must register the
# corresponding `~sncosmo.Bandpass` or `~sncosmo.MagSystem` to the
# right string identifier using the registry.
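# A minimal registration sketch (hypothetical wavelength/transmission arrays;
# assumes the ``sncosmo.register`` entry point of recent sncosmo versions):
#
#     import numpy as np
#     wave = np.array([4000., 4200., 4400., 4600., 4800., 5000.])
#     trans = np.array([0., 1., 1., 1., 1., 0.])
#     sncosmo.register(sncosmo.Bandpass(wave, trans, name='myband'), 'myband')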
# create a model
model = sncosmo.Model(source='salt2')
# run the fit
result, fitted_model = sncosmo.fit_lc(
data, model,
['z', 't0', 'x0', 'x1', 'c'], # parameters of model to vary
bounds={'z':(0.3, 0.7)}) # bounds on parameters (if any)
#####################################################################
# The first object returned is a dictionary-like object where the keys
# can be accessed as attributes in addition to the typical dictionary
# lookup like ``result['ncall']``:
print("Number of chi^2 function calls:", result.ncall)
print("Number of degrees of freedom in fit:", result.ndof)
print("chi^2 value at minimum:", result.chisq)
print("model parameters:", result.param_names)
print("best-fit values:", result.parameters)
print("The result contains the following attributes:\n", result.keys())
##################################################################
# The second object returned is a shallow copy of the input model with
# the parameters set to the best fit values. The input model is
# unchanged.
sncosmo.plot_lc(data, model=fitted_model, errors=result.errors)
#######################################################################
# Suppose we already know the redshift of the supernova we're trying to
# fit. We want to set the model's redshift to the known value, and then
# make sure not to vary `z` in the fit.
model.set(z=0.5) # set the model's redshift.
result, fitted_model = sncosmo.fit_lc(data, model,
['t0', 'x0', 'x1', 'c'])
sncosmo.plot_lc(data, model=fitted_model, errors=result.errors)
| nilq/baby-python | python |
#!/usr/bin/env python3
# Copyright (C) 2017 Quentin "Naccyde" Deslandes.
# Redistribution and use of this file is allowed according to the terms of the MIT license.
# For details see the LICENSE file distributed with yall.
import os
import requests
import json
import argparse
import fnmatch
owner = 'naccyde'
repo = 'yall'
prefixPath = 'build/out/packages'
requiredDistros = {
'deb' : {
'ubuntu' : [ 'xenial', 'yakkety', 'zesty', 'artful', 'bionic' ],
'debian' : [ 'jessie', 'wheezy', 'stretch', 'buster' ]
},
'rpm' : {
'fedora' : [ '25', '26', '27', '28' ]
}
}
def findDistroIds(requiredDistros, pcDistributions):
distrosPackages = {}
for pcExt in pcDistributions:
if not pcExt in requiredDistros:
continue
distrosPackages[pcExt] = { 'ids' : [], 'filename' : [] }
for pcDistro in pcDistributions[pcExt]:
if not pcDistro['index_name'] in requiredDistros[pcExt]:
continue
versions = requiredDistros[pcExt][pcDistro['index_name']]
for pcVersion in pcDistro['versions']:
if not pcVersion['index_name'] in versions:
continue
distrosPackages[pcExt]['ids'].append(pcVersion['id'])
return distrosPackages
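# findDistroIds returns a mapping shaped roughly like the following (the ids
# come from the Package Cloud API response; 'filename' is filled in later by
# main()):
#   {'deb': {'ids': [...], 'filename': [...]},
#    'rpm': {'ids': [...], 'filename': [...]}}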
def getArtefacts(folder, extensionFilter):
    return fnmatch.filter(os.listdir(folder), extensionFilter)
class HttpApi:
def isStatusValid(self, statusCode):
return 200 <= statusCode <= 299
def get(self, url, headers={}):
re = requests.get(url, headers=headers)
return re.status_code, re.text
def post(self, url, headers={}, data={}, files={}):
re = requests.post(url, headers=headers, json=data, files=files)
return re.status_code, re.text
class PackageCloudApi(HttpApi):
def __init__(self, owner, repo, token):
self.owner = owner
self.repo = repo
self.token = token
self.apiUrl = 'https://{}:@packagecloud.io/api/v1'.format(token)
def getDistributions(self):
url = self.apiUrl + '/distributions.json'
status, text = self.get(url)
return status, json.loads(text)
def uploadPackage(self, distroId, filename):
url = self.apiUrl + '/repos/{}/{}/packages.json'.format(self.owner, self.repo)
file = {
'package[distro_version_id]': (None, str(distroId)),
'package[package_file]': (filename, open(prefixPath + '/' + filename, 'rb')),
}
status, text = self.post(url, files=file)
return status, json.loads(text)
def uploadPackages(self, distrosPackages={}):
for distro in distrosPackages:
for distroId in distrosPackages[distro]['ids']:
for filename in distrosPackages[distro]['filename']:
print('\t\t[+] Uploading', filename, 'to', distroId)
status, text = self.uploadPackage(distroId, filename)
if not 200 <= status <= 299:
print('\t\t\t[-] ERROR: {}, HTTP {} : {}'.format(filename, status, text))
class GithubApi(HttpApi):
apiUrl = 'https://api.github.com'
uploadUrl = 'https://uploads.github.com'
genericHeaders = { 'Accept' : 'application/vnd.github.v3+json' }
def __init__(self, owner, repo, token):
self.owner = owner
self.repo = repo
self.genericHeaders['Authorization'] = 'token ' + token
def getReleases(self):
url = self.apiUrl + '/repos/{}/{}/releases'.format(self.owner, self.repo)
status, text = self.get(url, self.genericHeaders)
return json.loads(text) if self.isStatusValid(status) else None
def getRelease(self, tag):
releases = self.getReleases()
for release in releases:
if release['tag_name'] == tag:
return release
return None
def createRelease(self, tag_name, target_commitish, name, body, draft=False, prerelease=False):
url = self.apiUrl + '/repos/{}/{}/releases'.format(self.owner, self.repo)
data = {
'tag_name' : tag_name,
'target_commitish' : target_commitish,
'name' : name,
'body' : body,
'draft' : draft,
'prerelease' : prerelease
}
status, text = self.post(url, headers=self.genericHeaders, data=data)
if not self.isStatusValid(status):
raise Exception('Could not create release:', status, text)
return json.loads(text)
def uploadReleaseAsset(self, release, filename):
url = self.uploadUrl + '/repos/{}/{}/releases/{}/assets?name={}'.format(self.owner, self.repo, release['id'], filename)
headers = { 'Content-Type' : 'application/zip' }
headers.update(self.genericHeaders)
file = { 'file' : (filename, open(prefixPath + '/' + filename, 'rb'))}
status, text = self.post(url, headers, None, file)
return json.loads(text) if self.isStatusValid(status) else None
def uploadReleaseAssets(self, release, files):
for file in files:
self.uploadReleaseAsset(release, file)
def getReleaseMessage(changelog, tag):
s = """
Each `yall` Linux release is available from `.deb` and `.rpm` repositories:
* `.deb`: `curl -s https://packagecloud.io/install/repositories/naccyde/yall/script.deb.sh | sudo bash`
* `.rpm`: `curl -s https://packagecloud.io/install/repositories/naccyde/yall/script.rpm.sh | sudo bash`
You can then install `yall` and `yall-dev` using your package manager. The following distributions are supported:
* Debian: `wheezy (7)`, `jessie (8)`, `stretch (9)`, `buster (10)`
* Ubuntu: `Xenial Xerus (16.04)`, `Yakkety Yak (16.10)`, `Zesty Zapus (17.04)`, `Artful Aardvark (17.10)`, `Bionic Beaver (18.04)`
* Fedora: `25`, `26`, `27`, `28`
If your distribution is not supported, you can open an issue to ask for its support.
"""
return changelog + s
def main():
parser = argparse.ArgumentParser(description='Script used to deploy yall releases')
parser.add_argument('-g', '--github-token', required=True, help='Github token')
parser.add_argument('-p', '--package-cloud-token', required=True, help='Package Cloud token')
parser.add_argument('-t', '--tag', required=True, help='Tag of the release')
parser.add_argument('-z', '--zip', action='store_true', help='Deploy .zip artefacts')
parser.add_argument('-l', '--linux', action='store_true', help='Deploy .deb and .rpm artefacts')
args = parser.parse_args()
lastChangelog = ""
with open("CHANGELOG.md", "r") as file:
lastChangelogWVersion = file.read().split("\n\n")[2]
lastChangelog = '\n'.join(lastChangelogWVersion.split("\n")[1:])
print('=== yall release ===\n')
print('\t[+] Creating release {}\n'.format(args.tag))
# Create Github release
githubApi = GithubApi(owner, repo, args.github_token)
release = githubApi.getRelease(args.tag)
if not release:
print('\t[+] Creating release', args.tag)
release = githubApi.createRelease(args.tag, 'master', args.tag, getReleaseMessage(lastChangelog, args.tag))
else:
print('\t[.] Release', args.tag, 'already exists')
if args.zip:
print('\t[+] Deploying .zip artefacts')
zipArtefacts = getArtefacts(prefixPath, '*.zip')
githubApi.uploadReleaseAssets(release, zipArtefacts)
if args.linux:
print('\t[+] Deploying .deb and .rpm artefacts')
packageCloudApi = PackageCloudApi(owner, 'yall', args.package_cloud_token)
distrosPackages = findDistroIds(requiredDistros, packageCloudApi.getDistributions()[1])
distrosPackages['deb']['filename'] = getArtefacts(prefixPath, '*.deb')
distrosPackages['rpm']['filename'] = getArtefacts(prefixPath, '*.rpm')
packageCloudApi.uploadPackages(distrosPackages)
print('\t[+] RELEASED !')
if __name__ == "__main__":
main()
| nilq/baby-python | python |
# Reverse the words in a string (LeetCode 151). Extra whitespace is handled
# by appending only non-empty tokens, so an all-space input yields "".
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
reverse = []
temp = ""
for i in s:
if i == " ":
if temp != "":
reverse.append(temp)
temp = ""
else:
temp = temp + i
if temp != "":
reverse.append(temp)
return " ".join(reverse[::-1])
solution = Solution()
print(","+solution.reverseWords(" ")+',')
| nilq/baby-python | python |
import sys
import threading
import importlib.util as iutil
from uuid import uuid4
from multiprocessing.dummy import Pool as ThreadPool
from datetime import datetime
from aequilibrae.project.data import Matrices
from aequilibrae.paths.multi_threaded_skimming import MultiThreadedNetworkSkimming
from aequilibrae.paths.results.skim_results import SkimResults
from aequilibrae.utils import WorkerThread
from aequilibrae import logger
try:
from aequilibrae.paths.AoN import skimming_single_origin
except ImportError as ie:
logger.warning(f"Could not import procedures from the binary. {ie.args}")
spec = iutil.find_spec("PyQt5")
pyqt = spec is not None
if pyqt:
from PyQt5.QtCore import pyqtSignal
spec = iutil.find_spec("openmatrix")
has_omx = spec is not None
sys.dont_write_bytecode = True
class NetworkSkimming(WorkerThread):
"""
::
from aequilibrae.paths.network_skimming import NetworkSkimming
from aequilibrae.project import Project
project = Project()
project.open(self.proj_dir)
network = self.project.network
network.build_graphs()
graph = network.graphs['c']
graph.set_graph(cost_field="distance")
graph.set_skimming("distance")
skm = NetworkSkimming(graph)
skm.execute()
# The skim report (if any error generated) is available here
skm.report
# To access the skim matrix directly from its temporary file
matrix = skm.results.skims
# Or you can save the results to disk
skm.save_to_project('skimming result')
# Or specify the AequilibraE's matrix file format
skm.save_to_project('skimming result', 'aem')
project.close()
"""
if pyqt:
skimming = pyqtSignal(object)
def __init__(self, graph, origins=None):
WorkerThread.__init__(self, None)
self.origins = origins
self.graph = graph
self.results = SkimResults()
self.aux_res = MultiThreadedNetworkSkimming()
self.report = []
self.procedure_id = ""
self.procedure_date = ""
self.cumulative = 0
def doWork(self):
self.execute()
def execute(self):
"""Runs the skimming process as specified in the graph"""
if pyqt:
self.skimming.emit(["zones finalized", 0])
self.results.prepare(self.graph)
self.aux_res = MultiThreadedNetworkSkimming()
self.aux_res.prepare(self.graph, self.results)
pool = ThreadPool(self.results.cores)
all_threads = {"count": 0}
for orig in list(self.graph.centroids):
i = int(self.graph.nodes_to_indices[orig])
if i >= self.graph.nodes_to_indices.shape[0]:
self.report.append(f"Centroid {orig} is beyond the domain of the graph")
elif self.graph.fs[int(i)] == self.graph.fs[int(i) + 1]:
self.report.append(f"Centroid {orig} does not exist in the graph")
else:
pool.apply_async(self.__func_skim_thread, args=(orig, all_threads))
pool.close()
pool.join()
self.aux_res = None
self.procedure_id = uuid4().hex
self.procedure_date = str(datetime.today())
if pyqt:
self.skimming.emit(["text skimming", "Saving Outputs"])
self.skimming.emit(["finished_threaded_procedure", None])
def save_to_project(self, name: str, format="omx") -> None:
"""Saves skim results to the project folder and creates record in the database
Args:
*name* (:obj:`str`): Name of the matrix. Same value for matrix record name and file (plus extension)
*format* (:obj:`str`, `Optional`): File format ('aem' or 'omx'). Default is 'omx'
"""
file_name = f"{name}.{format.lower()}"
mats = Matrices()
record = mats.new_record(name, file_name, self.results.skims)
record.procedure_id = self.procedure_id
record.timestamp = self.procedure_date
record.procedure = "Network skimming"
record.save()
def __func_skim_thread(self, origin, all_threads):
if threading.get_ident() in all_threads:
th = all_threads[threading.get_ident()]
else:
all_threads[threading.get_ident()] = all_threads["count"]
th = all_threads["count"]
all_threads["count"] += 1
x = skimming_single_origin(origin, self.graph, self.results, self.aux_res, th)
self.cumulative += 1
if x != origin:
self.report.append(x)
if pyqt:
self.skimming.emit(["zones finalized", self.cumulative])
            # Total zones equals the number of centroids iterated in execute()
            txt = str(self.cumulative) + " / " + str(len(self.graph.centroids))
self.skimming.emit(["text skimming", txt])
| nilq/baby-python | python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from os import makedirs, path
import subprocess
import re
# -- Project information -----------------------------------------------------
project = 'ENRICO'
copyright = '2019, UChicago Argonne, LLC'
author = 'ENRICO Development Team'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.katex', # 'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
numfig = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap-astropy'
html_theme_options = {'logotext1': 'ENRICO', 'logotext2': '', 'logotext3': ''}
html_show_sphinx = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Breathe configuration ---------------------------------------------------
# breathe_projects = {"enrico": "doxygen/xml"}
# breathe_default_project = "enrico"
# -- Build Doxygen ---------------------------------------------------
def build_doxygen(app):
# XML goes in Sphinx source dir, and HTML goes in Sphinx output dir
doxygen_xmldir = path.abspath(path.join(app.srcdir, 'doxygen', 'xml'))
doxygen_htmldir = path.abspath(path.join(app.outdir, 'doxygen', 'html'))
# Doxygen won't create *nested* output dirs, so we do it ourselves.
for d in (doxygen_xmldir, doxygen_htmldir):
makedirs(d, exist_ok=True)
# Need to know location of Doxyfile, so we'll assume its location relative to Sphinx srcdir
doxyfile_dir = path.dirname(path.dirname(app.srcdir))
# To pass output dirs to Doxygen, we follow this advice:
# http://www.doxygen.nl/manual/faq.html#faq_cmdline
# Here we read the Doxyfile into a string, replace the *_OUTPUT vars, and pass the string as
# stdin to the doxygen subprocess
with open(path.join(doxyfile_dir, 'Doxyfile')) as f:
doxy_opts = f.read()
doxy_opts = re.sub(r'(\bHTML_OUTPUT\b\s*=\s*).*', r'\1"{}"'.format(doxygen_htmldir),
doxy_opts)
doxy_opts = re.sub(r'(\bXML_OUTPUT\b\s*=\s*).*', r'\1"{}"'.format(doxygen_xmldir), doxy_opts)
subprocess.run(['doxygen', '-'], cwd=doxyfile_dir, input=doxy_opts, universal_newlines=True,
check=True)
# -- Setup hooks -------------------------------------------------------------
def setup(app):
app.add_css_file('theme_overrides.css')
app.connect("builder-inited", build_doxygen)
| nilq/baby-python | python |
# License: BSD 3 clause
import tick.base
import tick.base_model.build.base_model
from .model_hawkes_expkern_leastsq import ModelHawkesExpKernLeastSq
from .model_hawkes_expkern_loglik import ModelHawkesExpKernLogLik
from .model_hawkes_sumexpkern_leastsq import ModelHawkesSumExpKernLeastSq
from .model_hawkes_sumexpkern_loglik import ModelHawkesSumExpKernLogLik
__all__ = [
"ModelHawkesExpKernLogLik", "ModelHawkesSumExpKernLogLik",
"ModelHawkesExpKernLeastSq", "ModelHawkesSumExpKernLeastSq"
]
| nilq/baby-python | python |
from radar import db
__all__ = ['Commit']
class Commit(db.Model):
id = db.Column(db.Integer, primary_key=True)
commit_hash = db.Column(db.String(40))
summary = db.Column(db.String(100))
branch = db.Column(db.String(50))
author = db.Column(db.String(100))
commit_time = db.Column(db.DateTime)
    __table_args__ = (db.UniqueConstraint(commit_hash, branch),)
| nilq/baby-python | python |
from django.contrib.gis.db import models
class Mansion(models.Model):
class Meta:
db_table = 'mansion'
gid = models.BigAutoField(primary_key=True)
housing_area_code = models.BigIntegerField(null=False)
facility_key = models.CharField(max_length=4000, null=True)
shape_wkt = models.MultiLineStringField(null=False, geography=True)
fabricated_type_code = models.BigIntegerField(null=True)
pref = models.CharField(max_length=4000, null=True)
created_by = models.CharField(max_length=4000, null=True)
created_at = models.DateTimeField(null=True)
updated_by = models.CharField(max_length=4000, null=True)
updated_at = models.DateTimeField(null=True)
| nilq/baby-python | python |
"""Create svg images from a keyboard definition."""
import xml.etree.ElementTree as ET
import io
from math import sin, cos, atan2, degrees, radians
from kbtb.plate import generate_plate
def shape_to_svg_element(shape, props={}, x_scale=1, y_scale=-1):
return ET.Element(
"path", {
"d":
" M " + " ".join(f"{x_scale*x},{y_scale*y}"
for x, y in shape.exterior.coords) + " Z " +
" ".join((" M " + " ".join(f"{x_scale*x},{y_scale*y}"
for x, y in i.coords) + " Z ")
for i in shape.interiors),
**props,
})
def shape_to_svg(shape, props={}, x_scale=1, y_scale=-1):
# Calculate viewbox from shape bounds
x_min, y_min, x_max, y_max = shape.bounds
left = min(x_min * x_scale, x_max * x_scale)
top = min(y_min * y_scale, y_max * y_scale)
width = abs(x_scale * x_min - x_scale * x_max)
height = abs(y_scale * y_min - y_scale * y_max)
# Create the empty svg tree
root = ET.Element(
'svg', {
"viewBox": f"{left} {top} {width} {height}",
"xmlns": "http://www.w3.org/2000/svg",
"xmlns:xlink": "http://www.w3.org/1999/xlink",
**props,
})
root.append(shape_to_svg_element(shape, x_scale=x_scale, y_scale=y_scale))
return ET.ElementTree(root)
def keyboard_to_layout_svg(kb, add_numbers=True):
plate = generate_plate(kb)
x_scale = 1
y_scale = -1
# Calculate viewbox from plate bounds
x_min, y_min, x_max, y_max = plate.bounds
left = min(x_min * x_scale, x_max * x_scale)
top = min(y_min * y_scale, y_max * y_scale)
width = abs(x_scale * x_min - x_scale * x_max)
height = abs(y_scale * y_min - y_scale * y_max)
# Create the empty svg tree
root = ET.Element(
'svg', {
"viewBox": f"{left} {top} {width} {height}",
"xmlns": "http://www.w3.org/2000/svg",
"xmlns:xlink": "http://www.w3.org/1999/xlink",
})
root.append(ET.Comment(f'physical-dimensions: {width} mm by {height} mm'))
    # Add groups for document structure
    g_plate = ET.SubElement(root, "g", {
        "id": "plate",
        "style": "fill: black; fill-rule: evenodd;",
    })
    g_keycaps = ET.SubElement(root, "g", {
        "id": "keycaps",
        "style": "fill: white;"
    })
    # Add the plate; shape_to_svg_element already renders the exterior and
    # interior rings as a single path, so no hand-built path element is needed
    g_plate.append(
        shape_to_svg_element(plate, {"style": "fill: black;"}, x_scale,
                             y_scale))
for i, key in enumerate(kb.keys):
x, y = x_scale * key.pose.x, y_scale * key.pose.y
r = degrees(
atan2(y_scale * sin(radians(key.pose.r - 90)),
x_scale * cos(radians(key.pose.r - 90)))) + 90
keyboard_unit = 19.05
margin = keyboard_unit - 18.42
ET.SubElement(
g_keycaps, "rect", {
"width": str(keyboard_unit * key.unit_width - margin),
"height": str(keyboard_unit * key.unit_height - margin),
"x": str((keyboard_unit * key.unit_width - margin) / -2),
"y": str((keyboard_unit * key.unit_height - margin) / -2),
"rx": "1",
"transform": f"translate({x} {y}) rotate({r})"
})
if add_numbers:
ET.SubElement(
g_keycaps, "text", {
"style":
"fill: black; font-family: sans-serif; font-size: 5;",
"transform": f"translate({x} {y}) rotate({180+r}) ",
"alignment-baseline": "middle",
"text-anchor": "middle",
}).text = f"{i}"
return ET.ElementTree(root)
def svg_to_file(svg):
f = io.BytesIO()
svg.write(f)
return f.getvalue()
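# A minimal usage sketch (assumes a shapely polygon named ``shape``):
#   tree = shape_to_svg(shape)
#   data = svg_to_file(tree)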
| nilq/baby-python | python |
# Utility class for string processing
import re
class BFStringDeal(object):
    def __init__(self, arg):
        self.arg = arg
    # Remove junk characters such as "\n"
    @classmethod
    def specialTXT(cls, text):
        return text.replace("\n", "")
    # Regular-expression processing of a string
    @classmethod
    def getAssignContent(cls, text, assignContent):
        # Compile the pattern; assignContent is the expression passed in by the caller
        regx = re.compile(assignContent)
        return regx.findall(text)
    # Strip opening/closing HTML tags. Common tags are h1-h5, a, span, img
    # and p -- img needs separate handling. For tag h1 the generated pattern
    # is <h1.*?>|</h1>
    @classmethod
    def deleteHtmlTag(cls, originalTxt):
        tagCollection = ['p', 'h1', 'h2', 'h3', 'h4', 'a', 'span']
        hasDealTag = originalTxt
        for tag in tagCollection:
            tagCompelete = "<" + tag + ".*?" + '>|' + '</' + tag + '>'
            regx = re.compile(tagCompelete)
            # Substitute on the accumulated result so every tag in the
            # collection is removed, not just the last one; this also covers
            # <a> and <span> tags nested inside h1/h2/p
            hasDealTag = regx.sub("", hasDealTag)
        # Tags containing img will be handled later
        return hasDealTag
    # Strip only the leading and trailing tag. Note: the trailing substitution
    # removes the first closing tag found, which is not necessarily the last.
    # e.g. input <div class="fasdfd">something so many <div>ssss</div></div>
    # yields something so many <div>ssss</div>
    @classmethod
    def deleteFrontAndBackendTag(cls, content):
        # Remove the leading tag
        frontTag = "<.*?>"
        regxFront = re.compile(frontTag)
        frontDelContent = regxFront.sub("", content, 1)
        # Remove one closing tag -- any </xx> will do here, and it is not
        # necessarily the final one
        backendTag = "</.*?>"
        regxBack = re.compile(backendTag)
        backendDelContent = regxBack.sub("", frontDelContent, 1)
        return backendDelContent
    # Strip every tag from the given text
    # e.g. input <div class="fasdfd">something so many <div>ssss</div></div>
    # yields something so many ssss
    @classmethod
    def deleteAllTag(cls, content):
        frontTag = "<.*?>"
        regxFront = re.compile(frontTag)
        frontDelContent = regxFront.sub("", content)
        backendTag = "</.*?>"
        regxBack = re.compile(backendTag)
        backendDelContent = regxBack.sub("", frontDelContent)
        return backendDelContent
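# Example (from the comments above):
#   BFStringDeal.deleteAllTag('<div class="fasdfd">something so many <div>ssss</div></div>')
#   returns 'something so many ssss'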
| nilq/baby-python | python |
__author__ = 'Alexander Horkun'
__email__ = 'mindkilleralexs@gmail.com'
from django.conf.urls import patterns, url
from xanderhorkunspider.web.websites.views import websites, auth
urlpatterns = patterns('',
url(r'^$', websites.index_view, name='index'),
url(r'^add-website$', websites.edit_website_view, name='add_website'),
url(r'^edit-website/(?P<wid>\d+)$', websites.edit_website_view, name='edit_website'),
url(r'^delete_website/(?P<wid>\d+)$', websites.delete_website_view, name='delete_website'),
url(r'^add-page', websites.edit_page_view, name='add_page'),
url(r'^website/(?P<wid>\d+)/add-page', websites.edit_page_view, name='add_page_to_website'),
url(r'^edit-page/(?P<pid>\d+)', websites.edit_page_view, name='edit_page'),
url(r'^delete_page/(\d+)$', websites.delete_page_view, name='delete_page'),
    url(r'^spider_session/website-(?P<wid>\d+)$', websites.spider_session_view,
name='spider_session'),
url(r'^spider_session$', websites.start_spider_session_view, name='start_spider_session'),
url(r'^spider-status/(.+)$', websites.spider_status_view, name='spider_status'),
url(r'^sign-up$', auth.signup_view, name='signup'),
url('logout', auth.logout_view, name='logout'),
url('login', auth.login_view, name='login'),
)
| nilq/baby-python | python |
"""
opbeat.contrib.django.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from opbeat.contrib.celery import CeleryMixin
from opbeat.contrib.django import DjangoClient
class CeleryClient(CeleryMixin, DjangoClient):
pass
| nilq/baby-python | python |
# from glob import glob
from setuptools import setup
setup(
name='pybrightsign',
version='0.9.4',
description='BrightSign APIs for humans. Python module to simplify using the BrightSign BSN/BSNEE API.',
long_description=open('../README.md').read(),
long_description_content_type='text/markdown',
license='MIT',
# https://pypi.org/classifiers/
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Adaptive Technologies',
'Topic :: Utilities'
],
url='https://github.com/pointw-dev/pybrightsign',
author='Michael Ottoson',
author_email='michael@pointw.com',
packages=['pybrightsign'],
include_package_data=True,
install_requires=[
'requests',
'oauthlib==2.1.0',
'requests-oauthlib==1.1.0'
],
# scripts=glob('bin/*'),
zip_safe=False
)
| nilq/baby-python | python |
# coding: utf-8
# 2019/10/17 @ tongshiwei
| nilq/baby-python | python |
import shutil
from unittest import TestCase
import os
import metadata_mp3
import unittest
from mutagen.easyid3 import EasyID3
class TestRenameSongName(TestCase):
def test_1(self):
songNameBefore = "Counting Crows - Colorblind (Official Video)"
songNameAfter = "Counting Crows - Colorblind"
songNameAfterTest = metadata_mp3.rename_song_name(songNameBefore)
self.assertEqual(songNameAfter, songNameAfterTest)
def test_2(self):
songNameBefore = "Counting Crows - Colorblind test"
songNameAfter = "Counting Crows - Colorblind"
songNameAfterTest = metadata_mp3.rename_song_name(songNameBefore)
self.assertNotEqual(songNameAfter, songNameAfterTest)
class TestConvertSongnameOnMetadata(TestCase):
def test_1(self):
songNameBefore = "Counting Crows - Colorblind"
metadataSongName = metadata_mp3.convert_songname_on_metadata(songNameBefore)
self.assertEqual(metadataSongName['artist'], "Counting Crows")
self.assertEqual(metadataSongName['title'], "Colorblind")
def test_2(self):
songNameBefore = "Counting Crows - Colorblind test"
metadataSongName = metadata_mp3.convert_songname_on_metadata(songNameBefore)
self.assertEqual(metadataSongName['artist'], "Counting Crows")
self.assertEqual(metadataSongName['title'], "Colorblind test")
class TestAddMetadataSong(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName = "Counting Crows - Colorblind.mp3"
songNameTest = "Counting Crows - Colorblind"
artistTest = "Counting Crows"
titleTest = "Colorblind"
albumTest = "album test"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
testFileNameWithPath = os.path.join(currentDirectory,testFileName)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFileNameWithPath = metadata_mp3.add_metadata_song(currentDirectory,albumTest, artistTest, songNameTest)
metatag = EasyID3(newFileNameWithPath)
print(newFileNameWithPath)
self.assertTrue(os.path.isfile(newFileNameWithPath))
self.assertEqual(newFileNameWithPath, testFileNameWithPath)
self.assertEqual(metatag['artist'][0], artistTest)
self.assertEqual(metatag['title'][0], titleTest)
self.assertEqual(metatag['album'][0], albumTest)
os.remove(newFileNameWithPath)
def test_2(self):
originalTestFileName = "test.mp3"
fileNameTest = "Counting Crows - Colorblind (Official Video).mp3"
songNameTest = "Counting Crows - Colorblind (Official Video)"
artistTest = "Counting Crows"
titleTest = "Colorblind"
albumTest = "album test"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
testFileNameWithPath = os.path.join(currentDirectory,fileNameTest)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFileNameWithPath = metadata_mp3.add_metadata_song(currentDirectory,albumTest, artistTest, songNameTest)
self.assertFalse(os.path.isfile(testFileNameWithPath))
self.assertTrue(os.path.isfile(newFileNameWithPath))
self.assertNotEqual(newFileNameWithPath, testFileNameWithPath)
metatag = EasyID3(newFileNameWithPath)
print(newFileNameWithPath)
self.assertEqual(metatag['artist'][0], artistTest)
self.assertEqual(metatag['title'][0], titleTest)
self.assertEqual(metatag['album'][0], albumTest)
os.remove(newFileNameWithPath)
class TestAddMetadataPlaylist(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName = "Counting Crows - Colorblind.mp3"
songNameTest = "Counting Crows - Colorblind"
artistTest = "Counting Crows"
titleTest = "Colorblind"
albumTest = "spokojne-sad"
trackNumberTest = 1
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
albumDirectory = os.path.join(currentDirectory,albumTest)
if not os.path.exists(albumDirectory):
os.mkdir(albumDirectory)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFileNameWithPath = metadata_mp3.add_metadata_playlist(currentDirectory,trackNumberTest,albumTest,artistTest,songNameTest)
#print(newFileNameWithPath)
self.assertTrue(os.path.isfile(newFileNameWithPath))
self.assertEqual(newFileNameWithPath, testFileNameWithPath)
metatag = EasyID3(newFileNameWithPath)
self.assertEqual(metatag['artist'][0], artistTest)
self.assertEqual(metatag['title'][0], titleTest)
self.assertEqual(metatag['album'][0], "YT "+albumTest)
self.assertEqual(metatag['tracknumber'][0],str(trackNumberTest))
shutil.rmtree(os.path.join(currentDirectory,albumTest))
class TestUpdateMetadataYoutube(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName1 = "Counting Crows - Colorblind.mp3"
testFileName2 = "Eels - I Need Some Sleep.mp3"
testFileName3 = "Paramore - The Only Exception.mp3"
artistTestList = []
artistTestList.append("Counting Crows")
titleTestList = []
titleTestList.append("Colorblind")
artistTestList.append("Eels")
titleTestList.append("I Need Some Sleep")
artistTestList.append("Paramore")
titleTestList.append("The Only Exception")
albumTest = "spokojne-sad"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
albumDirectory = os.path.join(currentDirectory,albumTest)
if not os.path.exists(albumDirectory):
os.mkdir(albumDirectory)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName1)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName2)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName3)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFilesList = metadata_mp3.update_metadata_youtube(currentDirectory,albumTest)
i = 0
for newFile in newFilesList:
print(newFile)
self.assertTrue(os.path.isfile(newFile))
metatag = EasyID3(newFile)
self.assertEqual(metatag['artist'][0], artistTestList[i])
self.assertEqual(metatag['title'][0], titleTestList[i])
self.assertEqual(metatag['album'][0], "YT "+albumTest)
i = i+1
shutil.rmtree(os.path.join(currentDirectory,albumTest))
class TestUpdateMetadata(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName1 = "Counting Crows - Colorblind.mp3"
testFileName2 = "Eels - I Need Some Sleep.mp3"
testFileName3 = "Paramore - The Only Exception.mp3"
artistTestList = []
artistTestList.append("Counting Crows")
titleTestList = []
titleTestList.append("Colorblind")
artistTestList.append("Eels")
titleTestList.append("I Need Some Sleep")
artistTestList.append("Paramore")
titleTestList.append("The Only Exception")
albumTest = "spokojne-sad"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
albumDirectory = os.path.join(currentDirectory,albumTest)
if not os.path.exists(albumDirectory):
os.mkdir(albumDirectory)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName1)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName2)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName3)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFilesList = metadata_mp3.update_metadata(albumDirectory,albumTest)
i = 0
for newFile in newFilesList:
print(newFile)
self.assertTrue(os.path.isfile(newFile))
metatag = EasyID3(newFile)
self.assertEqual(metatag['artist'][0], artistTestList[i])
self.assertEqual(metatag['title'][0], titleTestList[i])
self.assertEqual(metatag['album'][0], albumTest)
i = i+1
shutil.rmtree(os.path.join(currentDirectory,albumTest))
class TestSetAlbum(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName1 = "test1.mp3"
testFileName2 = "test2.mp3"
testCatalog = "test_1"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
testCatalogWithPath = os.path.join(currentDirectory, testCatalog)
if not os.path.exists(testCatalogWithPath):
os.mkdir(testCatalogWithPath)
testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName1)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName2)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFilesList = metadata_mp3.setAlbum(testCatalogWithPath, "album test")
for newFile in newFilesList:
newFileWithPath = os.path.join(testCatalogWithPath,newFile)
self.assertTrue(os.path.isfile(newFileWithPath))
metatag = EasyID3(newFileWithPath)
self.assertEqual(metatag['album'][0], "album test")
shutil.rmtree(os.path.join(currentDirectory,testCatalog))
class TestSetArtist(TestCase):
def test_1(self):
originalTestFileName = "test.mp3"
testFileName1 = "test1.mp3"
testFileName2 = "test2.mp3"
testCatalog = "test_1"
currentDirectory = os.path.dirname(os.path.realpath(__file__))
originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName)
testCatalogWithPath = os.path.join(currentDirectory, testCatalog)
if not os.path.exists(testCatalogWithPath):
os.mkdir(testCatalogWithPath)
testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName1)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName2)
shutil.copy(originalTestFileNameWithPath, testFileNameWithPath)
newFilesList = metadata_mp3.setArtist(testCatalogWithPath, "artist test")
for newFile in newFilesList:
newFileWithPath = os.path.join(testCatalogWithPath,newFile)
self.assertTrue(os.path.isfile(newFileWithPath))
metatag = EasyID3(newFileWithPath)
self.assertEqual(metatag['artist'][0], "artist test")
shutil.rmtree(os.path.join(currentDirectory,testCatalog))
if __name__=='__main__':
unittest.main()
| nilq/baby-python | python |
#!/usr/bin/env python
# coding=utf-8
"""Writes uninstallation SQL script to stdout."""
from os.path import abspath, join, dirname
import sys
def uninstall():
with open(join(dirname(abspath(__file__)), 'uninstall.sql')) as f:
sys.stdout.write(f.read())
if __name__ == '__main__':
uninstall()
| nilq/baby-python | python |
from PIL import Image, ImageDraw
from numpy import array, floor
import csv
class ImageScatterPlot:
def __init__(self):
self.h, self.w = 20000,20000
self.resize_h = 275
self.resize_w = 275
def create_save_fig(self, image_paths, projected_features, out_file):
img_scatter = self.create_fig(image_paths, projected_features)
self.save_fig(img_scatter, out_file)
def create_fig(self, image_paths, projected_features):
img = Image.new('RGB',(self.w,self.h),(255,255,255))
draw = ImageDraw.Draw(img)
scale = abs(projected_features).max(0)
scaled = floor(array([ (p / scale) * (self.w/2-20,self.h/2-20) + (self.w/2,self.h/2) for p in projected_features]))
print "number of images", len(image_paths)
for i in range(len(image_paths)):
nodeim = Image.open(image_paths[i])
nodeim = nodeim.resize((self.resize_w,self.resize_h))
ns = nodeim.size
img.paste(nodeim,(int(scaled[i][0]-ns[0]//2),int(scaled[i][1]-ns[1]//2),int(scaled[i][0]+ns[0]//2+1),int(scaled[i][1]+ns[1]//2+1)))
return img
def save_fig(self, img, out_file):
img.save(out_file)
if __name__ == "__main__":
in_file = "PNAR-tsne-HOG-color.csv"
out_file = "res-class.jpg"
rows = []
    with open(in_file, 'r', newline='') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
rows.pop(0)
image_paths = [row[0] for row in rows]
features = array([(float(row[1]), float(row[2])) for row in rows])
ImageScatterPlot().create_save_fig(image_paths = image_paths, projected_features = features, out_file = out_file)
| nilq/baby-python | python |
# Lagoon (2400004) | Zero's Temple (320000000)
from net.swordie.ms.loaders import StringData
options = []
al = chr.getAvatarData().getAvatarLook()
selection = sm.sendNext("Hello. How can I help you? #b\r\n"
"#L0#Change hair colour#l\r\n"
"#L1#Change eye colour#l\r\n"
"#L2#Change skin tone#l")
if selection == 0:
hairColour = al.getHair() % 10
baseHair = al.getHair() - hairColour
for colour in range(8):
colourOption = baseHair + colour
options.append(colourOption)
answer = sm.sendAskAvatar("Choose your new hair colour!", False, False, options)
if answer < len(options):
sm.changeCharacterLook(options[answer])
elif selection == 1:
faceColour = al.getFace() % 1000 - al.getFace() % 100
baseFace = al.getFace() - faceColour
for colour in range(0, 900, 100):
colourOption = baseFace + colour
        if StringData.getItemStringById(colourOption) is not None:
options.append(colourOption)
answer = sm.sendAskAvatar("With our specialized machine, you can see the results of your potential treatment in advance. "
"What kind of lens would you like to wear? Please choose the style of your liking.", False, False, options)
if answer < len(options):
sm.changeCharacterLook(options[answer])
else:
#These values will crash the client when attempting to load them onto character
nullSkins = [6, 7, 8]
for skin in range(14):
#Skip past null skin values
if skin in nullSkins:
continue
options.append(skin)
answer = sm.sendAskAvatar("We have the latest in beauty equipment. "
"With our technology, you can preview what your skin will look like in advance! "
"Which treatment would you like?", False, False, options)
if answer < len(options):
sm.changeCharacterLook(options[answer])
| nilq/baby-python | python |
def a_method():
pass
class AClass:
pass
var = "A Variable"
print("Support library name: {}".format(__name__))
if __name__ == '__main__':
age = 0
while age <= 0:
age = int(input("How old are you? "))
| nilq/baby-python | python |
'''
Manage file shares that use the SMB 3.0 protocol.
'''
from ... pyaz_utils import _call_az
from . import copy, metadata
def list(share_name, account_key=None, account_name=None, connection_string=None, exclude_dir=None, marker=None, num_results=None, path=None, sas_token=None, snapshot=None, timeout=None):
'''
List files and directories in a share.
Required Parameters:
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- exclude_dir -- None
- marker -- An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped.
- num_results -- Specify the maximum number to return. If the request does not specify num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remaining of the results. Provide "*" to return all.
- path -- The directory path within the file share.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- snapshot -- A string that represents the snapshot version, if applicable.
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file list", locals())
def delete(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, timeout=None):
'''
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file delete", locals())
def resize(path, share_name, size, account_key=None, account_name=None, connection_string=None, sas_token=None, timeout=None):
'''
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
- size -- The length to resize the file to.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file resize", locals())
def url(path, share_name, account_key=None, account_name=None, connection_string=None, protocol=None, sas_token=None):
'''
Create the url to access a file.
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- protocol -- Protocol to use.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
'''
return _call_az("az storage file url", locals())
def generate_sas(path, share_name, account_key=None, account_name=None, cache_control=None, connection_string=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None, expiry=None, https_only=None, ip=None, permissions=None, policy_name=None, start=None):
'''
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- cache_control -- Response header value for Cache-Control when resource is accessed using this shared access signature.
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- content_disposition -- Response header value for Content-Disposition when resource is accessed using this shared access signature.
- content_encoding -- Response header value for Content-Encoding when resource is accessed using this shared access signature.
- content_language -- Response header value for Content-Language when resource is accessed using this shared access signature.
- content_type -- Response header value for Content-Type when resource is accessed using this shared access signature.
- expiry -- Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes invalid. Do not use if a stored access policy is referenced with --id that specifies this value.
- https_only -- Only permit requests made with the HTTPS protocol. If omitted, requests from both the HTTP and HTTPS protocol are permitted.
- ip -- Specifies the IP address or range of IP addresses from which to accept requests. Supports only IPv4 style addresses.
    - permissions -- The permissions the SAS grants. Allowed values: (c)reate (d)elete (r)ead (w)rite. Do not use if a stored access policy is referenced with --id that specifies this value. Can be combined.
- policy_name -- The name of a stored access policy within the container's ACL.
- start -- Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes valid. Do not use if a stored access policy is referenced with --id that specifies this value. Defaults to the time of the request.
'''
return _call_az("az storage file generate-sas", locals())
def show(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, snapshot=None, timeout=None):
'''
Return all user-defined metadata, standard HTTP properties, and system properties for the file.
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- snapshot -- A string that represents the snapshot version, if applicable.
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file show", locals())
def update(path, share_name, account_key=None, account_name=None, clear_content_settings=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, sas_token=None, timeout=None):
'''
Set system properties on the file.
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- clear_content_settings -- If this flag is set and any one of the following properties (--content-cache-control, --content-disposition, --content-encoding, --content-language, --content-md5, --content-type) is set, then all of these properties are set together. If a value is not provided for a given property while at least one of the listed properties is set, that property will be cleared.
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- content_cache_control -- The cache control string.
- content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata.
- content_encoding -- The content encoding type.
- content_language -- The content language.
- content_md5 -- The content's MD5 hash.
- content_type -- The content MIME type.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file update", locals())
def exists(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, snapshot=None, timeout=None):
'''
Check for the existence of a file.
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- snapshot -- A string that represents the snapshot version, if applicable.
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file exists", locals())
def download(path, share_name, account_key=None, account_name=None, connection_string=None, dest=None, end_range=None, max_connections=None, no_progress=None, open_mode=None, sas_token=None, snapshot=None, start_range=None, timeout=None, validate_content=None):
'''
Download a file to a file path, with automatic chunking and progress notifications.
Required Parameters:
- path -- The path to the file within the file share.
- share_name -- The file share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- dest -- Path of the file to write to. The source filename will be used if not specified.
- end_range -- End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file.
- max_connections -- If set to 2 or greater, an initial get will be done for the first self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, the method returns at this point. If it is not, it will download the remaining data parallel using the number of threads equal to max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request will be done. This is not generally recommended but available if very few threads should be used, network requests are very expensive, or a non-seekable stream prevents parallel download. This may also be valuable if the file is being concurrently modified to enforce atomicity or if many files are expected to be empty as an extra request is required for empty files if max_connections is greater than 1.
- no_progress -- Include this flag to disable progress reporting for the command.
- open_mode -- Mode to use when opening the file. Note that specifying append only open_mode prevents parallel download. So, max_connections must be set to 1 if this open_mode is used.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- snapshot -- A string that represents the snapshot version, if applicable.
- start_range -- Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file.
- timeout -- Request timeout in seconds. Applies to each call to the service.
- validate_content -- If set to true, validates an MD5 hash for each retrieved portion of the file. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that the service will only return transactional MD5s for chunks 4MB or less so the first get request will be of size self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be thrown. As computing the MD5 takes processing time and more requests will need to be done due to the reduced chunk size there may be some increase in latency.
'''
return _call_az("az storage file download", locals())
def upload(share_name, source, account_key=None, account_name=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, max_connections=None, metadata=None, no_progress=None, path=None, sas_token=None, timeout=None, validate_content=None):
'''
Upload a file to a share that uses the SMB 3.0 protocol.
Required Parameters:
- share_name -- The file share name.
- source -- Path of the local file to upload as the file content.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- content_cache_control -- The cache control string.
- content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata.
- content_encoding -- The content encoding type.
- content_language -- The content language.
- content_md5 -- The content's MD5 hash.
- content_type -- The content MIME type.
- max_connections -- Maximum number of parallel connections to use.
- metadata -- Metadata in space-separated key=value pairs. This overwrites any existing metadata.
- no_progress -- Include this flag to disable progress reporting for the command.
- path -- The path to the file within the file share. If the file name is omitted, the source file name will be used.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
- validate_content -- If true, calculates an MD5 hash for each range of the file. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the file.
'''
return _call_az("az storage file upload", locals())
def upload_batch(destination, source, account_key=None, account_name=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, destination_path=None, dryrun=None, max_connections=None, metadata=None, no_progress=None, pattern=None, sas_token=None, validate_content=None):
'''
Upload files from a local directory to an Azure Storage File Share in a batch operation.
Required Parameters:
- destination -- The destination of the upload operation. The destination can be the file share URL or the share name.
- source -- The directory to upload files from.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- content_cache_control -- The cache control string.
- content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata.
- content_encoding -- The content encoding type.
- content_language -- The content language.
- content_md5 -- The content's MD5 hash.
- content_type -- The content MIME type.
- destination_path -- The directory where the source data is copied to. If omitted, data is copied to the root directory.
- dryrun -- List the files and blobs to be uploaded. No actual data transfer will occur.
- max_connections -- Maximum number of parallel connections to use.
- metadata -- Metadata in space-separated key=value pairs. This overwrites any existing metadata.
- no_progress -- Include this flag to disable progress reporting for the command.
- pattern -- The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- validate_content -- If set, calculates an MD5 hash for each range of the file for validation.
'''
return _call_az("az storage file upload-batch", locals())
def download_batch(destination, source, account_key=None, account_name=None, connection_string=None, dryrun=None, max_connections=None, no_progress=None, pattern=None, sas_token=None, snapshot=None, validate_content=None):
'''
Download files from an Azure Storage File Share to a local directory in a batch operation.
Required Parameters:
- destination -- The local directory where the files are downloaded to. This directory must already exist.
- source -- The source of the file download operation. The source can be the file share URL or the share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- dryrun -- List the files and blobs to be downloaded. No actual data transfer will occur.
- max_connections -- Maximum number of parallel connections to use.
- no_progress -- Include this flag to disable progress reporting for the command.
- pattern -- The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- snapshot -- A string that represents the snapshot version, if applicable.
- validate_content -- If set, calculates an MD5 hash for each range of the file for validation.
'''
return _call_az("az storage file download-batch", locals())
def delete_batch(source, account_key=None, account_name=None, connection_string=None, dryrun=None, pattern=None, sas_token=None, timeout=None):
'''
Delete files from an Azure Storage File Share.
Required Parameters:
- source -- The source of the file delete operation. The source can be the file share URL or the share name.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- dryrun -- List the files and blobs to be deleted. No actual data deletion will occur.
- pattern -- The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage file delete-batch", locals())
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import uuid
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.helpers import setup_device
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.device.models import DevicePermissions
DUMMY_PASSWORD = "password"
class ChannelOrderTestCase(APITestCase):
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def setUp(self):
self.facility, self.superuser = setup_device()
self.learner = FacilityUser.objects.create(
username="learner", facility=self.facility
)
self.learner.set_password(DUMMY_PASSWORD)
self.learner.save()
channel = ChannelMetadata.objects.get(id=self.the_channel_id)
channel.root.available = True
channel.root.save()
self.url = reverse("kolibri:kolibri.plugins.device:devicechannelorder")
def test_learner_cannot_post(self):
self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, [], format="json")
self.assertEqual(response.status_code, 403)
def test_can_manage_content_can_post(self):
DevicePermissions.objects.create(user=self.learner, can_manage_content=True)
self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, [], format="json")
self.assertNotEqual(response.status_code, 403)
def test_superuser_can_post(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, [], format="json")
self.assertNotEqual(response.status_code, 403)
def test_error_wrong_number_of_uuids(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
response = self.client.post(
self.url, [self.the_channel_id, uuid.uuid4().hex], format="json"
)
self.assertEqual(response.status_code, 400)
def test_error_invalid_uuid(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, ["test"], format="json")
self.assertEqual(response.status_code, 400)
def test_error_not_array(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, {}, format="json")
self.assertEqual(response.status_code, 400)
def test_set_order_one(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
response = self.client.post(self.url, [self.the_channel_id], format="json")
channel = ChannelMetadata.objects.get(id=self.the_channel_id)
self.assertEqual(response.status_code, 200)
self.assertEqual(channel.order, 1)
def test_set_order_two(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
new_channel_id = uuid.uuid4().hex
new_channel = ChannelMetadata.objects.create(
id=new_channel_id,
name="Test",
root=ContentNode.objects.create(
title="test",
id=uuid.uuid4().hex,
channel_id=new_channel_id,
content_id=uuid.uuid4().hex,
available=True,
),
)
response = self.client.post(
self.url, [self.the_channel_id, new_channel.id], format="json"
)
self.assertEqual(response.status_code, 200)
channel = ChannelMetadata.objects.get(id=self.the_channel_id)
new_channel.refresh_from_db()
self.assertEqual(channel.order, 1)
self.assertEqual(new_channel.order, 2)
def test_set_order_two_one_unavailable(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
new_channel_id = uuid.uuid4().hex
new_channel = ChannelMetadata.objects.create(
id=new_channel_id,
name="Test",
root=ContentNode.objects.create(
title="test",
id=uuid.uuid4().hex,
channel_id=new_channel_id,
content_id=uuid.uuid4().hex,
available=False,
),
)
response = self.client.post(
self.url, [self.the_channel_id, new_channel.id], format="json"
)
self.assertEqual(response.status_code, 400)
def test_set_order_two_reorder(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
new_channel_id = uuid.uuid4().hex
new_channel = ChannelMetadata.objects.create(
id=new_channel_id,
name="Test",
root=ContentNode.objects.create(
title="test",
id=uuid.uuid4().hex,
channel_id=new_channel_id,
content_id=uuid.uuid4().hex,
available=True,
),
order=1,
)
channel = ChannelMetadata.objects.get(id=self.the_channel_id)
channel.order = 2
channel.save()
response = self.client.post(
self.url, [self.the_channel_id, new_channel.id], format="json"
)
self.assertEqual(response.status_code, 200)
new_channel.refresh_from_db()
channel.refresh_from_db()
self.assertEqual(channel.order, 1)
self.assertEqual(new_channel.order, 2)
|
nilq/baby-python
|
python
|
"""Tests for the config.config-module
"""
# System library imports
from collections import namedtuple
from datetime import date, datetime
import pathlib
import re
import sys
# Third party imports
import pytest
# Midgard imports
from midgard.config import config
from midgard.collections import enums
from midgard.dev import exceptions
#
# Helper functions
#
EntryTestCase = namedtuple("EntryTestCase", ("type", "cfg_value", "value"))
def normalize_whitespace(string):
"""Normalize whitespace in string
Collapses runs of spaces and runs of newlines into a single space or newline
"""
return re.sub("\n+", "\n", re.sub(" +", " ", string))
def only_word_characters(string):
"""Filter out only word characters from the string"""
return re.sub("\W", "", string)
#
# Test configuration
#
@pytest.fixture
def config_file():
"""A test configuration read from file"""
cfg = config.Configuration("file")
cfg_path = pathlib.Path(__file__).parent / "test_config.conf"
cfg.update_from_file(cfg_path)
cfg_vars = dict(var_1="one", var_2="two")
cfg.update_vars(cfg_vars)
return cfg
@pytest.fixture
def config_options():
"""A test configuration based on (mocked) command line options"""
cfg = config.Configuration("options")
cfg_argv = [
sys.argv[0],
"not_an_option",
"--section_1:foo=bar",
"--section_1:pi=3.14",
"--section_2:foo=baz",
"--just_a_flag",
"--non_existing_config:section_1:foo=none",
"--options:section_3:name=options",
"--section_1:pi=3.1415",
]
remember_sys_argv, sys.argv = sys.argv, cfg_argv
cfg.update_from_options(allow_new=True)
sys.argv = remember_sys_argv
return cfg
@pytest.fixture
def config_dict(gps_dict):
"""A test configuration based on a dictionary"""
cfg = config.Configuration("dictionary")
cfg.update_from_dict(gps_dict, section="gps")
return cfg
@pytest.fixture
def gps_dict():
"""A dictionary with GPS test data"""
return dict(gps_f1=1575.42, gps_f2=1227.60, gps_f5=1176.45, gps_name="Global Positioning System")
@pytest.fixture
def config_section(config_dict):
"""A section with test data"""
return config_dict.gps
#
# Tests
#
def test_read_config_from_file(config_file):
"""Test that reading a configuration from file works"""
assert len(config_file.sections) > 0
assert len(config_file.sources) == 1
assert list(config_file.sources)[0].endswith("test_config.conf")
def test_read_config_from_file_classmethod(config_file):
"""Test that reading a configuration from file works using the classmethod"""
cfg_path = pathlib.Path(__file__).parent / "test_config.conf"
cfg = config.Configuration.read_from_file("test", cfg_path)
assert cfg.as_str() == config_file.as_str()
@pytest.mark.skip(reason="as_str() does not print profiles correctly")
def test_write_config_to_file(config_file, tmpdir):
"""Test that writing a configuration creates a file that is identical to the original"""
cfg_path = pathlib.Path("".join(config_file.sources))
out_path = pathlib.Path(tmpdir / "test_config.conf")
config_file.write_to_file(out_path)
assert normalize_whitespace(cfg_path.read_text()) == normalize_whitespace(out_path.read_text())
def test_read_config_from_dict(config_dict):
"""Test that reading a configuration from a dict works"""
assert len(config_dict.sections) > 0
assert len(config_dict.sources) == 1
assert list(config_dict.sources)[0] == "dictionary"
def test_read_config_from_options(config_options):
"""Test that reading a configuration from a options works"""
assert len(config_options.sections) > 0
assert len(config_options.sources) > 0
assert all(s.startswith("command line") for s in config_options.sources)
def test_update_config_from_config_section(config_file, config_options):
"""Test that a config section can be copied"""
assert "section_1" not in config_file.section_names
config_file.update_from_config_section(config_options.section_1)
assert "section_1" in config_file.section_names
assert str(config_file.section_1) == str(config_options.section_1)
def test_update_config_from_options(config_file):
"""Test that a config can be updated from command line options"""
config_file.master_section = "midgard"
sections_before = set(config_file.section_names)
entries_before = set(config_file.midgard.as_list())
cfg_argv = [
sys.argv[0],
"not_an_option",
"--foo=I am an option",
"--midgard:pi=4",
"--new_key=new value",
"--new_section:pi=3.14",
"--just_a_flag",
"--non_existing_config:midgard:foo=none",
"--file:midgard:spam=more ham",
]
remember_sys_argv, sys.argv = sys.argv, cfg_argv
config_file.update_from_options(allow_new=True)
sys.argv = remember_sys_argv
assert set(config_file.section_names) - sections_before == {"new_section"}
assert set(config_file.midgard.as_list()) - entries_before == {"new_key"}
assert config_file.midgard.foo.str == "I am an option"
assert config_file.midgard.pi.str == "4"
assert config_file.midgard.spam.str == "more ham"
assert config_file.midgard.foo.source == "command line (--foo=I am an option)"
def test_clearing_config(config_file):
"""Test that clearing a configuration works"""
config_file.clear()
assert len(config_file.sections) == 0
def test_set_non_existing_master_section(config_file):
"""Test that setting a non-existing section is ok, but getting from it raises an error"""
config_file.master_section = "non_existing"
with pytest.raises(exceptions.MissingSectionError):
config_file.non_existing
def test_access_from_master_section(config_file):
"""Test that accessing entry from master section can be done without referencing section"""
config_file.master_section = "midgard"
assert config_file.foo is config_file.midgard.foo
def test_access_with_master_section(config_file):
"""Test accessing an entry that is not in the master section"""
config_file.master_section = "midgard"
assert config_file.profile_test.technique.str == "none"
def test_get_from_master_section_without_master_section(config_file):
"""Test that trying to get an entry as if from a master section typically raises an error"""
with pytest.raises(exceptions.MissingSectionError):
config_file.foo
def test_get_from_master_section(config_file):
"""Test that get can access entries from a master section"""
config_file.master_section = "midgard"
entry = config_file.get("foo", default="baz")
assert entry is config_file.midgard.foo
def test_profiles_are_not_separate_sections(config_file):
"""Test that profiles are not registered as separate sections"""
assert len([s for s in config_file.section_names if s.startswith("profile_test")]) == 1
def test_profiles_are_prioritized(config_file):
"""Test that values are taken from the correct profiles, when giving a list of profiles to use"""
config_file.profiles = ["sisre", "vlbi", None]
assert config_file.profile_test.technique.str == "gnss" # from profile sisre
assert config_file.profile_test.spam.str == "bam" # from profile vlbi
assert config_file.profile_test.foo.str == "baz" # from default profile
def test_automatic_default_profile(config_file):
"""Test that default profile is included automatically"""
config_file.profiles = ["sisre", "vlbi"]
assert config_file.profiles == ["sisre", "vlbi", None]
def test_set_non_existing_profiles(config_file):
"""Test that non-existing profiles are ignored (no error)"""
config_file.profiles = ["non_existing", None]
assert config_file.profile_test.technique.str == "none" # from default profile
def test_using_only_default_profile(config_file):
"""Test that default profile can be set simply by assigning None"""
config_file.profiles = None
assert config_file.profiles == [None]
assert config_file.profile_test.technique.str == "none" # from default profile
def test_get_with_override_value(config_file):
"""Test that get with override value returns override value"""
entry = config_file.get("foo", section="midgard", value="override")
assert isinstance(entry, config.ConfigurationEntry)
assert entry.str == "override"
assert entry.source == "method call"
def test_get_with_default_value_and_non_existing_section(config_file):
"""Test that get returns default value when nothing is found in configuration"""
entry = config_file.get("foo", section="non_existing", default="default")
assert isinstance(entry, config.ConfigurationEntry)
assert entry.str == "default"
assert entry.source == "default value"
def test_get_with_default_value_and_non_existing_entry(config_file):
"""Test that get returns default value when nothing is found in configuration"""
entry = config_file.get("non_existing", section="midgard", default="default")
assert isinstance(entry, config.ConfigurationEntry)
assert entry.str == "default"
assert entry.source == "default value"
def test_get_without_default_value_and_non_existing_section(config_file):
"""Test that get raises error when nothing is found in configuration and no default value is given"""
with pytest.raises(exceptions.MissingSectionError):
config_file.get("foo", section="non_existing")
def test_get_without_default_value_and_non_existing_entry(config_file):
"""Test that get raises error when nothing is found in configuration and no default value is given"""
with pytest.raises(exceptions.MissingEntryError):
config_file.get("non_existing", section="midgard")
def test_get_from_configuration(config_file):
"""Test that get returns the same entry as regular attribute access"""
entry = config_file.get("foo", section="midgard", default="baz")
assert entry is config_file.midgard.foo
def test_get_from_fallback_config(config_file, config_dict):
"""Test that get can access entries in a fallback configuration"""
config_dict.fallback_config = config_file
entry = config_dict.get("foo", section="midgard", default="baz")
assert entry is config_file.midgard.foo
def test_exists_with_section(config_file):
"""Test that exists works for both existing and non-existing keys"""
assert config_file.exists("foo", section="midgard")
assert not config_file.exists("does_not_exist", section="midgard")
assert not config_file.exists("foo", section="does_not_exist")
def test_exists_with_master_section(config_file):
"""Test that exists works for both existing and non-existing keys without specifying section"""
config_file.master_section = "data_types"
assert config_file.exists("str")
assert not config_file.exists("does_not_exist")
def test_exists_with_master_section_defined(config_file):
"""Test that exists behaves correctly when master_section is defined and section specified"""
config_file.master_section = "data_types"
assert config_file.exists("foo", section="midgard")
assert not config_file.exists("str", section="str")
assert not config_file.exists("foo", section="does_not_exist")
def test_getattr_from_fallback_config(config_file, config_dict):
"""Test that attribute access can get entries in fallback configuration"""
config_dict.fallback_config = config_file
entry = config_dict.midgard.foo
assert entry is config_file.midgard.foo
def test_getitem_from_fallback_config(config_file, config_dict):
"""Test that dictionary access can get entries in fallback configuration"""
config_dict.fallback_config = config_file
entry = config_dict["midgard"].foo
assert entry is config_file.midgard.foo
def test_add_single_entry(config_file):
"""Test adding a single new entry"""
sections_before = set(config_file.section_names)
config_file.update("new_section", "new_key", "new_value", source="test")
assert set(config_file.section_names) - sections_before == {"new_section"}
assert config_file.new_section.new_key.str == "new_value"
assert config_file.new_section.new_key.source == "test"
def test_updating_existing_entry(config_file):
"""Test updating the value of an existing entry"""
sections_before = config_file.section_names
config_file.update("midgard", "foo", "new_value", source="test", allow_new=False)
assert config_file.section_names == sections_before
assert config_file.midgard.foo.str == "new_value"
assert config_file.midgard.foo.source == "test"
def test_updating_non_existing_section(config_file):
"""Test updating the value of an entry in a non-existing section"""
with pytest.raises(exceptions.MissingSectionError):
config_file.update("non_existing", "foo", "new_value", source="test", allow_new=False)
def test_updating_non_existing_entry(config_file):
"""Test updating the value of a non-existing entry"""
with pytest.raises(exceptions.MissingEntryError):
config_file.update("midgard", "non_existing", "new_value", source="test", allow_new=False)
@pytest.mark.skip(reason="as_str() does not print profiles correctly")
def test_configuration_as_string(config_file):
"""Test that configuration as string is similar to configuration file"""
path = pathlib.Path(list(config_file.sources)[0])
with open(path, mode="r") as fid:
file_str = "".join(l for l in fid if not l.startswith("#"))
assert normalize_whitespace(file_str) == normalize_whitespace(config_file.as_str())
@pytest.mark.skip(reason="str() does not print profiles correctly")
def test_string_representation_of_configuration(config_file):
"""Test that string representation is similar to configuration file"""
path = pathlib.Path(list(config_file.sources)[0])
with open(path, mode="r") as fid:
file_str = "".join(l for l in fid if not l.startswith("#"))
assert normalize_whitespace(file_str) == normalize_whitespace(str(config_file))
def test_configuration_as_dict(config_dict, gps_dict):
"""Test that dict representation gives back a sensible dictionary"""
assert config_dict.as_dict(default_getter="str")["gps"] == {k: str(v) for k, v in gps_dict.items()}
def test_configuration_as_dict_with_getters(config_dict, gps_dict):
"""Test that dict representation gives back a sensible dictionary"""
getters = {"gps": {k: type(v).__name__ for k, v in gps_dict.items()}}
assert config_dict.as_dict(getters=getters)["gps"] == gps_dict
def test_attribute_and_item_access(config_file):
"""Test that the same sections are returned whether using attribute or item access"""
assert config_file.midgard is config_file["midgard"]
def test_deleting_section_as_item(config_file):
"""Test that deleting a section removes it"""
sections_before = set(config_file.section_names)
del config_file["midgard"]
assert sections_before - set(config_file.section_names) == {"midgard"}
def test_deleting_section_as_attribute(config_file):
"""Test that deleting a section removes it"""
sections_before = set(config_file.section_names)
del config_file.midgard
assert sections_before - set(config_file.section_names) == {"midgard"}
def test_dir_return_sections(config_file):
"""Test that sections are included in dir(configuration)"""
cfg_dir = dir(config_file)
sections = set(config_file.section_names)
assert len(sections) > 0
assert set(cfg_dir) & sections == sections
def test_dir_return_master_section(config_file):
"""Test that entries in master section are included in dir(configuration)"""
config_file.master_section = "midgard"
cfg_dir = dir(config_file)
entries = set(config_file.midgard.as_list())
assert len(entries) > 0
assert set(cfg_dir) & entries == entries
def test_repr_of_configuration(config_file):
"""Test that repr of configuration is sensible"""
assert repr(config_file) == "Configuration(name='file')"
def test_section_as_string(config_section, gps_dict):
"""Test that string representation of section looks reasonable"""
assert only_word_characters(config_section.as_str()) == only_word_characters("gps" + str(gps_dict))
def test_section_as_list(config_section, gps_dict):
"""Test that the list representation of section equals list of keys"""
assert config_section.as_list() == list(gps_dict.keys())
def test_section_as_dict(config_section, gps_dict):
"""Test that the dict representation of section equals original dict"""
assert config_section.as_dict(default_getter="str") == {k: str(v) for k, v in gps_dict.items()}
def test_section_as_dict_with_getters(config_section, gps_dict):
"""Test that the dict representation of section equals original dict"""
getters = {k: type(v).__name__ for k, v in gps_dict.items()}
assert config_section.as_dict(getters=getters) == gps_dict
def test_dir_return_entries(config_section):
"""Test that entries are included in dir(section)"""
cfg_dir = dir(config_section)
entries = set(config_section.as_list())
assert len(entries) > 0
assert set(cfg_dir) & entries == entries
def test_repr_of_section(config_section):
"""Test that repr of section is sensible"""
assert repr(config_section) == "ConfigurationSection(name='gps')"
entry_data = [
EntryTestCase("str", "Curiouser and curiouser!", "Curiouser and curiouser!"),
EntryTestCase("int", "42", 42),
EntryTestCase("float", "3.14", 3.14),
EntryTestCase("bool", "on", True),
EntryTestCase("bool", "no", False),
EntryTestCase("date", "2018-05-30", date(2018, 5, 30)),
EntryTestCase("datetime", "2017-01-28 15:12:30", datetime(2017, 1, 28, 15, 12, 30)),
EntryTestCase("path", "test_config.conf", pathlib.Path("test_config.conf")),
EntryTestCase("list", "vlbi, slr, gnss, doris", ["vlbi", "slr", "gnss", "doris"]),
EntryTestCase("tuple", "one two three", ("one", "two", "three")),
EntryTestCase("dict", "one:en, two:to, three:tre", {"one": "en", "two": "to", "three": "tre"}),
]
@pytest.mark.parametrize("test_case", entry_data)
def test_access_entry(test_case):
"""Test getting values of entries through accessors"""
entry = config.ConfigurationEntry("test", test_case.cfg_value)
assert getattr(entry, test_case.type) == test_case.value
assert getattr(entry, f"as_{test_case.type}")() == test_case.value
@pytest.mark.parametrize("test_case", entry_data)
def test_entry_is_used(test_case):
"""Test that entry is marked as used when accessing value"""
entry = config.ConfigurationEntry("test", test_case.cfg_value)
assert entry.is_used is False
getattr(entry, test_case.type)
assert entry.is_used is True
def test_access_enum():
"""Test getting the value of an entry as an enum (has no property access)"""
entry = config.ConfigurationEntry("test", "info")
assert entry.as_enum("log_level") is enums.get_value("log_level", "info")
def test_enum_is_used():
"""Test that entry is marked as used when accessed as enum"""
entry = config.ConfigurationEntry("test", "info")
assert entry.is_used is False
entry.as_enum("log_level")
assert entry.is_used is True
def test_entry_with_type(config_file):
"""Test that type hints of an entry can be accessed"""
assert config_file.midgard.foo.type == "str"
def test_entry_with_help(config_file):
"""Test that help texts of an entry can be accessed"""
assert config_file.midgard.foo.help == "How to foodazzle"
def test_metadata_of_entry(config_file):
"""Test that metadata of entry can be accessed"""
assert len(config_file.midgard.foo.meta.keys()) > 0
assert config_file.midgard.foo.meta["type"] is config_file.midgard.foo.type
assert config_file.midgard.foo.meta["help"] is config_file.midgard.foo.help
def test_bool_of_entry():
"""Test the bool value of an entry"""
entry = config.ConfigurationEntry("key", "value")
assert entry
def test_bool_of_empty_entry():
"""Test that the bool value of an empty entry is False"""
entry = config.ConfigurationEntry("empty", "")
assert not entry
def test_repr_of_entry():
"""Test that the repr of an entry is sensible"""
entry = config.ConfigurationEntry("key", "value")
assert repr(entry) == "ConfigurationEntry(key='key', value='value')"
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# Created by Jared Dunbar, April 4th, 2020
# Use this as an example for a basic game.
import pyxel, random, math
import os.path
from os import path
# Width and height of game screen, in tiles
WIDTH = 16
HEIGHT = 12
# Width and height of the game level
GL_WIDTH = 170
GL_HEIGHT = 150
# Window offsets for the panning feature.
windowOffsetX = 0
windowOffsetY = 0
# Entities should not be able to walk through structures
# unless the structure has "allow" set to True
structures = []
# Entities can move all over the place and stand in the same cube, but not walk
# into structures unless the structure has "allow" set to True
entities = []
# This contains all fired lazers; it is cleared every tick.
lazers = []
# Sound mappings
sounds = {}
# These are the texture maps for 8x8 and 16x16
texture8 = {}
texture16 = {}
# Information about the image map:
# Image maps are 256x256. This allows for 256 16x16 textures in one tilemap,
# or 1024 8x8 textures in one tilemap
# Image Map 0: 16x16 textures
# Image Map 1: 8x8 textures
# Image Map 2: <unused>
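# For example (illustration only): when len(texture16) is 20, the next 16x16
# texture registered by Drawn lands at xLoc = int(20/16)*16 = 16 and
# yLoc = (20%16)*16 = 64 in image bank 0.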
# This sets up all the rendering code for ya. Give it an image,
# and it will remember the thing for you.
# NOTE: transparent is a color key. If -1, no transparency is applied.
class Drawn():
def __init__(self, name, size=16, texture="invalid16.png", transparent=-1):
if (size != 8) and (size != 16):
print("CRITICAL FAIL! Texture is not of correct size!")
exit(1)
self.trans = transparent
if size == 16:
# Only register if the texture is not already in the 16x16 texture map
if name not in texture16:
if not path.exists(texture):
texture = "invalid16.png"
# 16x16 is in bank 0
self.bank = 0
self.xLoc = int(len(texture16)/16)*16
self.yLoc = (len(texture16)%16) * 16
pyxel.image(self.bank).load(self.xLoc, self.yLoc, texture)
texture16[name] = self
elif size == 8:
# Only register if the texture is not already in the 8x8 texture map
if name not in texture8:
if not path.exists(texture):
print("Could not find texture {}".format(texture))
texture = "invalid8.png"
# 8x8 is in bank 1
self.bank = 1
self.xLoc = int(len(texture8)/32)*8
self.yLoc = (len(texture8)%32)*8
pyxel.image(self.bank).load(self.xLoc, self.yLoc, texture)
texture8[name] = self
def draw(self, x, y, trans=None, fX=False, fY=False):
if (trans == None):
trans = self.trans
# Default texture size is 16x16
ts = 16
# If we're in Bank 1, texture size is 8x8
if self.bank == 1:
ts = 8
xf = ts
yf = ts
if fX:
xf = -ts
if fY:
yf = -ts
pyxel.blt(x*abs(ts), y*abs(ts), self.bank, self.xLoc, self.yLoc, xf, yf, trans)
class Sounded():
def __init__(self, name, notes, tone="s", volume="4", effect=("n" * 4 + "f"), speed=7):
if name not in sounds:
self.id = len(sounds)
pyxel.sound(self.id).set(note=notes, tone=tone, volume=volume, effect=effect, speed=speed)
sounds[name] = self
# There are 4 streams - 0 through 3
def play(self, stream=0):
pyxel.play(stream, self.id)
# This is the base class of any thing that renders to the screen and ticks.
class Entity():
def __init__(self, name, texture=["invalid16.png"], x=0, y=0):
self.name = name
self.x = x
self.y = y
self.allow = False
self.frameNum = 0
self.dir = "N"
self.texName = [x.rsplit(".",1)[0] for x in texture]
for tex in texture:
texName = tex.rsplit(".",1)[0] # remove file extension
Drawn(texName, 16, tex)
def update(self):
pass
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[self.texName[self.frameNum]].draw(drawX, drawY)
class Lazer():
def __init__(self, owner, x, y, dir):
self.owner = owner
self.x = x
self.y = y
self.dir = dir
def draw(self):
drawX = (self.x + windowOffsetX)*2
drawY = (self.y + windowOffsetY)*2
if (drawX >= 0 and drawX < WIDTH*2) and (drawY >=0 and drawY < HEIGHT*2):
if (self.dir == "N" or self.dir == "S"):
texture8["player/beem_V{}".format(random.randrange(0,3))].draw(drawX + 0.5, drawY + 0.5, 0)
else:
texture8["player/beem_H{}".format(random.randrange(0,3))].draw(drawX + 0.5, drawY + 0.5, 0)
class Wall(Entity):
def __init__(self, name, x, y):
super(Wall, self).__init__(name, ["player/wall_{}.png".format(x) for x in range(0,12)], x, y)
self.frameNum = 0 + random.randrange(0,12)
self.randX = random.choice([True, False])
self.randY = random.choice([True, False])
def update(self):
pass
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[self.texName[int(self.frameNum)]].draw(drawX, drawY, 0, fX=self.randX, fY=self.randY)
self.frameNum += 0.5
if (self.frameNum >= 12):
self.frameNum = 0
class Floor(Entity):
def __init__(self, name, x, y):
super(Floor, self).__init__(name, [random.choice(["player/ground.png"]*8 + ["player/ground_blip.png"])], x, y)
self.allow = True
self.randX = random.choice([True, False])
self.randY = random.choice([True, False])
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[self.texName[self.frameNum]].draw(drawX, drawY, fX=self.randX, fY=self.randY)
# The player class extends Entity by listening for keyboard events.
class Player(Entity):
def __init__(self, name, x=WIDTH/2, y=HEIGHT/2):
super(Player, self).__init__(name, ["player/char_H{}.png".format(x) for x in range(0,12)] + ["player/char_V{}.png".format(x) for x in range(0,12)], x, y)
self.cooldown = 0
self.cooldownTime = 2
self.frameNum = 1
self.texHnames = [x for x in self.texName if "H" in x]
self.texVnames = [x for x in self.texName if "V" in x]
def update(self):
self.cooldown -= 1
if (self.cooldown <= 0):
wantGoX = 0
wantGoY = 0
if pyxel.btn(pyxel.KEY_UP):
wantGoY -= 1
self.dir = "N"
if pyxel.btn(pyxel.KEY_DOWN):
wantGoY += 1
self.dir = "S"
if pyxel.btn(pyxel.KEY_LEFT):
wantGoX -= 1
self.dir = "E"
if pyxel.btn(pyxel.KEY_RIGHT):
wantGoX += 1
self.dir = "W"
if (wantGoX != 0 or wantGoY != 0):
if canGo(self.x, self.y, wantGoX, wantGoY):
global windowOffsetX, windowOffsetY
self.x = self.x + wantGoX
self.y = self.y + wantGoY
self.cooldown = self.cooldownTime
windowOffsetX -= wantGoX
windowOffsetY -= wantGoY
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
fX = False
fY = False
ch = self.texHnames
if self.dir == "N":
fX = True
fY = True
ch = self.texVnames
if self.dir == "S":
fX = False
fY = False
ch = self.texVnames
if self.dir == "E":
fX = False
fY = False
ch = self.texHnames
if self.dir == "W":
fX = True
fY = True
ch = self.texHnames
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[ch[self.frameNum - 1]].draw(drawX, drawY, 0, fX=fX, fY=fY)
self.frameNum += 1
if (self.frameNum >= 12):
self.frameNum = 0
class StationaryTurret(Entity):
def __init__(self, name, x=WIDTH/2, y=HEIGHT/2, dir="N"):
super(StationaryTurret, self).__init__(name, ["player/turret_H.png", "player/turret_V.png"], x, y)
self.texHnames = [x for x in self.texName if "H" in x]
self.texVnames = [x for x in self.texName if "V" in x]
self.dir = dir
self.charge = 0
self.chargeTexNames = []
self.HbeamNames = []
self.VbeamNames = []
self.owner = random.randrange(0,32000) # good enough
for tex in ["player/turret_charge_{}.png".format(x) for x in range(0,4)]:
texName = tex.rsplit(".",1)[0] # remove file extension
self.chargeTexNames.append(texName)
Drawn(texName, 8, tex)
for tex in ["player/beem_H{}.png".format(x) for x in range(0,3)]:
texName = tex.rsplit(".",1)[0]
self.HbeamNames.append(texName)
Drawn(texName, 8, tex)
for tex in ["player/beem_V{}.png".format(x) for x in range(0,3)]:
texName = tex.rsplit(".",1)[0]
self.VbeamNames.append(texName)
Drawn(texName, 8, tex)
def update(self):
charge = 0
for entity in entities:
#print(entity)
if isinstance(entity, Player):
#print("{} is player!".format(entity))
xdiff = math.pow(entity.x - self.x, 2)
ydiff = math.pow(entity.y - self.y, 2)
if xdiff + ydiff < 10:
#print("ARMING {} {}".format(self.x, self.y))
charge += 0.5
if (charge == 0):
if (self.charge > 0):
self.charge -= 1
else:
if self.charge < 3:
self.charge += 1
if (self.charge == 3):
sounds["bzzz"].play(2)
self.placeLazer(self.dir)
def placeLazer(self, direction="N"):
count = 0
if direction == "N" or direction == "S":
beamNames = self.HbeamNames
if direction == "N":
for y in range(0, HEIGHT*4):
yL = self.y - y/2
lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "N")
lazers.append(lz)
if direction == "S":
for y in range(0, HEIGHT*4):
yL = self.y + y/2
lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "S")
lazers.append(lz)
elif direction == "E" or direction == "W":
beamNames = self.VbeamNames
if direction == "E":
for x in range(0, WIDTH*4):
xL = self.x - x/2
lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "E")
lazers.append(lz)
if direction == "W":
for x in range(0, WIDTH*4):
xL = self.x + x/2
lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "W")
lazers.append(lz)
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
fX = False
fY = False
ch = self.texHnames
if self.dir == "N":
fX = True
fY = True
ch = self.texVnames
if self.dir == "S":
fX = False
fY = False
ch = self.texVnames
if self.dir == "E":
fX = False
fY = False
ch = self.texHnames
if self.dir == "W":
fX = True
fY = True
ch = self.texHnames
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[ch[0]].draw(drawX, drawY, 0, fX=fX, fY=fY)
texture8[self.chargeTexNames[int(self.charge)]].draw(drawX*2+0.5, drawY*2+0.5, 0)
class MovingTurret(Entity):
def __init__(self, name, x=WIDTH/2, y=HEIGHT/2, dir="N"):
super(MovingTurret, self).__init__(name, ["player/turret_H{}.png".format(x) for x in range(0,12)] + ["player/turret_V{}.png".format(x) for x in range(0,12)], x, y)
self.cooldown = 0
self.cooldownTime = 2
self.frameNum = 1
self.texHnames = [x for x in self.texName if "H" in x]
self.texVnames = [x for x in self.texName if "V" in x]
self.dir = dir
self.charge = 0
self.chargeTexNames = []
self.HbeamNames = []
self.VbeamNames = []
self.owner = random.randrange(0,32000) # good enough
for tex in ["player/turret_charge_{}.png".format(x) for x in range(0,4)]:
texName = tex.rsplit(".",1)[0] # remove file extension
self.chargeTexNames.append(texName)
Drawn(texName, 8, tex)
for tex in ["player/beem_H{}.png".format(x) for x in range(0,3)]:
texName = tex.rsplit(".",1)[0]
self.HbeamNames.append(texName)
Drawn(texName, 8, tex)
for tex in ["player/beem_V{}.png".format(x) for x in range(0,3)]:
texName = tex.rsplit(".",1)[0]
self.VbeamNames.append(texName)
Drawn(texName, 8, tex)
def update(self):
charge = 0
for entity in entities:
#print(entity)
if isinstance(entity, Player):
#print("{} is player!".format(entity))
xdiff = math.pow(entity.x - self.x, 2)
ydiff = math.pow(entity.y - self.y, 2)
if xdiff + ydiff < 10:
#print("ARMING {} {}".format(self.x, self.y))
charge += 0.5
if (charge == 0):
if (self.charge > 0):
self.charge -= 1
else:
if self.charge < 3:
self.charge += 1
if (self.charge == 3):
sounds["bzzz"].play(2)
self.placeLazer(self.dir)
def placeLazer(self, direction="N"):
count = 0
if direction == "N" or direction == "S":
beamNames = self.HbeamNames
if direction == "N":
for y in range(0, HEIGHT*4):
yL = self.y - y/2
lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "N")
lazers.append(lz)
if direction == "S":
for y in range(0, HEIGHT*4):
yL = self.y + y/2
lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "S")
lazers.append(lz)
elif direction == "E" or direction == "W":
beamNames = self.VbeamNames
if direction == "E":
for x in range(0, WIDTH*4):
xL = self.x - x/2
lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "E")
lazers.append(lz)
if direction == "W":
for x in range(0, WIDTH*4):
xL = self.x + x/2
lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "W")
lazers.append(lz)
def draw(self):
drawX = self.x + windowOffsetX
drawY = self.y + windowOffsetY
fX = False
fY = False
ch = self.texHnames
if self.dir == "N":
fX = True
fY = True
ch = self.texVnames
if self.dir == "S":
fX = False
fY = False
ch = self.texVnames
if self.dir == "E":
fX = False
fY = False
ch = self.texHnames
if self.dir == "W":
fX = True
fY = True
ch = self.texHnames
if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT):
texture16[ch[self.frameNum - 1]].draw(drawX, drawY, 0, fX=fX, fY=fY)
texture8[self.chargeTexNames[self.charge]].draw(drawX*2+0.5, drawY*2+0.5, 0)
self.frameNum += 1
if (self.frameNum >= 12):
self.frameNum = 0
if (self.frameNum == 3):
if self.dir == "N":
self.dir = "E"
elif self.dir == "E":
self.dir = "S"
elif self.dir == "S":
self.dir = "W"
elif self.dir == "W":
self.dir = "N"
# This tells you if an entity is permitted to go somewhere.
# From x,y with velocity a,b
def canGo(x, y, a, b):
# Don't allow movement past the edges of the game level
if ((x+a) < 0 or (x+a) >= GL_WIDTH):
sounds["collide"].play(0)
return False
if ((y+b) < 0 or (y+b) >= GL_HEIGHT):
sounds["collide"].play(0)
return False
# Basic structure checks in direction
for s in structures:
if (s.x == (x+a)) and (s.y == (y+b)):
if s.allow:
return True
sounds["collide"].play(0)
return False
# Advanced structure checks on diagonals
if a != 0 and b != 0:
xCheck = False
yCheck = False
for s in structures:
if (s.x == (x+a) and (s.y == y)):
xCheck = not s.allow
if (s.x == x) and (s.y == (y+b)):
yCheck = not s.allow
if xCheck and yCheck:
sounds["collide"].play(0)
return False
return True
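# For example: canGo(5, 5, 1, 0) asks whether the entity at (5, 5) may step
# right into tile (6, 5); a Wall there returns False, a Floor (allow=True)
# returns True.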
# This sets up the game
def setup():
# Register with Pyxel
pyxel.init(WIDTH * 16, HEIGHT * 16, caption="smolgame", palette=[0xff00e5, 0xaaa9ad, 0x5b676d, 0x1f262a, 0x9cff78, 0x44ff00, 0x2ca600, 0x7cff00, 0xff8b00, 0xff0086, 0x6f00ff, 0x0086ff, 0x00ff9a, 0x1f0000, 0x49afff, 0xe2e1ff], scale=4, fps=20)
# Register sounds
Sounded("collide", "c2c1", speed=4)
Sounded("level", "c3e3g3c4c4")
Sounded("bzzz", "c1c1c1c1c1c1c1", tone="t", speed=9)
# Register our player
player = Player("player")
entities.append(player)
st = StationaryTurret("turret", -1, -1, "N")
entities.append(st)
st = StationaryTurret("turret", 16, 16, "S")
entities.append(st)
st = StationaryTurret("turret", -1, 16, "W")
entities.append(st)
st = StationaryTurret("turret", 16, -1, "E")
entities.append(st)
mt = MovingTurret("turret", 8, 8, "N")
entities.append(mt)
#wa = Wall("wall", -1, 11)
#structures.append(wa)
#wa = Wall("wall", -1, 12)
#structures.append(wa)
#wa = Wall("wall", -1, 13)
#structures.append(wa)
#wa = Wall("wall", -1, 14)
#structures.append(wa)
#wa = Wall("wall", -1, 15)
#structures.append(wa)
# Invalid texture test code
#random = Entity("random", "random.png")
#entities.append(random)
def mapObjType(type, ct, cb, cl, cr):
if type == "W":
return Wall
if type == "F":
return Floor
if type[0] == "C":
if "U" in type and ct:
if "W" in type:
return Wall
if "F" in type:
return Floor
if "L" in type and cl:
if "W" in type:
return Wall
if "F" in type:
return Floor
if "R" in type and cr:
if "W" in type:
return Wall
if "F" in type:
return Floor
if "D" in type and cb:
if "W" in type:
return Wall
if "F" in type:
return Floor
return None
if type[0] == "O":
if "U" in type and ct:
return Floor
if "D" in type and cl:
return Floor
if "R" in type and cr:
return Floor
if "L" in type and cb:
return Floor
return Wall
return None
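# For example (illustrative cell code, not necessarily one used by the CSVs):
# mapObjType("CUW", ct=True, cb=False, cl=False, cr=False) returns Wall,
# because the cell is conditional-up ("CU"), the top connection exists, and
# the cell resolves to a Wall ("W").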
def parseRoomCSV(csvFile, ct, cb, cl, cr):
f = open(csvFile)
dat = f.read()
f.close()
lines = [x for x in dat.split("\n") if x.strip() != ""]
roomData = []
for line in lines:
ld = []
for entry in line.split(","):
ld.append(mapObjType(entry,ct,cb,cl,cr))
roomData.append(ld)
return roomData
class RoomTile():
def __init__(self, ct, cb, cl, cr):
self.ct = ct
self.cl = cl
self.cr = cr
self.cb = cb
# x and y are the room tile location, not the render tile. Each room tile spans 15x15 image tiles
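# For example: room tile (1, 0) covers render tiles x = 15..29, y = 0..14.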
def generateInWorld(self, x, y):
pass
# Generates a room
class Room(RoomTile):
def generateInWorld(self, x, y):
roomData = parseRoomCSV("room.csv",self.ct,self.cb,self.cl,self.cr)
for xL in range(0,15):
for yL in range(0,15):
tile = roomData[xL][yL]
if (tile == Floor):
tileObj = tile(name="floor", x=xL+x*15, y=yL+y*15)
structures.append(tileObj)
elif (tile == Wall):
tileObj = tile(name="wall", x=xL+x*15, y=yL+y*15)
structures.append(tileObj)
# Generates a thin hallway between two or more rooms
class Hallway(RoomTile):
def generateInWorld(self, x, y):
roomData = parseRoomCSV("hall.csv",self.ct,self.cb,self.cl,self.cr)
for xL in range(0,15):
for yL in range(0,15):
tile = roomData[xL][yL]
if (tile == Floor):
tileObj = tile(name="floor", x=xL+x*15, y=yL+y*15)
structures.append(tileObj)
elif (tile == Wall):
tileObj = tile(name="wall", x=xL+x*15, y=yL+y*15)
structures.append(tileObj)
def basicWorldgen():
h = Hallway(True, True, True, True)
h.generateInWorld(0, 1)
r = Room(True, True, True, True)
r.generateInWorld(0, 0)
r = Room(True, True, True, True)
r.generateInWorld(1, 0)
# Generate the world! You can use this to generate levels or whatever
def worldgen(roomSetup):
rooms = roomSetup
#rooms += [item for sublist in [[x[0] for y in range(x[1])] for x in roomSetup] for item in sublist]
map = []
roommap = []
for x in range(0,15):
map.append([])
roommap.append([])
for y in range(0,9):
map[x].append(0)
roommap[x].append(None)
x = 1
y = 1
while len(rooms) > 1:
map[x][y] = 1
roommap[x][y] = rooms.pop(random.randrange(0,len(rooms)))
n = random.randrange(1,5)
direction = 0
not_this_way = 0
while n > 0:
while direction == not_this_way:
direction = random.randrange(1,5)
if direction == 1: # Left
if x > 0:
not_this_way = 3
x = x - 1
else:
not_this_way = 1
x = x + 1
if map[x][y] == 0:
map[x][y] = 2
elif direction == 2: # Up
if y > 0:
not_this_way = 4
y = y - 1
else:
not_this_way = 2
y = y + 1
if map[x][y] == 0:
map[x][y] = 2
elif direction == 3: # Right
if x < 14:
not_this_way = 1
x = x + 1
else:
not_this_way = 3
x = x - 1
if map[x][y] == 0:
map[x][y] = 2
elif direction == 4: # Down
if y < 8:
not_this_way = 2
y = y + 1
else:
not_this_way = 4
y = y - 1
if map[x][y] == 0:
map[x][y] = 2
if roommap[x][y] == None or n > 1:
n = n - 1
map[x][y] = 1
roommap[x][y] = rooms.pop(random.randrange(0,len(rooms)))
for x in range(0,15):
for y in range(0,9):
mxy = map[x][y]
if mxy == 0:
continue
mxyl = False
mxyu = False
mxyd = False
mxyr = False
if y > 0:
if map[x][y-1] != 0:
mxyu = True
if y < 8:
if map[x][y+1] != 0:
mxyd = True
if x > 0:
if map[x-1][y] != 0:
mxyl = True
if x < 14:
if map[x+1][y] != 0:
mxyr = True
if mxy == 1:
roomobj = Room(mxyu,mxyd,mxyl,mxyr)
elif mxy == 2:
roomobj = Hallway(mxyu,mxyd,mxyl,mxyr)
roomobj.generateInWorld(x,y)
# This is called by Pyxel every tick, and handles all game inputs
def update():
# Quit if Q
if pyxel.btn(pyxel.KEY_Q):
pyxel.quit()
# Play a sound if Space
if pyxel.btn(pyxel.KEY_SPACE):
sounds["level"].play(1)
    # Tick all entities and structures. The player is ticked wherever it
    # happens to sit in the entities list; use a list comprehension like the
    # sketches below if you need it to update first or last:
    # for x in [x for x in entities if isinstance(x, Player)]
    # for x in [x for x in entities if not isinstance(x, Player)]
# Clear all lazers
lazers.clear()
for x in structures:
x.update()
for x in entities:
x.update()
# This is called by Pyxel every time the screen needs a redraw, which may
# happen more than once per tick depending on the frame rate.
def draw():
# Clear the screen
pyxel.cls(col=3)
for x in structures:
x.draw()
for x in lazers:
x.draw()
for x in entities:
x.draw()
# This is where the game setup logic is
def run():
setup()
basicWorldgen()
#worldgen([0,0,0,0,0,0,0,0,0,0,0,0])
pyxel.run(update, draw)
# This is the entry point for our file.
run()
|
nilq/baby-python
|
python
|
import os
from urllib.parse import urljoin, urlparse
import urllib
import ntpath
is_win32 = os.name == "nt"
def createDirectory(base, new_dir):
if is_win32:
new_dir = cleanName(new_dir, ".")
if not base.startswith("\\\\?\\"): base = "\\\\?\\" + base
path_new_dir = os.path.join(base, new_dir)
if not os.path.exists(path_new_dir): os.mkdir(path_new_dir)
return path_new_dir
def longPath(path):
if is_win32 and not path.startswith("\\\\?\\"):
return "\\\\?\\" + path
return path
def try_get(src, getter, expected_type=None):
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
return None
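# Illustrative: try_get({"a": [1]}, lambda d: d["a"][0], int) -> 1, while
# try_get({}, lambda d: d["a"][0]) -> None instead of raising KeyError.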
def cleanName(value, deletechars = '<>:"/\\|?*\r\n'):
value = str(value)
for c in deletechars:
value = value.replace(c,'')
return value
def GetFileNameFromUrl(url):
urlParsed = urlparse(urllib.parse.unquote(url))
    fileName = os.path.basename(urlParsed.path)
return cleanName(fileName)
def pathLeaf(path):
'''
Name..........: pathLeaf
Description...: get file name from full path
Parameters....: path - string. Full path
Return values.: string file name
Author........: None
'''
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
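# Illustrative: pathLeaf(r"C:\tmp\a.txt") -> "a.txt"; the trailing-slash case
# pathLeaf("dir/sub/") -> "sub" falls back to ntpath.basename(head).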
def path_join(*args):
new_path = os.path.join(*args)
if os.path.altsep:
return new_path.replace(os.path.sep, os.path.altsep)
return new_path
|
nilq/baby-python
|
python
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import itervalues
import copy
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from buildbot.data import sourcestamps as sourcestampsapi
from buildbot.data import base
from buildbot.data import types
from buildbot.process.buildrequest import BuildRequestCollapser
from buildbot.process.results import SUCCESS
from buildbot.process.results import worst_status
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime
class Db2DataMixin(object):
@defer.inlineCallbacks
def db2data(self, bsdict):
if not bsdict:
defer.returnValue(None)
buildset = bsdict.copy()
# gather the actual sourcestamps, in parallel
sourcestamps = []
@defer.inlineCallbacks
def getSs(ssid):
ss = yield self.master.data.get(('sourcestamps', str(ssid)))
sourcestamps.append(ss)
yield defer.DeferredList([getSs(id)
for id in buildset['sourcestamps']],
fireOnOneErrback=True, consumeErrors=True)
buildset['sourcestamps'] = sourcestamps
# minor modifications
buildset['submitted_at'] = datetime2epoch(buildset['submitted_at'])
buildset['complete_at'] = datetime2epoch(buildset['complete_at'])
defer.returnValue(buildset)
fieldMapping = {
'bsid': 'buildsets.id',
'external_idstring': 'buildsets.external_idstring',
'reason': 'buildsets.reason',
'submitted_at': 'buildsets.submitted_at',
'complete': 'buildsets.complete',
'complete_at': 'buildsets.complete_at',
'results': 'buildsets.results',
'parent_buildid': 'buildsets.parent_buildid',
'parent_relationship': 'buildsets.parent_relationship'
}
class BuildsetEndpoint(Db2DataMixin, base.Endpoint):
isCollection = False
pathPatterns = """
/buildsets/n:bsid
"""
@defer.inlineCallbacks
def get(self, resultSpec, kwargs):
res = yield self.master.db.buildsets.getBuildset(kwargs['bsid'])
res = yield self.db2data(res)
defer.returnValue(res)
class BuildsetsEndpoint(Db2DataMixin, base.Endpoint):
isCollection = True
pathPatterns = """
/buildsets
"""
rootLinkName = 'buildsets'
def get(self, resultSpec, kwargs):
complete = resultSpec.popBooleanFilter('complete')
resultSpec.fieldMapping = self.fieldMapping
d = self.master.db.buildsets.getBuildsets(complete=complete, resultSpec=resultSpec)
@d.addCallback
def db2data(buildsets):
d = defer.DeferredList([self.db2data(bs) for bs in buildsets],
fireOnOneErrback=True, consumeErrors=True)
@d.addCallback
def getResults(res):
return [r[1] for r in res]
return d
return d
class Buildset(base.ResourceType):
name = "buildset"
plural = "buildsets"
endpoints = [BuildsetEndpoint, BuildsetsEndpoint]
keyFields = ['bsid']
eventPathPatterns = """
/buildsets/:bsid
"""
class EntityType(types.Entity):
bsid = types.Integer()
external_idstring = types.NoneOk(types.String())
reason = types.String()
submitted_at = types.Integer()
complete = types.Boolean()
complete_at = types.NoneOk(types.Integer())
results = types.NoneOk(types.Integer())
sourcestamps = types.List(
of=sourcestampsapi.SourceStamp.entityType)
parent_buildid = types.NoneOk(types.Integer())
parent_relationship = types.NoneOk(types.String())
entityType = EntityType(name)
@base.updateMethod
@defer.inlineCallbacks
def addBuildset(self, waited_for, scheduler=None, sourcestamps=None, reason=u'',
properties=None, builderids=None, external_idstring=None,
parent_buildid=None, parent_relationship=None,
_reactor=reactor):
if sourcestamps is None:
sourcestamps = []
if properties is None:
properties = {}
if builderids is None:
builderids = []
submitted_at = int(_reactor.seconds())
bsid, brids = yield self.master.db.buildsets.addBuildset(
sourcestamps=sourcestamps, reason=reason,
properties=properties, builderids=builderids,
waited_for=waited_for, external_idstring=external_idstring,
submitted_at=epoch2datetime(submitted_at),
parent_buildid=parent_buildid, parent_relationship=parent_relationship)
yield BuildRequestCollapser(self.master, list(itervalues(brids))).collapse()
# get each of the sourcestamps for this buildset (sequentially)
bsdict = yield self.master.db.buildsets.getBuildset(bsid)
sourcestamps = []
for ssid in bsdict['sourcestamps']:
sourcestamps.append(
(yield self.master.data.get(('sourcestamps', str(ssid)))).copy()
)
# notify about the component build requests
brResource = self.master.data.getResourceType("buildrequest")
brResource.generateEvent(list(itervalues(brids)), 'new')
# and the buildset itself
msg = dict(
bsid=bsid,
external_idstring=external_idstring,
reason=reason,
submitted_at=submitted_at,
complete=False,
complete_at=None,
results=None,
scheduler=scheduler,
sourcestamps=sourcestamps)
# TODO: properties=properties)
self.produceEvent(msg, "new")
log.msg("added buildset %d to database" % bsid)
# if there are no builders, then this is done already, so send the
# appropriate messages for that
if not builderids:
yield self.maybeBuildsetComplete(bsid, _reactor=_reactor)
defer.returnValue((bsid, brids))
@base.updateMethod
@defer.inlineCallbacks
def maybeBuildsetComplete(self, bsid, _reactor=reactor):
brdicts = yield self.master.db.buildrequests.getBuildRequests(
bsid=bsid, complete=False)
# if there are incomplete buildrequests, bail out
if brdicts:
return
brdicts = yield self.master.db.buildrequests.getBuildRequests(bsid=bsid)
# figure out the overall results of the buildset:
cumulative_results = SUCCESS
for brdict in brdicts:
cumulative_results = worst_status(
cumulative_results, brdict['results'])
# get a copy of the buildset
bsdict = yield self.master.db.buildsets.getBuildset(bsid)
# if it's already completed, we're late to the game, and there's
# nothing to do.
#
# NOTE: there's still a strong possibility of a race condition here,
# which would cause two buildset.$bsid.complete messages to be sent.
# That's an acceptable risk, and a necessary consequence of this
# denormalized representation of a buildset's state.
if bsdict['complete']:
return
# mark it as completed in the database
complete_at = epoch2datetime(int(_reactor.seconds()))
yield self.master.db.buildsets.completeBuildset(bsid,
cumulative_results, complete_at=complete_at)
# get the sourcestamps for the message
# get each of the sourcestamps for this buildset (sequentially)
bsdict = yield self.master.db.buildsets.getBuildset(bsid)
sourcestamps = []
for ssid in bsdict['sourcestamps']:
sourcestamps.append(
copy.deepcopy(
(yield self.master.data.get(('sourcestamps', str(ssid))))
)
)
msg = dict(
bsid=bsid,
external_idstring=bsdict['external_idstring'],
reason=bsdict['reason'],
sourcestamps=sourcestamps,
submitted_at=bsdict['submitted_at'],
complete=True,
complete_at=complete_at,
results=cumulative_results)
# TODO: properties=properties)
self.produceEvent(msg, "complete")
|
nilq/baby-python
|
python
|
import json
import logging
from platform import system
from ctypes import (c_char_p, c_int, c_uint, c_long, Structure, cdll, POINTER)
from typing import Any, TYPE_CHECKING, Tuple, List, AnyStr
from rita.engine.translate_standalone import rules_to_patterns, RuleExecutor
from rita.types import Rules
logger = logging.getLogger(__name__)
field = Tuple[AnyStr, Any]
fields = List[field]
if TYPE_CHECKING:
# We cannot simply import SessionConfig because of cyclic imports
from rita.config import SessionConfig
class NamedRangeResult(Structure):
_fields_ = [
("start", c_long),
("end", c_long),
("name", c_char_p),
]
class ResultEntity(Structure):
_fields_ = [
("label", c_char_p),
("start", c_long),
("end", c_long),
("sub_count", c_uint),
]
class Result(Structure):
_fields_ = [
("count", c_uint)
]
class Context(Structure):
_fields_: fields = []
def load_lib():
try:
os_name = system()
if os_name == "Windows":
lib = cdll.LoadLibrary("rita_rust.dll")
elif os_name == "Darwin":
lib = cdll.LoadLibrary("librita_rust.dylib")
else:
lib = cdll.LoadLibrary("librita_rust.so")
lib.compile.restype = POINTER(Context)
lib.execute.argtypes = [POINTER(Context), c_char_p]
lib.execute.restype = POINTER(Result)
lib.clean_env.argtypes = [POINTER(Context)]
lib.clean_result.argtypes = [POINTER(Result)]
lib.read_result.argtypes = [POINTER(Result), c_int]
lib.read_result.restype = POINTER(ResultEntity)
lib.read_submatch.argtypes = [POINTER(ResultEntity), c_int]
lib.read_submatch.restype = POINTER(NamedRangeResult)
return lib
except Exception as ex:
logger.error("Failed to load rita-rust library, reason: {}\n\n"
"Most likely you don't have required shared library to use it".format(ex))
class RustRuleExecutor(RuleExecutor):
def __init__(self, patterns, config: "SessionConfig"):
self.config = config
self.context = None
self.lib = load_lib()
self.patterns = [self._build_regex_str(label, rules)
for label, rules in patterns]
self.compile()
@staticmethod
def _build_regex_str(label, rules):
indexed_rules = ["(?P<s{}>{})".format(i, r) if not r.startswith("(?P<") else r
for i, r in enumerate(rules)]
return r"(?P<{0}>{1})".format(label, "".join(indexed_rules))
def compile(self):
flag = 0 if self.config.ignore_case else 1
c_array = (c_char_p * len(self.patterns))(*list([p.encode("UTF-8") for p in self.patterns]))
self.context = self.lib.compile(c_array, len(c_array), flag)
return self.context
def execute(self, text, include_submatches=True):
result_ptr = self.lib.execute(self.context, text.encode("UTF-8"))
count = result_ptr[0].count
for i in range(0, count):
match_ptr = self.lib.read_result(result_ptr, i)
match = match_ptr[0]
matched_text = text[match.start:match.end].strip()
def parse_subs():
k = match.sub_count
for j in range(0, k):
s = self.lib.read_submatch(match_ptr, j)[0]
start = s.start
end = s.end
sub_text = text[start:end]
if sub_text.strip() == "":
continue
yield {
"text": sub_text.strip(),
"start": start,
"end": end,
"key": s.name.decode("UTF-8"),
}
yield {
"start": match.start,
"end": match.end,
"text": matched_text,
"label": match.label.decode("UTF-8"),
"submatches": list(parse_subs()) if include_submatches else []
}
def clean_context(self):
self.lib.clean_env(self.context)
@staticmethod
def load(path):
from rita.config import SessionConfig
config = SessionConfig()
with open(path, "r") as f:
patterns = [(obj["label"], obj["rules"])
for obj in map(json.loads, f.readlines())]
return RustRuleExecutor(patterns, config)
def compile_rules(rules: Rules, config: "SessionConfig", **kwargs) -> RustRuleExecutor:
logger.info("Using rita-rust rule implementation")
patterns = [rules_to_patterns(*group, config=config) for group in rules]
executor = RustRuleExecutor(patterns, config)
return executor
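# A minimal usage sketch (assumes the rita-rust shared library is on the
# loader path; "patterns.jsonl" is a hypothetical file of
# {"label": ..., "rules": [...]} JSON lines, the format consumed by load()):
#
#   executor = RustRuleExecutor.load("patterns.jsonl")
#   for match in executor.execute("some input text"):
#       print(match["label"], match["text"])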
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from codecs import open
import json
import opengraph
from repos import final_theses as thesis_slugs
template = open('_template.html', 'r', 'utf-8').read()
theses = []
for thesis_slug in thesis_slugs:
url = 'http://kabk.github.io/%s/' % thesis_slug
print "parsing %s:" % url
g = opengraph.OpenGraph(url=url, scrape=True)
d = json.loads(g.to_json())
d['slug'] = thesis_slug
theses.append(d)
thesis_template = """
<div class="preview">
<figure>
<a href="{url}"><img src="{image}"/></a>
</figure>
<h2><a href="{url}">{title}</a></h2>
<h3>{creator}</h3>
<p>{description} <a href="{url}">Continue reading…</a></p>
</div>
"""
thesis_links = ""
for thesis in theses:
thesis_links += thesis_template.format(image=thesis['image'],
title=thesis['title'],
creator=thesis['creator'],
description=thesis['description'],
url=thesis['url'],
slug=thesis['slug'])
result = template.format(body=thesis_links)
generated_file = open('index.html', 'w', 'utf-8')
generated_file.write(result)
generated_file.close()
|
nilq/baby-python
|
python
|
import matplotlib
matplotlib.use('TkAgg')
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import ode
def f(x, y):
""" Правая часть ДУ y'=f(x, y) """
return x/4-1/(1+y**2)
def on_move(event):
""" Обработчик событий мыши """
    # initial conditions
x0 = event.xdata
y0 = event.ydata
    # the cursor left the coordinate system
if not x0 or not y0:
line.set_data([], [])
fig.canvas.draw()
return
    dt = 0.05 # integration step
    sol = [] # solution
de = ode(f)
de.set_integrator('dop853')
    # integrate "rightward" from the initial point
de.set_initial_value(y0, x0)
while de.successful() and de.t <= xlim.end:
de.integrate(de.t + dt)
sol.append((de.t, de.y[0]))
    # integrate "leftward" from the initial point
de.set_initial_value(y0, x0)
while de.successful() and de.t >= xlim.start:
de.integrate(de.t - dt)
sol.append((de.t, de.y[0]))
sol.sort(key=lambda x: x[0])
sol = list(zip(*sol))
if event.button:
ax.plot(sol[0], sol[1], 'r')
else:
line.set_data(sol[0], sol[1])
fig.canvas.draw()
# rectangular region of the plane
Lims = namedtuple('Lims', ['start', 'end'])
xlim = Lims(-5, 5)
ylim = Lims(-5, 5)
fig = plt.figure()
# connect the event handlers
fig.canvas.mpl_connect('motion_notify_event', on_move)
fig.canvas.mpl_connect('button_press_event', on_move)
ax = plt.axes(xlim=xlim, ylim=ylim)
ax.set_aspect('equal')
# coordinate axes
ax.hlines(0, xlim.start, xlim.end, lw=0.5)
ax.vlines(0, ylim.start, ylim.end, lw=0.5)
x = np.linspace(xlim.start, xlim.end, 21)
y = np.linspace(ylim.start, ylim.end, 21)
X, Y = np.meshgrid(x, y)
# normalizing factor so that all field vectors
# have the same length
norm = np.hypot(1, f(X, Y))
# direction field
kwargs = {'angles':'xy', 'width':0.002, 'pivot':'mid'}
ax.quiver(X, Y, 1/norm, f(X, Y)/norm, **kwargs)
# line that will draw the solution curve
# as the mouse moves
line, = ax.plot([], [], 'm')
plt.show()
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.8 on 2019-12-11 16:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_eveonline_connector', '0010_auto_20191211_1514'),
]
operations = [
migrations.AlterField(
model_name='evecharacter',
name='token',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_eveonline_connector.EveToken'),
),
]
|
nilq/baby-python
|
python
|
import time
import numpy as np
from yaaf.evaluation import Metric
class SecondsPerTimestepMetric(Metric):
def __init__(self):
        super(SecondsPerTimestepMetric, self).__init__("Seconds Per Timestep")
self._deltas = []
self._last = None
    def reset(self):
        self._deltas = []
        self._last = None
def __call__(self, timestep):
now = time.time()
delta = now - self._last if self._last is not None else 0.0
self._last = now
self._deltas.append(delta)
return delta
def result(self):
return np.array(self._deltas)
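# A minimal usage sketch (timing is wall-clock seconds between successive
# calls; the timestep argument only satisfies the Metric interface):
#
#   metric = SecondsPerTimestepMetric()
#   for step in range(3):
#       metric(step)           # records seconds elapsed since the last call
#   print(metric.result())     # numpy array of per-call deltas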
|
nilq/baby-python
|
python
|
from pytest import raises
from async_cog.tags import Tag
def test_tag_format() -> None:
tag = Tag(code=254, type=4, length=13)
assert tag.format_str == "13I"
assert tag.data_pointer is None
def test_tag_size() -> None:
tag = Tag(code=254, type=4, length=13)
assert tag.data_size == 52
def test_tag_name() -> None:
tag = Tag(code=34735, type=3, length=32, data_pointer=502)
assert tag.name == "GeoKeyDirectoryTag"
def test_tag_str() -> None:
tag = Tag(code=34735, type=3, length=32, data_pointer=502)
assert str(tag) == "GeoKeyDirectoryTag: None"
tag = Tag(code=257, type=3, length=1, value=256)
assert str(tag) == "ImageHeight: 256"
tag = Tag(code=258, type=3, length=3, value=[8, 8, 8])
assert str(tag) == "BitsPerSample: [8, 8, 8]"
def test_not_implemented() -> None:
tag = Tag(code=34735, type=3, length=32, data_pointer=502)
with raises(NotImplementedError):
tag.parse_data(b"", "<")
|
nilq/baby-python
|
python
|
"""
RenameWidget:
This widget permits renaming the output files in the MKVCommand.
It also renames files dropped from directories in the OS.
"""
# LOG FW0013
import logging
import re
from pathlib import Path
from PySide2.QtCore import Signal, Qt, Slot
from PySide2.QtWidgets import (
QGridLayout,
QWidget,
QHBoxLayout,
QSizePolicy,
QGroupBox,
)
import vsutillib.pyqt as pyqt
from .. import config
from ..utils import Text
from .RenameWidgetHelpers import (
findDuplicates,
RegExFilesWidget,
RegExLineInputWidget,
RegExInputWidget,
resolveIncrements,
)
MODULELOG = logging.getLogger(__name__)
MODULELOG.addHandler(logging.NullHandler())
class RenameWidget(pyqt.TabWidgetExtension, QWidget):
"""Central widget"""
# pylint: disable=too-many-instance-attributes
# Defining elements of a GUI
# Class logging state
__log = False
outputRenameResultsSignal = Signal(str, dict)
outputOriginalFilesSignal = Signal(str, dict)
applyFileRenameSignal = Signal(list)
setFilesSignal = Signal(object)
setCurrentIndexSignal = Signal()
@classmethod
def classLog(cls, setLogging=None):
"""
get/set logging at class level
every class instance will log
unless overwritten
Args:
setLogging (bool):
- True class will log
- False turn off logging
- None returns current Value
Returns:
bool:
returns the current value set
"""
if setLogging is not None:
if isinstance(setLogging, bool):
cls.__log = setLogging
return cls.__log
def __init__(self, parent, controlQueue=None, log=None):
super(RenameWidget, self).__init__(parent=parent, tabWidgetChild=self)
self.__log = None
self.__output = None
self.__tab = None
self.parent = parent
self.controlQueue = controlQueue
self._outputFileNames = []
self._renameFileNames = []
self._initControls()
self._initUI()
self._initHelper()
self._bFilesDropped = False
self._bDuplicateRename = False
self.log = log
def _initControls(self):
#
# Input Lines
#
self.textRegEx = RegExLineInputWidget(Text.txt0200, Text.txt0201)
self.textSubString = RegExLineInputWidget(Text.txt0202, Text.txt0203)
self.textOriginalNames = RegExFilesWidget(Text.txt0204, Text.txt0205)
self.textOriginalNames.textBox.setReadOnly(True)
self.textOriginalNames.textBox.connectToInsertText(
self.outputOriginalFilesSignal
)
self.textOriginalNames.textBox.filesDroppedUpdateSignal.connect(
self._setFilesDropped
)
self.textRenameResults = RegExInputWidget(Text.txt0206, Text.txt0207)
self.textRenameResults.textBox.setReadOnly(True)
self.textRenameResults.textBox.connectToInsertText(
self.outputRenameResultsSignal
)
btnApplyRename = pyqt.QPushButtonWidget(
Text.txt0208,
function=self._applyRename,
margins=" ",
toolTip=Text.txt0209,
)
btnApplyRename.setEnabled(False)
btnUndoRename = pyqt.QPushButtonWidget(
Text.txt0210, function=self._undoRename, margins=" ", toolTip=Text.txt0211
)
btnUndoRename.setEnabled(False)
btnClear = pyqt.QPushButtonWidget(
Text.txt0212, function=self.clear, margins=" ", toolTip=Text.txt0213
)
self.btnGrid = QHBoxLayout()
self.btnGrid.addWidget(btnApplyRename)
self.btnGrid.addWidget(btnUndoRename)
self.btnGrid.addStretch()
self.btnGrid.addWidget(btnClear)
self.btnGroup = QGroupBox()
self.btnGroup.setLayout(self.btnGrid)
def _initUI(self):
inputGrid = QGridLayout()
#
# Input lines
#
inputGrid.addWidget(self.textRegEx, 0, 0, 1, 2)
inputGrid.addWidget(self.textSubString, 1, 0, 1, 2)
# buttons
inputGrid.addWidget(self.btnGroup, 2, 0, 1, 2)
gridWidget = QWidget()
gridWidget.setLayout(inputGrid)
gridWidget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
boxWidget = QWidget()
hboxLayout = QHBoxLayout()
hboxLayout.addWidget(self.textOriginalNames)
hboxLayout.addWidget(self.textRenameResults)
boxWidget.setLayout(hboxLayout)
grid = QGridLayout()
grid.setSpacing(5)
grid.addWidget(gridWidget, 0, 0, 2, 0, Qt.AlignTop)
grid.addWidget(boxWidget, 2, 0)
self.setLayout(grid)
def _initHelper(self):
maxCount = config.data.get(Key.MaxRegExCount)
# local signals
# self.setCurrentIndexSignal.connect(self._setCurrentIndex)
self.setFilesSignal.connect(self.setFiles)
self.textRegEx.cmdLine.currentTextChanged.connect(self._updateRegEx)
self.textSubString.cmdLine.currentTextChanged.connect(self._updateRegEx)
self.textOriginalNames.textBox.textChanged.connect(self.clearButtonState)
self.textRegEx.cmdLine.itemsChangeSignal.connect(
lambda: self.saveItems(Key.RegEx)
)
self.textSubString.cmdLine.itemsChangeSignal.connect(
lambda: self.saveItems(Key.SubString)
)
self.textOriginalNames.textBox.verticalScrollBar().valueChanged.connect(
self.scrollRenameChanged
)
self.textRenameResults.textBox.verticalScrollBar().valueChanged.connect(
self.scrollResultsChanged
)
if maxCount is not None:
self.textRegEx.cmdLine.setMaxCount(maxCount)
self.textSubString.cmdLine.setMaxCount(maxCount)
items = config.data.get(Key.RegEx)
self.textRegEx.cmdLine.addItems(items)
self.textRegEx.cmdLine.clearEditText()
items = config.data.get(Key.SubString)
self.textSubString.cmdLine.addItems(items)
self.textSubString.cmdLine.clearEditText()
self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(False)
def __bool__(self):
for n, r in zip(self._outputFileNames, self._renameFileNames):
if n != r:
return True
return False
@property
def log(self):
"""
class property can be used to override the class global
logging setting
Returns:
bool:
True if logging is enable False otherwise
"""
if self.__log is not None:
return self.__log
return RenameWidget.classLog()
@log.setter
def log(self, value):
"""set instance log variable"""
if isinstance(value, bool) or value is None:
self.__log = value
@property
def output(self):
return self.__output
@output.setter
def output(self, value):
self.__output = value
@Slot()
def saveItems(self, comboType):
"""
        Save the items of a ComboLineEdit used in the widget
        Args:
            comboType (str): key indicating which ComboLineEdit
                to save to config
"""
if comboType == Key.RegEx:
if self.textRegEx.cmdLine.count() > 0:
items = []
for i in range(0, self.textRegEx.cmdLine.count()):
items.append(self.textRegEx.cmdLine.itemText(i))
config.data.set(Key.RegEx, items)
if comboType == Key.SubString:
            if self.textSubString.cmdLine.count() > 0:
items = []
for i in range(0, self.textSubString.cmdLine.count()):
items.append(self.textSubString.cmdLine.itemText(i))
config.data.set(Key.SubString, items)
@Slot(object)
def setFiles(self, objCommand):
"""
        setFiles sets up the file names to work with
Args:
objCommand (MKVCommand): MKVCommand object containing the files
to rename
"""
self.textOriginalNames.textBox.clear()
self.textRenameResults.textBox.clear()
for f in objCommand.destinationFiles:
# show files
self.outputOriginalFilesSignal.emit(str(f.name) + "\n", {})
# save files
self._outputFileNames.append(f)
@Slot(int)
def scrollRenameChanged(self, value):
self.textRenameResults.textBox.verticalScrollBar().valueChanged.disconnect(
self.scrollResultsChanged
)
self.textRenameResults.textBox.verticalScrollBar().setValue(value)
self.textRenameResults.textBox.verticalScrollBar().valueChanged.connect(
self.scrollResultsChanged
)
@Slot(int)
def scrollResultsChanged(self, value):
self.textOriginalNames.textBox.verticalScrollBar().valueChanged.disconnect(
self.scrollRenameChanged
)
self.textOriginalNames.textBox.verticalScrollBar().setValue(value)
self.textOriginalNames.textBox.verticalScrollBar().valueChanged.connect(
self.scrollRenameChanged
)
def clear(self):
"""
clear reset widget working variables and widgets
"""
self._outputFileNames = []
self._renameFileNames = []
self._bFilesDropped = False
self.textRegEx.cmdLine.lineEdit().clear()
self.textSubString.cmdLine.lineEdit().clear()
self.textOriginalNames.textBox.clear()
self.textRenameResults.textBox.clear()
def clearButtonState(self):
"""Set clear button state"""
if self.textOriginalNames.textBox.toPlainText() != "":
self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(True)
else:
self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(False)
def connectToSetFiles(self, objSignal):
objSignal.connect(self.setFiles)
def setLanguage(self):
"""
setLanguage set labels according to locale
"""
for index in range(self.btnGrid.count()):
widget = self.btnGrid.itemAt(index).widget()
if isinstance(widget, pyqt.QPushButtonWidget):
widget.setLanguage()
#widget.setText(" " + _(widget.originalText) + " ")
#widget.setToolTip(_(widget.toolTip))
for w in [self.textRegEx, self.textSubString]:
w.lblText.setText(_(w.label) + ": ")
w.cmdLine.setToolTip(_(w.toolTip))
for w in [self.textOriginalNames, self.textRenameResults]:
w.lblText.setText(_(w.label) + ":")
w.textBox.setToolTip(_(w.toolTip))
w.repaint()
def _setFilesDropped(self, filesDropped):
if filesDropped:
self._outputFileNames = []
self._outputFileNames.extend(filesDropped)
self.textRenameResults.textBox.clear()
if not self._bFilesDropped:
self._bFilesDropped = True
self._updateRegEx()
else:
# receive when clear issued to FilesListWidget
self._outputFileNames = []
self.textRenameResults.textBox.clear()
self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(False)
self._bFilesDropped = False
def _displayRenames(self):
duplicateNames = findDuplicates(self._renameFileNames)
if duplicateNames:
self._bDuplicateRename = True
else:
self._bDuplicateRename = False
for f in self._renameFileNames:
of = Path(f)
try:
if (f in duplicateNames) or of.is_file():
self.outputRenameResultsSignal.emit(
str(f.name) + "\n", {"color": Qt.red}
)
else:
# check theme
self.outputRenameResultsSignal.emit(str(f.name) + "\n", {})
except OSError:
self.outputRenameResultsSignal.emit(str(f.name) + "\n", {})
def _updateRegEx(self):
rg = self.textRegEx.cmdLine.currentText()
subText = self.textSubString.cmdLine.currentText()
statusBar = self.parent.statusBar()
statusBar.showMessage("")
self.textRenameResults.textBox.clear()
self._renameFileNames = []
try:
regEx = re.compile(rg)
for f in self._outputFileNames:
strFile = f.stem
matchRegEx = regEx.sub(subText, strFile)
if matchRegEx:
objName = f.parent.joinpath(matchRegEx + f.suffix)
else:
objName = f
self._renameFileNames.append(objName)
resolveIncrements(self._outputFileNames, self._renameFileNames, subText)
self._displayRenames()
if self:
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True)
else:
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False)
except re.error:
self.textRenameResults.textBox.clear()
statusBar.showMessage(Text.txt0214)
if resolveIncrements(self._outputFileNames, self._renameFileNames, subText):
self._displayRenames()
if self:
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True)
else:
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False)
def _applyRename(self):
if self._bFilesDropped:
# self.applyFileRenameSignal.emit(self._renameFileNames)
filesPair = zip(self._outputFileNames, self._renameFileNames)
for oldName, newName in filesPair:
try:
oldName.rename(newName)
except FileExistsError:
pass
else:
self.applyFileRenameSignal.emit(self._renameFileNames)
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False)
self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(True)
def _undoRename(self):
if self._bFilesDropped:
filesPair = zip(self._renameFileNames, self._outputFileNames)
for oldName, newName in filesPair:
try:
oldName.rename(newName)
except FileExistsError:
pass
else:
self.applyFileRenameSignal.emit(self._outputFileNames)
self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True)
self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(False)
class ButtonIndex:
ApplyRename = 0
Undo = 1
Clear = 3
class Key:
RegEx = "RegEx"
SubString = "SubString"
MaxRegExCount = "MaxRegExCount"
|
nilq/baby-python
|
python
|
# Copyright (c) 2013 Stian Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from triton.rigidbody2d import RigidBody2d
from triton.vector2d import Vector2d
class CoordScaler:
def __init__(self, screen_size, scale=1):
self.scale = scale
if self.scale is None:
self.adaptive_scale = True
self.scale = 1
self.screen_size = screen_size
self.translation = screen_size/2
    def get_coords(self, cosmic_vect):
        screen_coords = cosmic_vect * self.scale + self.translation
        return screen_coords
def main():
import pygame
from collections import deque
screen_scaler = CoordScaler(Vector2d(800, 800), 350.0 / 249209300000.0)
max_history = 10000
gravitational_const = 6.67384*10**-11
earth = RigidBody2d()
earth._mass = 5.97*10**24
earth.pos = Vector2d(149600000000.0, 0.0)
earth.vel = Vector2d(0.0, 29000.8)
earth_history = deque([screen_scaler.get_coords(earth.pos).tuple()], max_history)
mars = RigidBody2d()
mars._mass = 6.42*10**23
mars.pos = Vector2d(249209300000.0, 0.0)
mars.vel = Vector2d(0.0, 24000.077)
mars_history = deque([screen_scaler.get_coords(mars.pos).tuple()], max_history)
sun = RigidBody2d()
sun._mass = 1.989*10**30
sun.pos = Vector2d(0.0, 0.0)
t = 0
dt = 3600
screen = pygame.display.set_mode(screen_scaler.screen_size.tuple())
clock = pygame.time.Clock()
def gravity(ent1, ent2):
"""Returns a force vector from one body to another"""
diff = (ent2.pos-ent1.pos)
#Universal gravity
dist = diff.length_sq()
force = gravitational_const * ent1._mass * ent2._mass / dist
return diff.normalize() * force
def draw_history(screen, history_deque):
if len(history_deque) < 2:
return
pygame.draw.lines(
screen,
(150,150,150),
False,
history_deque,
1)
def int_tuple(tup):
return (int(tup[0]), int(tup[1]))
counter = 0
while not pygame.QUIT in [e.type for e in pygame.event.get()]:
counter += 1
earth_sun = gravity(earth, sun)
earth_mars = gravity(earth, mars)
sun_mars = gravity(sun, mars)
earth.apply_force(earth.pos, earth_sun)
earth.apply_force(earth.pos, earth_mars)
mars.apply_force(mars.pos, -sun_mars)
mars.apply_force(mars.pos, -earth_mars)
sun.apply_force(sun.pos, sun_mars)
sun.apply_force(sun.pos, -earth_sun)
sun.update(t, dt)
earth.update(t, dt)
mars.update(t, dt)
t += dt
print("Simulation time (in days): " + str(t/(3600*24)))
screen.fill((10, 10, 20))
# draw the sun
sun_screen_coords = int_tuple(screen_scaler.get_coords(sun.pos).tuple())
pygame.draw.circle(screen, (220,200,100), sun_screen_coords, 20, 0)
# draw the earth
earth_screen_coords = int_tuple(screen_scaler.get_coords(earth.pos).tuple())
pygame.draw.circle(screen, (50,50,200), earth_screen_coords, 10, 0)
if counter % 10 == 0:
earth_history.append(earth_screen_coords)
draw_history(screen, earth_history)
# draw mars
mars_screen_coords = int_tuple(screen_scaler.get_coords(mars.pos).tuple())
pygame.draw.circle(screen, (200,100,100), mars_screen_coords, 10, 0)
if counter % 10 == 0:
mars_history.append(mars_screen_coords)
draw_history(screen, mars_history)
pygame.display.flip()
pygame.time.wait(0)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
@author: Hasan Albinsaid
@site: https://github.com/hasanabs
"""
import matplotlib.pyplot as plt
import numpy as np
import itertools
import os
def nck(n,k):
return np.math.factorial(n)/np.math.factorial(k)/np.math.factorial(n-k)
def nchoosek(arr, k):
return np.array(list(itertools.combinations(arr, k)))
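# Illustrative: nck(5, 2) == 10.0 and
# nchoosek([1, 2, 3], 2) -> array([[1, 2], [1, 3], [2, 3]])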
def optimum_RAC(all_RAC, n, r, size_comb):
ukuran=np.zeros(n,dtype=int)
while(len(all_RAC)>size_comb):
for i in range(n):
ukuran[i]=(all_RAC==i+1).sum()
idx_rem=0;
remaining_idx=np.arange(len(all_RAC))
sort_remove=np.argsort(-ukuran)
while(len(remaining_idx)>1):
old_remaining_idx=remaining_idx
remaining_idx=remaining_idx[np.where((all_RAC[remaining_idx,:]==sort_remove[idx_rem]+1))[0]]
if (len(remaining_idx)==0):
idx=0
while(len(remaining_idx)==0):
remaining_idx=old_remaining_idx[np.where((all_RAC[old_remaining_idx,:]==sort_remove[idx]+1))[0]]
idx+=1
idx_rem+=1
all_RAC=np.delete(all_RAC, (remaining_idx), axis=0)
return all_RAC
def bi2de(arr):
result=0
for i in range(len(arr)):result+=np.power(2,i)*arr[len(arr)-1-i]
return result
def de2bi(decimal, L_bit):
arr=np.zeros((1,L_bit), dtype=np.int8)
for i in range(L_bit):
arr[0,(L_bit-i-1)]=decimal%2
decimal=decimal>>1
return arr
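# Illustrative round trip: bi2de([1, 0, 1]) == 5 and
# de2bi(5, 4) -> array([[0, 1, 0, 1]], dtype=int8)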
def modulation(M):
if M==2: modulation=np.array([-1+0j, 1+0j])
elif M==4: modulation=np.array([-1-1j, -1+1j, 1+1j, 1-1j]/np.sqrt(2))
elif M==16: modulation=np.array([-3+3j, -3+1j, -3-3j, -3-1j,
-1+3j, -1+1j, -1-3j, -1-1j,
3+3j, 3+1j, 3-3j, 3-1j,
1+3j, 1+1j, 1-3j, 1-1j]/np.sqrt(10))
return modulation
def herm(matrix):
return np.transpose(np.conjugate(matrix))
def H(Nr, Nt):
return (np.random.randn(Nr,Nt)+np.random.randn(Nr,Nt)*1j)/np.sqrt(2)
def noise(SNR, Nr, Es):
return (np.random.randn(Nr,1)+np.random.randn(Nr,1)*1j)*np.sqrt(Es/np.power(10,(SNR)/10))/np.sqrt(2)
def plotter(Range, Error_bit, SNR_Min, SNR_Max, L, prop, Title, Label):
plt.figure(1)
ASBT = (np.ones((len(Error_bit),1)) - Error_bit)*L
plt.plot(Range, ASBT, prop, linewidth=1, label=Label)
plt.legend(loc='lower right', fontsize='x-large')
plt.axis([SNR_Min, SNR_Max, 2, 10.5])
plt.yscale('linear')
plt.xlabel('SNR[dB]')
plt.ylabel('ASBT')
plt.minorticks_on()
plt.grid(b=True, which='major')
plt.grid(b=True, which='minor',alpha=0.4)
plt.suptitle('ASBT '+ Label, fontsize='x-large', fontweight='bold')
plt.title(Title, fontsize='large', fontweight='book')
plt.show()
if not os.path.exists('../results'): os.makedirs('../results')
plt.savefig('../results/ASBT_'+Label+'.png')
plt.figure(2)
plt.plot(Range, Error_bit, prop, linewidth=1, label=Label)
plt.legend(loc='upper right', fontsize='x-large')
plt.axis([SNR_Min, SNR_Max, 6e-4, 1e-0])
plt.xscale('linear')
plt.yscale('log')
plt.xlabel('SNR[dB]')
plt.ylabel('BER')
plt.minorticks_on()
plt.grid(b=True, which='major')
plt.grid(b=True, which='minor',alpha=0.4)
plt.suptitle('BER ' + Label, fontsize='x-large', fontweight='bold')
plt.title(Title, fontsize='large', fontweight='book')
plt.show()
if not os.path.exists('../results'): os.makedirs('../results')
plt.savefig('../results/'+Label+'.png')
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
#-----------------------------------------------------------------------
# COPYRIGHT_BEGIN
# Copyright (C) 2016, FixFlyer, LLC.
# All rights reserved.
# COPYRIGHT_END
#-----------------------------------------------------------------------
class SessionStore(object):
""" """
class Listener(object):
""" """
        def on_session(self, session_id, begin_string, sender_comp_id, target_comp_id, session_qualifier, trading_session_id, last_seq):
pass
def add_session(self, session_id):
pass
def remove_session(self, session_id):
pass
def has_session(self, session_id):
pass
def get_session(self, session_id, trading_session_id_out, last_seq_out):
pass
    def update_session(self, session_id, trading_session_id, last_seq):
pass
def for_each_session(self, listener):
pass
|
nilq/baby-python
|
python
|
"""Process the markdown files.
The purpose of the script is to create a duplicate src directory within which
all of the markdown files are processed to match the requirements for building
a pdf from multiple markdown files using the pandoc library (***add link to
pandoc library documentation***), with pdf-specific text rendering in mind as
well.
"""
import os
import subprocess
import re
from datetime import datetime
def run_shell_cmd(command):
"""Run shell/bash commands passed as a string using subprocess module."""
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = process.stdout.read()
return output.decode('utf-8')
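# Illustrative (on a typical Linux system): run_shell_cmd("echo hi") -> "hi\n"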
def copy_src():
"""Duplicate src directory to a new but temp directory named 'src_copy'."""
# source and target directories
src_path = "../src/"
target_path = "src_copy"
# make new directory
mkdir_cmd = "mkdir "+target_path
run_shell_cmd(mkdir_cmd)
# copy contents of src directory
copy_cmd = "cp -R "+src_path+" "+target_path
run_shell_cmd(copy_cmd)
def copy_bids_logo():
"""Copy BIDS_logo.jpg from the BIDS_logo dir in the root of the repo."""
run_shell_cmd("cp ../BIDS_logo/BIDS_logo.jpg src_copy/src/images/")
def copy_images(root_path):
"""Copy images.
Will be done from images directory of subdirectories to images directory
in the src directory
"""
subdir_list = []
# walk through the src directory to find subdirectories named 'images'
# and copy contents to the 'images' directory in the duplicate src
# directory
for root, dirs, files in os.walk(root_path):
if 'images' in dirs:
subdir_list.append(root)
for each in subdir_list:
if each != root_path:
run_shell_cmd("cp -R "+each+"/images"+" "+root_path+"/images/")
def extract_header_string():
"""Extract the latest release's version number and date from CHANGES.md."""
released_versions = []
run_shell_cmd("cp ../mkdocs.yml src_copy/")
with open(os.path.join(os.path.dirname(__file__), 'src_copy/mkdocs.yml'), 'r') as file:
data = file.readlines()
header_string = data[0].split(": ")[1]
title = " ".join(header_string.split()[0:4])
version_number = header_string.split()[-1]
build_date = datetime.today().strftime('%Y-%m-%d')
return title, version_number, build_date
def add_header():
"""Add the header string extracted from changelog to header.tex file."""
title, version_number, build_date = extract_header_string()
header = " ".join([title, version_number, build_date])
# creating a header string with latest version number and date
header_string = ("\chead{ " + header + " }")
with open('header.tex', 'r') as file:
data = file.readlines()
    # now change the second-to-last line; note that you have to add a newline
data[-2] = header_string+'\n'
# re-write header.tex file with new header string
with open('header.tex', 'w') as file:
file.writelines(data)
def remove_internal_links(root_path, link_type):
"""Find and replace all cross and same markdown internal links.
The links will be replaced with plain text associated with it.
"""
if link_type == 'cross':
# regex that matches cross markdown links within a file
# TODO: add more documentation explaining regex
primary_pattern = re.compile(r'\[((?!http).[\w\s.\(\)`*/–]+)\]\(((?!http).+(\.md|\.yml|\.md#[\w\-\w]+))\)') # noqa: E501
elif link_type == 'same':
# regex that matches references sections within the same markdown
primary_pattern = re.compile(r'\[([\w\s.\(\)`*/–]+)\]\(([#\w\-._\w]+)\)')
for root, dirs, files in os.walk(root_path):
for file in files:
if file.endswith(".md"):
with open(os.path.join(root, file), 'r') as markdown:
data = markdown.readlines()
for ind, line in enumerate(data):
match = primary_pattern.search(line)
if match:
line = re.sub(primary_pattern,
match.group().split('](')[0][1:], line)
data[ind] = line
with open(os.path.join(root, file), 'w') as markdown:
markdown.writelines(data)
def modify_changelog():
"""Change first line of the changelog to markdown Heading 1.
This modification makes sure that in the pdf build, changelog is a new
chapter.
"""
with open('src_copy/src/CHANGES.md', 'r') as file:
data = file.readlines()
data[0] = "# Changelog"
with open('src_copy/src/CHANGES.md', 'w') as file:
file.writelines(data)
def edit_titlepage():
"""Add title and version number of the specification to the titlepage."""
title, version_number, build_date = extract_header_string()
with open('cover.tex', 'r') as file:
data = file.readlines()
data[-1] = ("\\textsc{\large "+version_number+"}" +
"\\\\[0.5cm]" +
"{\large " +
build_date +
"}" +
"\\\\[2cm]" +
"\\vfill" +
"\\end{titlepage}")
with open('cover.tex', 'w') as file:
        file.writelines(data)
if __name__ == '__main__':
duplicated_src_dir_path = 'src_copy/src'
# Step 1: make a copy of the src directory in the current directory
copy_src()
# Step 2: copy BIDS_logo to images directory of the src_copy directory
copy_bids_logo()
# Step 3: copy images from subdirectories of src_copy directory
copy_images(duplicated_src_dir_path)
subprocess.call("mv src_copy/src/images/images/* src_copy/src/images/",
shell=True)
# Step 4: extract the latest version number, date and title
extract_header_string()
add_header()
edit_titlepage()
# Step 5: modify changelog to be a level 1 heading to facilitate section
# separation
modify_changelog()
# Step 6: remove all internal links
remove_internal_links(duplicated_src_dir_path, 'cross')
remove_internal_links(duplicated_src_dir_path, 'same')
|
nilq/baby-python
|
python
|
# django
from hashlib import sha256
from uuid import uuid4
from django.utils.text import slugify
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# python
from bs4 import BeautifulSoup
from mistune import Markdown, Renderer
def get_new_hash():
return sha256(str(uuid4().hex).encode("utf-8")).hexdigest()
def format_tags(tags):
return " ".join({slugify(tag.lower()) for tag in tags})
def second_convert(second):
    second = int(second)
    years = int(second / (60 * 60 * 24 * 365.25))
    second -= int(years * (60 * 60 * 24 * 365.25))
    days = int(second / (60 * 60 * 24))
    second -= days * (60 * 60 * 24)
    hours = int(second / (60 * 60))
    second -= hours * (60 * 60)
    minutes = int(second / 60)
    second -= minutes * 60
    return dict(y=years, d=days, h=hours, m=minutes, s=int(second))
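# Illustrative sanity check:
# second_convert(90061) -> {'y': 0, 'd': 1, 'h': 1, 'm': 1, 's': 1}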
def marktohtml(marktext):
renderer = Renderer(escape=False, parse_block_html=True)
markdown = Markdown(renderer=renderer)
return BeautifulSoup(markdown(marktext), "html.parser")
def get_first_image(body):
soup = marktohtml(body)
img = soup.find("img")
if img is not None:
return img.get("src", "")
def dor(body):
"duration of read -> second"
return body.__len__() / 28
class NextOrPrevious:
def __init__(self, model, filter_field, id):
self.model = model
self.filter_field = filter_field
self.id = id
def next_or_previous(self, next=True):
queryset = self.model.objects.filter(**self.filter_field)
try:
index = list(queryset).index(queryset.filter(id=self.id)[0])
except IndexError:
return False
else:
if next:
index = index - 1
else:
index = index + 1
try:
return queryset[index]
except (IndexError, AssertionError):
return False
@property
def next_query(self):
return self.next_or_previous()
@property
def previous_query(self):
return self.next_or_previous(False)
def send_mail(subject, template_name, context, to):
html_content = render_to_string(template_name, context)
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, to)
msg.attach_alternative(html_content, "text/html")
msg.send()
def get_client_url():
return f"?client_id={settings.GITHUB_AUTH.get('client_id')}&client_secret={settings.GITHUB_AUTH.get('client_secret')}"
def ready_tags(tags, limit=5):
return format_tags(tags.split(" ")[:limit])
|
nilq/baby-python
|
python
|
import attr
from jstruct import JStruct, JList, REQUIRED
from typing import Optional, List
@attr.s(auto_attribs=True)
class Appointment:
type: str
date: Optional[str] = None
time: Optional[str] = None
phone: Optional[str] = None
@attr.s(auto_attribs=True)
class Address:
postalCode: str
provinceCode: str
number: Optional[int] = None
countryCode: Optional[str] = None
name: Optional[str] = None
@attr.s(auto_attribs=True)
class Hazmat:
number: int
phone: str
@attr.s(auto_attribs=True)
class Parcel:
quantity: int
parcelType: str
id: Optional[int] = None
weight: Optional[int] = None
length: Optional[int] = None
depth: Optional[int] = None
width: Optional[int] = None
note: Optional[str] = None
status: Optional[int] = None
FCA_Class: Optional[str] = None
hazmat: Optional[Hazmat] = JStruct[Hazmat]
requestReturnLabel: Optional[bool] = None
returnWaybill: Optional[str] = None
@attr.s(auto_attribs=True)
class PromoCode:
code: Optional[str] = None
@attr.s(auto_attribs=True)
class Surcharge:
type: str
id: Optional[int] = None
value: Optional[str] = None
name: Optional[str] = None
amount: Optional[int] = None
@attr.s(auto_attribs=True)
class RateRequest:
category: str
paymentType: str
deliveryType: str
unitOfMeasurement: str
sender: Address = JStruct[Address, REQUIRED]
consignee: Address = JStruct[Address, REQUIRED]
parcels: List[Parcel] = JList[Parcel, REQUIRED]
billing: Optional[int] = None
promoCodes: Optional[List[PromoCode]] = JList[PromoCode]
surcharges: Optional[List[Surcharge]] = JList[Surcharge]
appointment: Optional[Appointment] = JStruct[Appointment]
@attr.s(auto_attribs=True)
class TaxesDetail:
type: Optional[str] = None
amount: Optional[str] = None
name: Optional[str] = None
@attr.s(auto_attribs=True)
class Rate:
grossAmount: Optional[int] = None
discountAmount: Optional[int] = None
otherCharge: Optional[int] = None
fuelChargePercentage: Optional[int] = None
accountType: Optional[str] = None
rateType: Optional[str] = None
cubicWeight: Optional[float] = None
basicCharge: Optional[float] = None
weightCharge: Optional[float] = None
surcharges: List[Surcharge] = JList[Surcharge]
subTotal: Optional[float] = None
unitOfMeasurement: Optional[str] = None
taxesDetails: List[TaxesDetail] = JList[TaxesDetail]
taxes: Optional[float] = None
fuelCharge: Optional[float] = None
zoneCharge: Optional[float] = None
total: Optional[float] = None
@attr.s(auto_attribs=True)
class Reference:
code: Optional[int] = None
type: Optional[str] = None
@attr.s(auto_attribs=True)
class RateResponse:
delay: Optional[int] = None
terminalLimit: Optional[int] = None
singleShipmentCost: Optional[int] = None
quantity: Optional[int] = None
rates: List[Rate] = JList[Rate]
references: List[Reference] = JList[Reference]
unitOfMeasurement: Optional[str] = None
parcelType: Optional[str] = None
weight: Optional[str] = None
postalCodeDelivery: Optional[str] = None
postalCodePickup: Optional[str] = None
creator: Optional[str] = None
date: Optional[str] = None
warning: Optional[str] = None
|
nilq/baby-python
|
python
|
# Project Euler Problem 19 Solution
#
# Problem statement:
# You are given the following information, but you may prefer to
# do some research for yourself.
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on
# a century unless it is divisible by 400. How many Sundays fell on
# the first of the month during the twentieth century (1 Jan 1901 to
# 31 Dec 2000)?
#
# Solution description:
# Bruteforce solution: Implements a simple calendar, iterates over
# all the days and counts the number of Sundays that fell on the
# first of a month
#
# Fast solution: Iterates only over the relevant dates and
# uses Zeller's congruence
# (https://en.wikipedia.org/wiki/Zeller%27s_congruence) to figure
# out the weekday of each first day of a month
#
# Author: Tom Praschan
# Date: 2019/02/17
# License: MIT (see ../LICENSE.md)
import time
def is_leapyear(year):
"""
Returns True if year is a leap year and false otherwise
"""
return year % 4 == 0 and not (year % 100 == 0 and year % 400 != 0)
def days_per_month(month, year):
"""
Given a month (1=january, 2=february, etc.) this function
returns the number of days in that month (leap years are)
taken into account
"""
if month in [1, 3, 5, 7, 8, 10, 12]:
return 31
elif month in [4, 6, 9, 11]:
return 30
elif month == 2:
return 29 if is_leapyear(year) else 28
raise ValueError("The provided month m must fullfill 1 <= m <= 12!")
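# Illustrative: days_per_month(2, 2000) == 29 (2000 is divisible by 400, so
# it is a leap year), while days_per_month(2, 1900) == 28.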
def bruteforce_solution():
weekday = 1 # 1 = Monday, 2 = Tueday, ..., 7 = Sunday
day = 1
month = 1
year = 1900
num_sundays = 0
while not (day == 31 and month == 12 and year == 2000):
# Count sundays that fell on the first day of a month
        # Remember that we only start counting from 1901 onward!
if day == 1 and weekday == 7 and year >= 1901:
num_sundays += 1
# Increment date and weekday using modular arithmetic
day = day % days_per_month(month, year) + 1
weekday = weekday % 7 + 1
# Increment month
if day == 1:
month = month % 12 + 1
# Increment year
if day == 1 and month == 1:
year += 1
return num_sundays
def zellers_congruence(day, month, year):
"""
For a given date year/month/day this algorithm returns
the weekday of that date (1 = Monday, 2 = Tuesday, etc.)
For details see https://en.wikipedia.org/wiki/Zeller%27s_congruence
"""
# Consistent variable names with the formula on on Wikipedia
q = day
if month >= 3:
m = month # pragma: no cover
else:
m = month + 12
year -= 1
K = year % 100
J = year // 100
h = (q + (13 * (m + 1)) // 5 + K + K // 4 + J // 4 + 5*J) % 7
# Convert to ISO
return ((h + 5) % 7) + 1
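# Illustrative: zellers_congruence(1, 1, 1901) == 2, i.e. 1 Jan 1901 was a
# Tuesday (1 Jan 1900 was a Monday, and 1900 adds 365 % 7 == 1 day).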
def fast_solution():
num_sundays = 0
for year in range(1901, 2001):
for month in range(1, 13):
if zellers_congruence(1, month, year) == 7:
num_sundays += 1
return num_sundays
if __name__ == "__main__":
start = time.time()
solution = bruteforce_solution()
end = time.time()
print(f"Bruteforce Solution: {solution}")
print(f"Elapsed time: {end - start:.6}s")
start = time.time()
solution = fast_solution()
end = time.time()
print(f"Fast Solution (Zeller's congruence): {solution}")
print(f"Elapsed time: {end - start:.6}s")
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""Executes Android Monkey stress test over adb to attached Android device."""
__author__ = 'jeff.carollo@gmail.com (Jeff Carollo)'
import datetime
import json
import logging
import os
import subprocess
import sys
import time
from tasklib import apklib
ADB_COMMAND = apklib.ADB_COMMAND
MONKEY_COMMAND = ADB_COMMAND + 'shell "/system/bin/monkey -p %s --ignore-timeouts --kill-process-after-error -v 5000 --pct-touch 90 --pct-trackball 10 -s 10 %s; echo $? > /data/local/tmp/ret"'
STDOUT_FILENAME = 'cmd_stdout.log'
STDERR_FILENAME = 'cmd_stderr.log'
def ExitWithErrorCode(error_code):
if error_code == 0:
        logging.warning('Error code is zero, making it non-zero')
error_code = -7
sys.exit(error_code)
def main(argv):
my_name = argv.pop(0)
try:
apk_file_path = argv.pop(0)
except:
sys.stderr.write('Must give apk_file_path as first argument.\n')
sys.exit(-1)
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
result_metadata = {}
try:
manifest = apklib.ReadAndroidManifest(apk_file_path)
result_metadata[u'AndroidManifest.xml'] = manifest.encode('utf-8')
class_path = apklib.FindClassPath(manifest)
logging.info('Found class_path: %s', class_path)
logging.info('Installing .apk...')
try:
output = subprocess.check_output(
ADB_COMMAND + 'install -r %s' % apk_file_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb install error %d:\n%s', e.returncode, e.output)
try:
logging.info('Signing .apk...')
apklib.SignApk(apk_file_path)
output = subprocess.check_output(
ADB_COMMAND + 'install -r %s' % apk_file_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb install error %d:\n%s', e.returncode, e.output)
ExitWithErrorCode(e.returncode)
try:
logging.info('Running command...')
cmd_stdout = open(STDOUT_FILENAME, 'w')
cmd_stderr = open(STDERR_FILENAME, 'w')
command = MONKEY_COMMAND % (class_path, ' '.join(argv))
try:
timeout = datetime.timedelta(0, 900) # Give the thing 15 minutes.
begin_time = datetime.datetime.now()
timeout_time = begin_time + timeout
process = subprocess.Popen(args=command,
stdout=cmd_stdout,
stderr=cmd_stderr,
shell=True)
ret = None
while None == ret and (datetime.datetime.now() < timeout_time):
time.sleep(0.02)
ret = process.poll()
finished_time = datetime.datetime.now()
execution_time = finished_time - begin_time
logging.info('execution_time: %s', execution_time)
if finished_time >= timeout_time and (None == ret):
logging.error('command %s timed out.', command)
process.terminate()
process.wait()
ret = 0
elif ret == 0:
# Only write execution_time if we didn't time out or fail.
result_metadata['execution_time'] = execution_time.total_seconds()
apklib.CheckAdbShellExitCode()
if ret != 0:
logging.error('adb command exited with code %s', ret)
ExitWithErrorCode(ret)
except subprocess.CalledProcessError, e:
logging.error('Error %d:\n%s', e.returncode, e.output)
ExitWithErrorCode(e.returncode)
finally:
apklib.WriteResultMetadata(result_metadata)
cmd_stdout.flush()
cmd_stdout.close()
cmd_stderr.flush()
cmd_stderr.close()
logging.info('Uninstalling .apk...')
try:
output = subprocess.check_output(
ADB_COMMAND + 'uninstall %s' % class_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb uninstall error %d:\n%s', e.returncode, e.output)
# Don't fail just because uninstall didn't work.
try:
# Inspect and dump to logs the cmd stdout output.
cmd_stdout = open(STDOUT_FILENAME, 'r')
stdout_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stdout, sys.stdout)
except Exception, e:
logging.error('Error while dumping command stdout: %s', str(e))
stdout_exitcode = -5 # Don't exit yet, allow stderr to be dumped.
finally:
cmd_stdout.close()
try:
# Inspect and dump to logs the cmd stderr output.
cmd_stderr = open(STDERR_FILENAME, 'r')
stderr_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stderr, sys.stderr)
except Exception, e:
logging.error('Error while dumping command stderr: %s', str(e))
stderr_exitcode = -5
finally:
cmd_stderr.close()
if stdout_exitcode != 0:
logging.info('Error found in stdout.')
ExitWithErrorCode(stdout_exitcode)
if stderr_exitcode != 0:
logging.info('Error found in stderr.')
ExitWithErrorCode(stderr_exitcode)
logging.info('Monkey work done successfully.')
return 0
finally:
logging.shutdown()
if __name__ == '__main__':
main(sys.argv)
|
nilq/baby-python
|
python
|
from vol import Vol
from net import Net
from trainers import Trainer
from util import *
import os
from random import shuffle, sample, random
from sys import exit
embeddings = None
training_data = None
testing_data = None
network = None
t = None
N = None
tokens_l = None
def load_data():
global embeddings, N, tokens_l
embeddings = {}
raw = file('./data/word_projections-80.txt').read()
raw = raw[9:]
raw = raw.split('\n')
for elem in raw:
try:
data = elem.split()
word = data[0].lower()
vector = [ float(v) for v in data[1:] ]
embeddings[word] = vector
        except (IndexError, ValueError):
continue
path = './data/text/train_tiny'
words = list(token
for fname in os.listdir(path)
for token in file(os.path.join(path, fname)).read().split())
tokens = set(words)
tokens_l = list(tokens)
N = len(tokens)
    print 'Vocabulary size: {} unique tokens'.format(N)
step = 4
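    # Slide a window of `step` tokens over the corpus: the concatenated
    # embeddings of the first three words form the input vector and the
    # fourth word is the prediction target.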
data = []
for n in xrange(0, len(words) - step):
w1, w2, w3, pred = words[n:n+step]
if not (w1 in embeddings and w2 in embeddings and w3 in embeddings
and pred in embeddings and pred in tokens): continue
V = Vol(embeddings[w1] + embeddings[w2] + embeddings[w3])
label = tokens_l.index(pred)
data.append((V, label))
return data
def start():
global training_data, testing_data, network, t, N
all_data = load_data()
shuffle(all_data)
size = int(len(all_data) * 0.1)
training_data, testing_data = all_data[size:], all_data[:size]
print 'Data loaded, size: {}...'.format(len(all_data))
layers = []
layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 240})
layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'sigmoid'})
layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'})
layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'})
layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'})
#layers.append({'type': 'conv', 'sx': 1, 'filters': 240, 'pad': 0}) #lookup table like
#layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'tanh', 'drop_prob': 0.5})
#layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'tanh', 'drop_prob': 0.5})
layers.append({'type': 'softmax', 'num_classes': N})
print 'Layers made...'
network = Net(layers)
print 'Net made...'
print network
    t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001})
def train():
global training_data, network, t
print 'In training...'
print 'k', 'time\t\t ', 'loss\t ', 'training accuracy'
print '----------------------------------------------------'
try:
for x, y in training_data:
stats = t.train(x, y)
print stats['k'], stats['time'], stats['loss'], stats['accuracy']
except KeyboardInterrupt:
pass
finally:
saveJSON('./models/next_word_embeddings/network.json', network.toJSON())
def test_text(text, ngenerate=10, delete=True):
out = ''
for n in xrange(ngenerate):
x = []
words = text.split()
for word in words:
if word not in embeddings:
return 'word: {} not in corpus'.format(word)
else:
x.extend(embeddings[word])
output = network.forward(Vol(x)).w
pred = network.getPrediction()
new = tokens_l[pred] if random() < 0.5 else \
weightedSample(embeddings.keys(), output)
out += ' ' + new
text = ' '.join(words[1:] + [new])
return out
def test():
global testing_data, network
try:
print 'In testing...'
right = 0
for x, y in testing_data:
network.forward(x)
right += network.getPrediction() == y
accuracy = float(right) / len(testing_data)
print accuracy
except KeyboardInterrupt:
pass
finally:
print test_text('the answer is')
print test_text('i did this')
|
nilq/baby-python
|
python
|
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from enum import Enum
class OutputFormat(Enum):
table = 0
csv = 1
class StatsFilter(Enum):
all = 0
conf = 1
usage = 2
req = 3
blk = 4
err = 5
|
nilq/baby-python
|
python
|
import networkx as nx
import numpy as np
import math
def create_network(correct_answers, data, p_factor, realmodelQ, n_edges_score):
    #correct_answers is either a bool (True/False) or the string "All"
    #data is a pandas DataFrame of quiz answers
    #p_factor is a bool that is True if the factor (1-p) is to be considered for the weights and False otherwise
    #realmodelQ is a bool identifying if the network is for a real model (True) or not (False)
    #n_edges_score is the number of edges to be considered when computing the score
#Load dataset
dataset = data
    #initialize the directed graph (A -> B iff A answered question x before B and both chose the same option;
    #the weight is a value >0 and <1, and a multiple of 1/num_questions)
connected_students = nx.DiGraph()
#Get list of usernames
students = dataset.username.unique()
#Add the students (nodes) to the graph
connected_students.add_nodes_from(students)
#Get number of quizzes
num_quizzes = len(dataset.quiz_id.unique())
#Get total number of questions considering all the quizzes
total_num_questions = len(dataset.quiz_question_id.unique())
#Initialize a dictionary with the students' performance (% of correct answers)
students_performance = {}
#Initialize a dictionary with the % of choice for each option_id in the set of all quizzes and questions
percent_options = {}
#Initialize a dictionary with the % of correct answers for each question in the set of all quizzes
percent_correct_questions = {}
#Initialize a dictionary of the edge colors
edge_colors = {}
#Initialize a dictionary with the correspondence of -> question: quiz
questions_by_quiz = {}
#Initialize a dictionary with the ranks of quizzes
rank_quizzes = {}
#Initialize the rank var
rank = 0
#Initialize a dictionary with the correspondence of -> quiz: number of questions
num_quest_by_quiz = {}
    #Initialize a dictionary with the number of questions each student participated in
num_question_participations = {}
    #Initialize a dictionary that has as keys the questions and as values dictionaries with keys the student and values 1 if
#his/her answer is correct or 0 otherwise
correct_question_per_student = {}
#Initialize a dictionary that has as keys the questions and as values their solution frequency (higher values means that the question is easier)
sol_freq_per_question = {}
#Initialize a dictionary that has as keys the questions and as values their solution frequency penalized (higher values means that the question is easier)
sol_freq_per_question_penalized = {}
for i in dataset.quiz_id.unique(): #run the list of possible quizzes to compute edges and weights
#print("quiz_id =", i)
#Get the subdataset for each quiz
dataset_quiz_i = dataset.loc[dataset['quiz_id'] == i]
#Update the dictionary with the rank of quizzes
rank_quizzes[i] = rank
        #Get number of questions of this quiz (to compute edge's weights) - each quiz has its own factor, given the number of questions
num_questions = len(dataset_quiz_i.quiz_question_id.unique())
#Store the number of questions on this quiz
num_quest_by_quiz[str(i)] = num_questions
#Sort dataset for quiz i by quiz_question_id and answer_date
dataset_quiz_i = dataset_quiz_i.sort_values(by=['quiz_question_id', 'answer_date'], ascending = [True, True])
for question in dataset_quiz_i.quiz_question_id.unique(): #run the list of possible question_id
#Initialize the empty dictionary for this question
correct_question_per_student[question] = {}
#print("question =", question)
#Get the subdataset for each question_id
dataset_qi = dataset_quiz_i.loc[dataset_quiz_i['quiz_question_id'] == question]
            #Get list of students who participated in this question
participating_students = dataset_qi.username.unique()
for participant in participating_students:
if participant in num_question_participations.keys():
num_question_participations[participant] += 1
else:
num_question_participations[participant] = 1
#Update the dictionary with the correspondence of -> question: quiz
questions_by_quiz[question] = i
#Initialize the percentage of correct answers for this question
percent_correct_answers = 0
#Get the percentage for each option_id/correct answers in this question
for user in range(len(dataset_qi)):
#Get user name
username = dataset_qi['username'].iloc[user]
#Get the option_id chosen by this user
option_chosen = dataset_qi['option_id'].iloc[user]
#Check if the option chosen is correct or not
is_correct = dataset_qi['correct'].iloc[user]
#If the option chosen is correct, update the percentage of correct answers value
if is_correct:
percent_correct_answers += 1/len(dataset_qi)
#save the information on this student's answer
correct_question_per_student[question][username] = 1
else:
#save the information on this student's answer
correct_question_per_student[question][username] = 0
#if the option_id is not in the percent's dictionary add it
if option_chosen not in percent_options:
percent_options[option_chosen] = 1/len(dataset_qi)
#else update its percentage
else:
percent_options[option_chosen] += 1/len(dataset_qi)
if percent_options[option_chosen]>1:
                        #Do not let this percentage be greater than 1
percent_options[option_chosen] = 1
#Add to the dictionary the percentage of correct answers for this question
percent_correct_questions[question] = percent_correct_answers
#Evaluate which kind of connections we wish to analyse: only the True/False or All of them
if isinstance(correct_answers, bool):
for j in range(len(dataset_qi)):
userj = dataset_qi['username'].iloc[j]
#Get the option_id chosen by userj
option_chosen_j = dataset_qi['option_id'].iloc[j]
#if the answer is correct
if dataset_qi['correct'].iloc[j]:
value = 1
#if the answer is incorrect
else:
value = 0
#if the user is not in the performance's dictionary add it
if userj not in students_performance:
students_performance[userj] = value
#else update its performance
else:
students_performance[userj] += value
#if its response is in accordance with the value of correct_answers, study the following users
if dataset_qi['correct'].iloc[j] == correct_answers:
                #create an edge between every student who answered after the current one and chose the same option_id
for k in range(j+1, len(dataset_qi)):
userk = dataset_qi['username'].iloc[k]
#Get the option_id chosen by userk
option_chosen_k = dataset_qi['option_id'].iloc[k]
#if both students chose the same option
if option_chosen_j == option_chosen_k:
#if the edge already exists, update its weight
if connected_students.has_edge(userj, userk):
if p_factor:
connected_students[userj][userk]['weight'] += 1/num_questions * (1 - percent_options[option_chosen_j])
else:
connected_students[userj][userk]['weight'] += 1/num_questions
#if the edge does not exist, add it
else:
if p_factor:
connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions * (1 - percent_options[option_chosen_j]))])
else:
connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions)])
elif correct_answers == "All":
#run then subdataset for question_id=i to create edges between students
for j in range(len(dataset_qi)):
userj = dataset_qi['username'].iloc[j]
#Get the option_id chosen by userj
option_chosen_j = dataset_qi['option_id'].iloc[j]
#if the answer is correct
if dataset_qi['correct'].iloc[j]:
value = 1
#else the answer is incorrect
else:
value = 0
#if the user is not in the performance's dictionary add it
if userj not in students_performance:
students_performance[userj] = value
#else update its performance
else:
students_performance[userj] += value
                    #create an edge between every student who answered after the current one and chose the same option_id
for k in range(j+1, len(dataset_qi)):
userk = dataset_qi['username'].iloc[k]
#Get the option_id chosen by userk
option_chosen_k = dataset_qi['option_id'].iloc[k]
#if both students chose the same option
if option_chosen_j == option_chosen_k:
#if the edge already exists, update its weight
if connected_students.has_edge(userj, userk):
if p_factor:
connected_students[userj][userk]['weight'] += 1/num_questions * (1 - percent_options[option_chosen_j])
else:
connected_students[userj][userk]['weight'] += 1/num_questions
#if the edge does not exist, add it
else:
if p_factor:
connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions * (1 - percent_options[option_chosen_j]) )])
else:
connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions)])
#Sort the dictionary for each question by student username
            # FIX: a username may not be transformable into a float, so sorting
            # by username is disabled for now:
# correct_question_per_student[question] = dict(sorted(correct_question_per_student[question].items(), key=lambda item: float(item[0])))
if realmodelQ:
#Compute the solution frequency for each question
sol_freq_per_question[question] = (1/len(correct_question_per_student[question])) * sum([value for value in correct_question_per_student[question].values()])
#Compute the solution frequency penalized for each question
if sol_freq_per_question[question] != 1:
sol_freq_per_question_penalized[question] = math.log(sol_freq_per_question[question] / (1 - sol_freq_per_question[question] ))
if realmodelQ:
#Sort questions by difficulty (easier - solution frequency higher - first)
sol_freq_per_question = dict(sorted(sol_freq_per_question.items(), key=lambda item: item[1], reverse=True))
#Increment the value of the rank
rank += 1
#Compute the cheating indicators statistics for each student
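    # Note: U1, U3 and CS below appear to be person-fit statistics from the
    # answer-copying / aberrant-response literature (e.g. van der Flier's U3);
    # larger values generally indicate a more unusual pattern of correct
    # answers given item difficulty.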
score_U1 = {}
score_U3 = {}
score_CS = {}
if realmodelQ:
for alumn in students:
#U1 Statistic
numerator = 0
#get the sum score for this student
sum_score = 0
num_ques = len(sol_freq_per_question)
ordered_questions = [key for key in sol_freq_per_question.keys()]
for q in ordered_questions:
if alumn in correct_question_per_student[q].keys():
sum_score += correct_question_per_student[q][alumn]
for qu in range(num_ques-1):
for que in range(qu+1,num_ques):
if alumn in correct_question_per_student[ordered_questions[qu]].keys() and alumn in correct_question_per_student[ordered_questions[que]].keys():
if correct_question_per_student[ordered_questions[qu]][alumn] < correct_question_per_student[ordered_questions[que]][alumn]:
numerator += 1
if sum_score > 0 and sum_score < num_ques:
score_U1[alumn] = numerator / (sum_score * (num_ques - sum_score))
else:
score_U1[alumn] = 0
#Sort dictionary
score_U1 = dict(sorted(score_U1.items(), key=lambda item: item[1], reverse=True))
#U3 Statistic & CS Statistic
first_term = 0
first_term_CS = 0
for w in range(sum_score):
if ordered_questions[w] in sol_freq_per_question_penalized.keys():
first_term += sol_freq_per_question_penalized[ordered_questions[w]]
first_term_CS += sol_freq_per_question[ordered_questions[w]]
second_term = 0
second_term_CS = 0
third_term_CS = 0
for y in range(num_ques):
if alumn in correct_question_per_student[ordered_questions[y]].keys():
if ordered_questions[y] in sol_freq_per_question_penalized.keys():
second_term += correct_question_per_student[ordered_questions[y]][alumn] * sol_freq_per_question_penalized[ordered_questions[y]]
second_term_CS += correct_question_per_student[ordered_questions[y]][alumn] * sol_freq_per_question[ordered_questions[y]]
third_term_CS += sol_freq_per_question[ordered_questions[y]]
third_term = 0
            for x in range(num_ques - sum_score, num_ques):
if ordered_questions[x] in sol_freq_per_question_penalized.keys():
third_term += sol_freq_per_question_penalized[ordered_questions[x]]
if sum_score > 0 and sum_score < num_ques:
score_U3[alumn] = (first_term - second_term) / (first_term - third_term)
else:
score_U3[alumn] = 0
#Sort dictionary
score_U3 = dict(sorted(score_U3.items(), key=lambda item: item[1], reverse=True))
if sum_score > 0 and sum_score < num_ques:
score_CS[alumn] = (num_ques * (first_term_CS - second_term_CS)) / (num_ques * first_term_CS - sum_score * third_term_CS)
else:
score_CS[alumn] = 0
#Sort dictionary
score_CS = dict(sorted(score_CS.items(), key=lambda item: item[1], reverse=True))
num_questions_total = np.max([value for value in num_question_participations.values()])
#Get classification of correct answers (0-20) in the dictionary
students_performance = {k: round(v/num_questions_total*20,2) for k, v in students_performance.items()}
#Define node color based on the performance
color_map = {}
#Assign color to each node
for key in students_performance:
if students_performance[key] >= 19:
color_map[key] = 'DarkGreen'
elif students_performance[key] >= 17:
color_map[key] = 'Green'
elif students_performance[key] >= 15:
color_map[key] = 'OliveDrab'
elif students_performance[key] >= 13:
            color_map[key] = 'ForestGreen'
elif students_performance[key] >= 10:
color_map[key] = 'YellowGreen'
elif students_performance[key] >= 7:
color_map[key] = 'GreenYellow'
else:
color_map[key] = 'PaleGreen'
#Get list of graph's edges
edges_data = list(connected_students.edges.data())
#Compute students' scores
#Create dictionary with scores per student (in and out)
students_score_in = {}
students_score_out = {}
for node in connected_students.nodes():
#List of ingoing weights for this node
ingoing_edges_weights = [e[2]['weight'] for e in edges_data if e[1] == str(node)]
#Sort list of weights
ingoing_edges_weights = sorted(ingoing_edges_weights, reverse=True)
#Ingoing score (consumption)
        #Get the n_edges_score highest weights
n_highest_in = ingoing_edges_weights[:n_edges_score]
#If there are no ingoing edges the score is 0
if n_highest_in != []:
students_score_in[node] = sum(n_highest_in)
else:
students_score_in[node] = 0
        #List of outgoing weights for this node
outgoing_edges_weights = [e[2]['weight'] for e in edges_data if e[0] == str(node)]
#Sort list of weights
outgoing_edges_weights = sorted(outgoing_edges_weights, reverse=True)
#Outgoing score (sharing)
        #Get the n_edges_score highest weights
        n_highest_out = outgoing_edges_weights[:n_edges_score]
        #If there are no outgoing edges the score is 0
if n_highest_out != []:
students_score_out[node] = sum(n_highest_out)
else:
students_score_out[node] = 0
#Sort the dictionaries by values
students_score_in = dict(sorted(students_score_in.items(), key=lambda item: item[1], reverse=True))
students_score_out = dict(sorted(students_score_out.items(), key=lambda item: item[1], reverse=True))
return [students_score_in, students_score_out]
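# Example usage (illustrative; assumes `df` is a pandas DataFrame with columns
# username, quiz_id, quiz_question_id, option_id, correct and answer_date):
#   scores_in, scores_out = create_network("All", df, p_factor=True,
#                                           realmodelQ=False, n_edges_score=3)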
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import (
EconomicAssessment,
EconomicImpactAssessment,
ResolvabilityAssessment,
StrategicAssessment,
)
@admin.register(EconomicImpactAssessment)
class EconomicImpactAssessmentAdmin(admin.ModelAdmin):
pass
@admin.register(EconomicAssessment)
class EconomicAssessmentAdmin(admin.ModelAdmin):
pass
@admin.register(StrategicAssessment)
class StrategicAssessmentAdmin(admin.ModelAdmin):
pass
@admin.register(ResolvabilityAssessment)
class ResolvabilityAssessmentAdmin(admin.ModelAdmin):
pass
|
nilq/baby-python
|
python
|
import textwrap
from pathlib import Path
import pyexasol
import pytest
from exasol_udf_mock_python.column import Column
from exasol_udf_mock_python.connection import Connection
from exasol_udf_mock_python.group import Group
from exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment
from exasol_udf_mock_python.mock_meta_data import MockMetaData
from exasol_udf_mock_python.udf_mock_executor import UDFMockExecutor
from exasol_data_science_utils_python.preprocessing.sql.schema.schema_name import SchemaName
from exasol_bucketfs_utils_python.bucketfs_factory import BucketFSFactory
@pytest.fixture(scope="session")
def db_connection():
db_connection = Connection(address=f"localhost:8888", user="sys", password="exasol")
return db_connection
@pytest.fixture(scope="session")
def pyexasol_connection(db_connection):
conn = pyexasol.connect(dsn=db_connection.address, user=db_connection.user, password=db_connection.password)
return conn
@pytest.fixture(scope="session")
def upload_language_container(pyexasol_connection, language_container):
container_connection = Connection(address=f"http://localhost:6583/default/container;bfsdefault",
user="w", password="write")
bucket_fs_factory = BucketFSFactory()
container_bucketfs_location = \
bucket_fs_factory.create_bucketfs_location(
url=container_connection.address,
user=container_connection.user,
pwd=container_connection.password,
base_path=None)
container_path = Path(language_container["container_path"])
alter_session = Path(language_container["alter_session"])
pyexasol_connection.execute(f"ALTER SYSTEM SET SCRIPT_LANGUAGES='{alter_session}'")
pyexasol_connection.execute(f"ALTER SESSION SET SCRIPT_LANGUAGES='{alter_session}'")
with open(container_path, "rb") as container_file:
container_bucketfs_location.upload_fileobj_to_bucketfs(container_file, "ml.tar")
@pytest.fixture(scope="session")
def create_input_table(pyexasol_connection):
pyexasol_connection.execute("""
CREATE OR REPLACE TABLE TEST.ABC(
P1 INTEGER,
P2 INTEGER,
A FLOAT,
B FLOAT,
C FLOAT
)
""")
for i in range(1, 100):
if i % 100 == 0:
print(f"Insert {i}")
values = ",".join([f"({j % 2},{i % 2},{j * 1.0 * i}, {j * 2.0 * i}, {j * 3.0 * i})" for j in range(1, 100)])
pyexasol_connection.execute(f"INSERT INTO TEST.ABC VALUES {values}")
print("COUNT", pyexasol_connection.execute("SELECT count(*) FROM TEST.ABC").fetchall())
def drop_and_create_target_schema(pyexasol_connection):
try:
pyexasol_connection.execute("""
DROP SCHEMA TARGET_SCHEMA CASCADE;
""")
except:
pass
pyexasol_connection.execute("""CREATE SCHEMA TARGET_SCHEMA;""")
def udf_wrapper():
from exasol_udf_mock_python.udf_context import UDFContext
from sklearn.linear_model import SGDRegressor
from numpy.random import RandomState
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \
ColumnDescriptionBasedTablePreprocessorFactory
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \
ColumnPreprocessorDescription
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \
ExactColumnNameSelector
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \
MinMaxScalerFactory
from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF
train_udf = PartialFitRegressionTrainUDF()
def run(ctx: UDFContext):
model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False,
fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling')
table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory(
input_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("A"),
column_preprocessor_factory=MinMaxScalerFactory()
),
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("B"),
column_preprocessor_factory=MinMaxScalerFactory()
),
],
target_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("C"),
column_preprocessor_factory=MinMaxScalerFactory()
),
]
)
train_udf.run(exa, ctx, model, table_preprocessor_factory)
def test_train_udf_with_mock_random_partitions(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 3
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=3,
split_by_columns=None,
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_node(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 1
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=True,
number_of_random_partitions=None,
split_by_columns=None,
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_columns(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 4
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=None,
split_by_columns="P1,P2",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_random_partitions_and_split_by_columns(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 6
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=3,
split_by_columns="P1",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_node_and_random_partitions(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=True,
number_of_random_partitions=2,
split_by_columns=None
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_columns_empty_string(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=2,
split_by_columns="",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_multiple_groups(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
number_of_groups = 2
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=2,
split_by_columns="",
number_of_groups=number_of_groups
)
unique_model_id_in_base_models = {row[1] for row in fitted_base_models}
assert len(fitted_base_models) == expected_number_of_base_models * number_of_groups
assert len(unique_model_id_in_base_models) == number_of_groups
assert len(unique_base_models) == expected_number_of_base_models * number_of_groups
assert len(fitted_combined_models) == 1 * number_of_groups
assert len(result) == number_of_groups
for group in result:
assert len(group.rows) == 1
def run_mock_test_valid(db_connection,
pyexasol_connection,
split_by_node: bool,
number_of_random_partitions: int,
split_by_columns: str,
number_of_groups: int = 1):
result = run_mock_test(db_connection,
pyexasol_connection,
split_by_node,
number_of_random_partitions,
split_by_columns,
number_of_groups)
fitted_base_models, fitted_combined_models, unique_base_models = get_results(pyexasol_connection, result)
return result, fitted_base_models, fitted_combined_models, unique_base_models
def get_results(pyexasol_connection, result):
fitted_base_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall()
print("fitted_base_models", fitted_base_models)
fitted_combined_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall()
print("fitted_combined_models", fitted_combined_models)
unique_base_models = {row[4] for row in fitted_base_models}
print("result", result)
return fitted_base_models, fitted_combined_models, unique_base_models
def run_mock_test(db_connection,
pyexasol_connection,
split_by_node: bool,
number_of_random_partitions: int,
split_by_columns: str,
number_of_groups: int = 1):
executor = UDFMockExecutor()
meta = MockMetaData(
script_code_wrapper_function=udf_wrapper,
input_type="SET",
input_columns=[
Column("model_connection", str, "VARCHAR(2000000)"),
Column("path_under_model_connection", str, "VARCHAR(2000000)"),
Column("download_retry_seconds", int, "INTEGER"),
Column("db_connection", str, "VARCHAR(2000000)"),
Column("source_schema_name", str, "VARCHAR(2000000)"),
Column("source_table_name", str, "VARCHAR(2000000)"),
Column("columns", str, "VARCHAR(2000000)"),
Column("target_schema_name", str, "VARCHAR(2000000)"),
Column("experiment_name", str, "VARCHAR(2000000)"),
Column("epochs", int, "INTEGER"),
Column("batch_size", int, "INTEGER"),
Column("shuffle_buffer_size", int, "INTEGER"),
Column("split_per_node", bool, "BOOLEAN"),
Column("number_of_random_partitions", int, "INTEGER"),
Column("split_by_columns", str, "VARCHAR(2000000)"),
],
output_type="EMIT",
output_columns=[
Column("job_id", str, "VARCHAR(2000000)"),
Column("model_id", str, "VARCHAR(2000000)"),
Column("model_connection_name", str, "VARCHAR(2000000)"),
Column("path_under_model_connection", str, "VARCHAR(2000000)"),
Column("model_path", str, "VARCHAR(2000000)"),
]
)
model_connection, model_connection_name = \
create_model_connection(pyexasol_connection)
drop_and_create_target_schema(pyexasol_connection)
exa = MockExaEnvironment(meta,
connections={
"MODEL_CONNECTION": model_connection,
"DB_CONNECTION": db_connection
})
groups = [Group([(
model_connection_name,
"my_path_under_model_connection_" + str(i),
60,
"DB_CONNECTION",
"TEST",
"ABC",
"A,B,C",
"TARGET_SCHEMA",
"EXPERIMENT",
10,
100,
10000,
split_by_node,
number_of_random_partitions,
split_by_columns
)]) for i in range(number_of_groups)]
result = list(executor.run(groups, exa))
return result
def test_train_udf(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
model_connection, model_connection_name = \
create_model_connection(pyexasol_connection)
db_connection, db_connection_name = \
create_db_connection(pyexasol_connection, db_connection)
target_schema = SchemaName("TARGET_SCHEMA")
drop_and_create_target_schema(pyexasol_connection)
udf_sql = textwrap.dedent(f"""
CREATE OR REPLACE PYTHON3_DSUP SET SCRIPT {target_schema.fully_qualified()}."TRAIN_UDF"(
model_connection VARCHAR(2000000),
path_under_model_connection VARCHAR(2000000),
download_retry_seconds INTEGER,
db_connection VARCHAR(2000000),
source_schema_name VARCHAR(2000000),
source_table_name VARCHAR(2000000),
columns VARCHAR(2000000),
target_schema_name VARCHAR(2000000),
experiment_name VARCHAR(2000000),
epochs INTEGER,
batch_size INTEGER,
shuffle_buffer_size INTEGER,
split_per_node BOOLEAN,
number_of_random_partitions INTEGER,
split_by_columns VARCHAR(2000000)
)
EMITS (
job_id VARCHAR(2000000),
model_id VARCHAR(2000000),
model_connection_name VARCHAR(2000000),
path_under_model_connection VARCHAR(2000000),
model_path VARCHAR(2000000)
) AS
from sklearn.linear_model import SGDRegressor
from numpy.random import RandomState
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \
ColumnDescriptionBasedTablePreprocessorFactory
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \
ColumnPreprocessorDescription
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \
ExactColumnNameSelector
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \
MinMaxScalerFactory
from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF
train_udf = PartialFitRegressionTrainUDF()
def run(ctx):
model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False,
fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling')
table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory(
input_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("A"),
column_preprocessor_factory=MinMaxScalerFactory()
),
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("B"),
column_preprocessor_factory=MinMaxScalerFactory()
),
],
target_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("C"),
column_preprocessor_factory=MinMaxScalerFactory()
),
]
)
train_udf.run(exa, ctx, model, table_preprocessor_factory)
""")
pyexasol_connection.execute(udf_sql)
query_udf = f"""
select {target_schema.fully_qualified()}."TRAIN_UDF"(
'{model_connection_name}',
'my_path_under_model_connection',
60,
'{db_connection_name}',
'TEST',
'ABC',
'A,B,C',
'TARGET_SCHEMA',
'EXPERIMENT',
10,
100,
10000,
True,
4,
null
)
"""
pyexasol_connection.execute(query_udf)
fitted_base_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall()
print(fitted_base_models)
assert len(fitted_base_models) == 4
fitted_combined_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall()
print(fitted_combined_models)
assert len(fitted_combined_models) == 1
def create_model_connection(conn):
model_connection = Connection(address=f"http://localhost:6583/default/model;bfsdefault",
user="w", password="write")
model_connection_name = "MODEL_CONNECTION"
return drop_and_create_connection(conn, model_connection, model_connection_name)
def create_db_connection(conn, db_connection):
db_connection_name = "DB_CONNECTION"
return drop_and_create_connection(conn, db_connection, db_connection_name)
def drop_and_create_connection(conn, model_connection, model_connection_name):
try:
conn.execute(f"DROP CONNECTION {model_connection_name}")
except:
pass
conn.execute(
f"CREATE CONNECTION {model_connection_name} TO '{model_connection.address}' USER '{model_connection.user}' IDENTIFIED BY '{model_connection.password}';")
return model_connection, model_connection_name
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import ast
# This has to be a global due to `exec` shenanigans :-(
current_spec = {}
# SQL types
SQL_TYPES = [
'TEXT',
'DATE',
'DATETIME',
'INTEGER',
'BIGINT',
'UNSIGNED_BIGINT',
'DOUBLE',
'BLOB',
]
# Functions that we don't need
DUMMY_FUNCTIONS = [
'ForeignKey',
'attributes',
'description',
'examples',
'implementation',
'fuzz_paths',
'WINDOWS',
'POSIX',
'LINUX',
'DARWIN',
]
RESERVED_KEYWORDS = [
'table',
'set',
]
def table_name(name, aliases=None):
current_spec['name'] = name
current_spec['aliases'] = aliases
def Column(name, col_type, *args, **kwargs):
if name in RESERVED_KEYWORDS:
name = '"%s"' % name
return (name, col_type)
def schema(schema):
# Filter out 'None' entries (usually from ForeignKeys)
real_schema = [x for x in schema if x is not None]
current_spec['schema'] = real_schema
def extended_schema(macro, schema):
# Filter out 'None' entries (usually from ForeignKeys)
real_schema = [x for x in schema if x is not None]
current_spec.setdefault('extended_schema', []).extend(real_schema)
def extract_schema(filename):
namespace = {
'Column': Column,
'schema': schema,
'table_name': table_name,
'extended_schema': extended_schema,
'current_spec': {},
}
for fn in DUMMY_FUNCTIONS:
namespace[fn] = lambda *args, **kwargs: None
for ty in SQL_TYPES:
namespace[ty] = ty
with open(filename, 'rU') as f:
tree = ast.parse(f.read())
exec(compile(tree, '<string>', 'exec'), namespace)
columns = ', '.join('%s %s' % (x[0], x[1]) for x in current_spec['schema'])
    statements = []
statements.append('CREATE TABLE %s (%s);' % (current_spec['name'], columns))
if 'extended_schema' in current_spec:
statement = 'ALTER TABLE %s ADD %%s %%s;' % (current_spec['name'], )
for column_name, column_definition in current_spec['extended_schema']:
statements.append(statement % (column_name, column_definition))
del current_spec['extended_schema']
return '\n'.join(statements)
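# Example of a table spec this module consumes (hypothetical table):
#   table_name("users")
#   schema([Column("uid", BIGINT), Column("username", TEXT)])
# which extract_schema() renders as:
#   CREATE TABLE users (uid BIGINT, username TEXT);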
if __name__ == '__main__':
import sys
print(extract_schema(sys.argv[1]))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2021, Spyder Bot
#
# Licensed under the terms of the MIT license
# ----------------------------------------------------------------------------
"""
Status bar widgets.
"""
# Third-party imports
from qtpy.QtCore import Signal, Slot
from qtpy.QtWidgets import QComboBox
# Spyder imports
from spyder.api.config.decorators import on_conf_change
from spyder.api.translations import get_translation
from spyder.api.widgets.status import StatusBarWidget
# Localization
_ = get_translation("status_bar_widgets.spyder")
# ---- Constants
class StatusbarWidgets:
ThemeStatus = 'theme-status'
PlainFontSizeStatus = 'combobox-status'
# ---- Theme widget
class ThemeStatusWidget(StatusBarWidget):
"""
Widget to display the current syntax highlighting theme.
Notes
-----
* Status bar widgets need to inherit from StatusBarWidget or
BaseTimerStatus.
* See container.py to check how its label is updated and plugin.py
to see how it's registered in the status bar.
"""
ID = StatusbarWidgets.ThemeStatus
# ---- Font size widget
class PlainFontSizeComboBox(QComboBox):
def __init__(self, parent):
super().__init__(parent)
# Add some font sizes to choose from.
self.addItems([str(i) for i in range(9, 16)])
class PlainFontSizeStatus(StatusBarWidget):
ID = StatusbarWidgets.PlainFontSizeStatus
CUSTOM_WIDGET_CLASS = PlainFontSizeComboBox
sig_size_change_requested = Signal(int)
"""
    This signal is emitted to request a plain text font size
change in Spyder.
Parameters
----------
font_size: int
New font size (in pixels).
"""
def __init__(self, parent):
super().__init__(parent)
self.custom_widget.currentTextChanged.connect(self.set_size)
def set_current_size(self, size):
"""Set current font size in combobox."""
# The value that comes from Spyder config system is an int, but
# the combobox only accepts strings.
size = str(size)
# Add size to combobox in case it's not present among items
if self.custom_widget.findText(size) == -1:
self.custom_widget.addItem(size)
# Set size as default value
index = self.custom_widget.findText(size)
self.custom_widget.setCurrentIndex(index)
@Slot(str)
def set_size(self, value):
"""
Set selected size in combobox in Spyder config system and
request a change.
"""
# In Spyder this is an int, not a string.
value = int(value)
# *Note*: This should be as simple as setting the new font size and
# seeing the changes happen in Spyder. Unfortunately, that's not the
# way it's working right now, but it will be in Spyder 5.1.0.
# For now we have to emit a signal and handle the update manually at
# the plugin level.
self.set_conf(section='appearance', option='font/size', value=value)
self.sig_size_change_requested.emit(value)
|
nilq/baby-python
|
python
|
from .. import Provider as CreditCardProvider
class Provider(CreditCardProvider):
pass
|
nilq/baby-python
|
python
|
import collections
import time
import warnings
from collections import namedtuple
import numpy as np
import torch
from tianshou.data import Batch, ReplayBuffer
from tianshou.env import BaseVectorEnv, VectorEnv
Experience = namedtuple('Exp', ['hidden', 'obs', 'act', 'reward', 'obs_next', 'done'])
HIDDEN_SIZE = 256
class Collector(object):
"""The :class:`~tianshou.data.Collector` enables the policy to interact
with different types of environments conveniently.
:param policy: an instance of the :class:`~tianshou.policy.BasePolicy`
class.
:param env: an environment or an instance of the
:class:`~tianshou.env.BaseVectorEnv` class.
:param buffer: an instance of the :class:`~tianshou.data.ReplayBuffer`
class, or a list of :class:`~tianshou.data.ReplayBuffer`. If set to
        ``None``, no data will be stored.
    :param int stat_size: window size for the moving averages of collection
        speed, defaults to 5.
Example:
::
policy = PGPolicy(...) # or other policies if you wish
env = gym.make('CartPole-v0')
replay_buffer = ReplayBuffer(size=10000)
# here we set up a collector with a single environment
collector = Collector(policy, env, buffer=replay_buffer)
# the collector supports vectorized environments as well
envs = VectorEnv([lambda: gym.make('CartPole-v0') for _ in range(3)])
buffers = [ReplayBuffer(size=5000) for _ in range(3)]
# you can also pass a list of replay buffer to collector, for multi-env
# collector = Collector(policy, envs, buffer=buffers)
collector = Collector(policy, envs, buffer=replay_buffer)
# collect at least 3 episodes
collector.collect(n_episode=3)
# collect 1 episode for the first env, 3 for the third env
collector.collect(n_episode=[1, 0, 3])
# collect at least 2 steps
collector.collect(n_step=2)
# collect episodes with visual rendering (the render argument is the
# sleep time between rendering consecutive frames)
collector.collect(n_episode=1, render=0.03)
# sample data with a given number of batch-size:
batch_data = collector.sample(batch_size=64)
# policy.learn(batch_data) # btw, vanilla policy gradient only
# supports on-policy training, so here we pick all data in the buffer
batch_data = collector.sample(batch_size=0)
policy.learn(batch_data)
# on-policy algorithms use the collected data only once, so here we
# clear the buffer
collector.reset_buffer()
For the scenario of collecting data from multiple environments to a single
    buffer, the cache buffers will turn on automatically. It may return more
    data than the given limit.
.. note::
Please make sure the given environment has a time limitation.
"""
def __init__(self, policy, env, buffer=None, episodic=False, stat_size=5, **kwargs):
super().__init__()
if not isinstance(env, BaseVectorEnv):
self.env = VectorEnv([env])
else:
self.env = env
self._collect_step = 0
self._collect_episode = 0
self._collect_time = 0
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._episodic = episodic
if self._episodic and buffer is not None:
self._cached_buf = [ReplayBuffer(buffer._maxsize // self.env.env_num) for _ in range(self.env.env_num)]
self.stat_size = stat_size
self._step_speed = collections.deque([], self.stat_size)
self._episode_speed = collections.deque([], self.stat_size)
self._episode_length = collections.deque([], self.stat_size)
self._episode_reward = collections.deque([], self.stat_size)
self.reset()
def reset(self):
"""Reset all related variables in the collector."""
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self._step_speed.clear()
self._episode_speed.clear()
self._episode_length.clear()
self._episode_reward.clear()
self._collect_step = 0
self._collect_episode = 0
self._collect_time = 0
def reset_buffer(self):
"""Reset the main data buffer."""
        if self._episodic and self.buffer is not None:
            [b.reset() for b in self._cached_buf]
if self.buffer is not None:
self.buffer.reset()
def get_env_num(self):
"""Return the number of environments the collector has."""
return self.env.env_num
def reset_env(self):
"""Reset all of the environment(s)' states and reset all of the cache
buffers (if need).
"""
self._obs = self.env.reset()
self._act = self._rew = self._done = None
self._hidden_next = self._hidden = np.zeros((self.get_env_num(), HIDDEN_SIZE))
self.reward = np.zeros(self.env.env_num)
self.length = np.zeros(self.env.env_num)
def seed(self, seed=None):
"""Reset all the seed(s) of the given environment(s)."""
return self.env.seed(seed)
def render(self, **kwargs):
"""Render all the environment(s)."""
return self.env.render(**kwargs)
def close(self):
"""Close the environment(s)."""
self.env.close()
def _to_numpy(self, x):
"""Return an object without torch.Tensor."""
if isinstance(x, torch.Tensor):
return x.cpu().numpy()
elif isinstance(x, dict):
for k in x:
if isinstance(x[k], torch.Tensor):
x[k] = x[k].cpu().numpy()
return x
elif isinstance(x, Batch):
x.to_numpy()
return x
return x
def collect(self, n_step=0, n_episode=0, sampling=False, render=None):
"""Collect a specified number of step or episode.
:param int n_step: how many steps you want to collect.
:param n_episode: how many episodes you want to collect (in each
environment).
:type n_episode: int or list
        :param bool sampling: if ``True``, sample random actions from the
            environment instead of querying the policy.
        :param float render: the sleep time between rendering consecutive
            frames, defaults to ``None`` (no rendering).
.. note::
One and only one collection number specification is permitted,
either ``n_step`` or ``n_episode``.
:return: A dict including the following keys
* ``n/ep`` the collected number of episodes.
* ``n/st`` the collected number of steps.
* ``v/st`` the speed of steps per second.
* ``v/ep`` the speed of episode per second.
* ``rew`` the mean reward over collected episodes.
* ``len`` the mean length over collected episodes.
"""
warning_count = 0
start_time = time.time()
assert not (n_step and n_episode), "One and only one collection number specification is permitted!"
cur_step = 0
cur_episode = np.zeros(self.env.env_num)
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
batch_data = Batch(obs=self._obs, act=self._act, rew=self._rew, done=self._done)
            if sampling:
self._act = self.env.sample()
else:
with torch.no_grad():
result = self.policy(batch_data, self._hidden)
if hasattr(result, 'hidden') and result.hidden is not None:
self._hidden_next = result.hidden
if isinstance(result.act, torch.Tensor):
self._act = self._to_numpy(result.act)
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, _ = self.env.step(self._act)
if render is not None:
self.env.render()
if render > 0:
time.sleep(render)
self.length += 1
self.reward += self._rew
for i in range(self.env.env_num):
warning_count += 1
collection = Experience(
self._hidden[i], self._obs[i], self._act[i], self._rew[i], obs_next[i], self._done[i]
)
if not self._episodic:
cur_step += 1
if self.buffer is not None:
self.buffer.add(collection)
else:
self._cached_buf[i].add(collection)
                if self._done[i]:
                    if self._episodic:
                        cur_step += len(self._cached_buf[i])
                        if self.buffer is not None:
                            self.buffer.extend(self._cached_buf[i])
                        # Reset the cache so a finished episode is not re-added.
                        self._cached_buf[i].reset()
cur_episode[i] += 1
self._episode_reward.append(self.reward[i])
self._episode_length.append(self.length[i])
self.reward[i], self.length[i] = 0, 0
if sum(self._done):
ids = np.where(self._done)[0]
obs_next = self.env.reset(ids)
self._hidden_next[self._done] = 0.
self._obs = obs_next
self._hidden = self._hidden_next
if n_episode and np.sum(cur_episode) >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self._step_speed.append(cur_step / duration)
self._episode_speed.append(cur_episode / duration)
self._collect_step += cur_step
self._collect_episode += cur_episode
self._collect_time += duration
return {
'n/ep': cur_episode,
'n/st': cur_step,
'n/buffer': len(self.buffer) if self.buffer else 0,
'v/st': np.nanmean(self._step_speed),
'v/ep': np.nanmean(self._episode_speed) if self._collect_episode else 0,
'ep/reward': np.nanmean(self._episode_reward) if self._collect_episode else 0,
'ep/len': np.nanmean(self._episode_length) if self._collect_episode else 0,
}
def sample(self, batch_size):
"""Sample a data batch from the internal replay buffer. It will call
:meth:`~tianshou.policy.BasePolicy.process_fn` before returning
the final batch data.
:param int batch_size: ``0`` means it will extract all the data from
the buffer, otherwise it will extract the data with the given
batch_size.
"""
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from keystoneauth1 import loading as kaloading
from oslo_utils import importutils
from ironicclient.common.i18n import _
from ironicclient import exc
LOG = logging.getLogger(__name__)
# TODO(vdrok): remove in Stein
def convert_keystoneauth_opts(kwargs):
old_to_new_names = {
('os_auth_token',): 'token',
('os_username',): 'username',
('os_password',): 'password',
('os_auth_url',): 'auth_url',
('os_project_id',): 'project_id',
('os_project_name',): 'project_name',
('os_tenant_id',): 'tenant_id',
('os_tenant_name',): 'tenant_name',
('os_region_name',): 'region_name',
('os_user_domain_id',): 'user_domain_id',
('os_user_domain_name',): 'user_domain_name',
('os_project_domain_id',): 'project_domain_id',
('os_project_domain_name',): 'project_domain_name',
('os_service_type',): 'service_type',
('os_endpoint_type',): 'interface',
('ironic_url',): 'endpoint',
('os_cacert', 'ca_file'): 'cafile',
('os_cert', 'cert_file'): 'certfile',
('os_key', 'key_file'): 'keyfile'
}
for olds, new in old_to_new_names.items():
for old in olds:
if kwargs.get(old):
LOG.warning('The argument "%s" passed to get_client is '
'deprecated and will be removed in Stein release, '
'please use "%s" instead.', old, new)
kwargs.setdefault(new, kwargs[old])
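# Example: get_client('1', os_username='admin') behaves as if username='admin'
# had been passed, after emitting a deprecation warning.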
def get_client(api_version, auth_type=None, os_ironic_api_version=None,
max_retries=None, retry_interval=None, **kwargs):
"""Get an authenticated client, based on the credentials.
:param api_version: the API version to use. Valid value: '1'.
:param auth_type: type of keystoneauth auth plugin loader to use.
:param os_ironic_api_version: ironic API version to use.
:param max_retries: Maximum number of retries in case of conflict error
:param retry_interval: Amount of time (in seconds) between retries in case
of conflict error.
:param kwargs: all the other params that are passed to keystoneauth.
"""
# TODO(TheJulia): At some point, we should consider possibly noting
# the "latest" flag for os_ironic_api_version to cause the client to
# auto-negotiate to the greatest available version, however we do not
# have the ability yet for a caller to cap the version, and will hold
# off doing so until then.
convert_keystoneauth_opts(kwargs)
if auth_type is None:
if 'endpoint' in kwargs:
if 'token' in kwargs:
auth_type = 'admin_token'
else:
auth_type = 'none'
elif 'token' in kwargs and 'auth_url' in kwargs:
auth_type = 'token'
else:
auth_type = 'password'
session = kwargs.get('session')
if not session:
loader = kaloading.get_plugin_loader(auth_type)
loader_options = loader.get_options()
# option.name looks like 'project-name', while dest will be the actual
# argument name to which the value will be passed to (project_name)
auth_options = [o.dest for o in loader_options]
# Include deprecated names as well
auth_options.extend([d.dest for o in loader_options
for d in o.deprecated])
auth_kwargs = {k: v for (k, v) in kwargs.items() if k in auth_options}
auth_plugin = loader.load_from_options(**auth_kwargs)
# Let keystoneauth do the necessary parameter conversions
session_loader = kaloading.session.Session()
session_opts = {k: v for (k, v) in kwargs.items() if k in
[o.dest for o in session_loader.get_conf_options()]}
session = session_loader.load_from_options(auth=auth_plugin,
**session_opts)
endpoint = kwargs.get('endpoint')
if not endpoint:
try:
# endpoint will be used to get hostname
# and port that will be used for API version caching.
endpoint = session.get_endpoint(
service_type=kwargs.get('service_type') or 'baremetal',
interface=kwargs.get('interface') or 'publicURL',
region_name=kwargs.get('region_name')
)
except Exception as e:
raise exc.AmbiguousAuthSystem(
_('Must provide Keystone credentials or user-defined '
'endpoint, error was: %s') % e)
ironicclient_kwargs = {
'os_ironic_api_version': os_ironic_api_version,
'max_retries': max_retries,
'retry_interval': retry_interval,
'session': session,
'endpoint_override': endpoint
}
return Client(api_version, **ironicclient_kwargs)
def Client(version, *args, **kwargs):
module = importutils.import_versioned_module('ironicclient',
version, 'client')
client_class = getattr(module, 'Client')
return client_class(*args, **kwargs)
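# Example usage (illustrative credentials only):
#   client = get_client('1', auth_type='password', username='admin',
#                       password='secret', auth_url='http://keystone:5000/v3',
#                       project_name='admin')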
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 13:56:25 2020
Authors: Pavan Kota, Daniel LeJeune
Reference:
P. K. Kota, D. LeJeune, R. A. Drezek, and R. G. Baraniuk, "Extreme Compressed
Sensing of Poisson Rates from Multiple Measurements," Mar. 2021.
arXiv ID:
"""
# Multiple Measurement Vector Compressed Sensing
from abc import ABC, abstractmethod
import numpy as np
import pickle
class SignalGenerator(ABC):
"""Methods for generating X
"""
@abstractmethod
def xgen(self, N, D, k):
"""Generate an N x D signal matrix X
Parameters
----------
N: int
Dimension of signals
D: int
Number of N-dimensional signals to generate
k: int
Sparsity level. Number of nonzero elements in lambda^* (true Poisson rates)
Returns
-------
X : (N, D) ndarray
Samples of X for each column of Y.
"""
pass
class MMVP(SignalGenerator):
""" Multiple Measurement Vector with Poisson constraints (MMVP) signal generator
"""
def __init__(self, N, D, k, lamTot, initialSeed=None):
"""
New Parameters
----------
lamTot: float or int
Sum(lambda^*). Corresponds with, for example, average analyte number per observation
initialSeed: int, optional
Seed for restoring RNG if X's are generated multiple times in same
script and generating the initial X's again is desired.
"""
        if k > N:
            raise ValueError("k must not exceed N")
self.N = N
self.D = D
self.k = k
self.lamTot = lamTot
self.initialSeed = initialSeed
#np.random.seed(initialSeed)
self._generator = np.random.default_rng(initialSeed)
def set_lambda(self):
lambdaStar = np.zeros(self.N)
# Choose sparse rows randomly
rowInds = np.random.choice(self.N, self.k, replace=False)
# Set lambda randomly
lambdaStar[rowInds] = self.get_mags()
return lambdaStar
def xgen(self):
lambdaStar = self.set_lambda()
# Generate X's
X = self._generator.poisson(lambdaStar[:, None], (self.N, self.D))
return X, lambdaStar
def gen_trials(self, numTrials, seed=None, savePath=None):
"""
Parameters
----------
numTrials : int
Number of trials to generate sensing matrices for
seed : int, optional
Random seed initial state. The default is None.
savePath: string or None
Path including filename (.pickle file type) to store generated
X's and lambda^*'s. If None, signals are not saved.
"""
# Which to use? Need consistent selection of k rows too
if seed is None:
np.random.seed(self.initialSeed)
self._generator = np.random.default_rng(self.initialSeed)
else:
np.random.seed(seed)
self._generator = np.random.default_rng(seed)
allX = np.zeros((self.N, self.D, numTrials))
allLambdaStars = np.zeros((self.N, numTrials))
for i in range(numTrials):
allX[:,:,i], allLambdaStars[:,i] = self.xgen()
if savePath is not None:
allSignals = {'signalModelUsed': self, 'allX': allX, 'allLambdaStars': allLambdaStars}
with open(savePath, 'wb') as fileWrite:
pickle.dump(allSignals, fileWrite)
return allX, allLambdaStars
def get_mags(self):
mags = self._generator.uniform(size=self.k)
return mags / np.sum(mags) * self.lamTot
class MMVPConstantLambda(MMVP):
def __init__(self, N, D, k, lambda_val, initialSeed=None):
"""
New Parameters
----------
lambda_val: float or int
Value to set any nonzero value of lambda to
"""
        if k > N:
            raise ValueError("k cannot exceed N")
self.N = N
self.D = D
self.k = k
self.lambda_val = lambda_val
self.initialSeed = initialSeed
self._generator = np.random.default_rng(initialSeed)
def get_mags(self):
return np.ones(self.k) * self.lambda_val
class MMVPInputLambda(MMVP):
def __init__(self, D, lambda_vec, initialSeed=None):
"""
New Parameters
----------
lambda_vec: numpy array, shape (N,)
Fixed lambda vector
"""
self.lam = lambda_vec
self.N = np.size(lambda_vec)
self.D = D
self.initialSeed = initialSeed
self._generator = np.random.default_rng(initialSeed)
def set_lambda(self):
return self.lam
def get_mags(self):
pass
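# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the MMVP generator above; the
# parameter values below are illustrative, not taken from the paper.
if __name__ == "__main__":
    gen = MMVP(N=100, D=20, k=5, lamTot=10.0, initialSeed=0)
    X, lam = gen.xgen()
    print(X.shape, lam.shape)        # (100, 20) (100,)
    allX, allLam = gen.gen_trials(numTrials=3, seed=1)
    print(allX.shape, allLam.shape)  # (100, 20, 3) (100, 3)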
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from lazytorch import (
LazyConv2dInChannelModule,
create_lazy_signature,
NamedSequential,
)
from .depth_sep_conv import DepthwiseConv2d, PointwiseConv2d
from .squeeze_excitation import SqueezeExcitation
from typing import Optional
class InvertedBottleneck(nn.Module):
"""An inverted bottleneck block with optional squeeze-and-excitiation
layer. References:
- MobileNetV2 (https://arxiv.org/abs/1801.04381)
- MnasNet (https://arxiv.org/abs/1807.11626)"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
expansion_ratio: int = 1,
use_se: bool = False,
se_reduction_ratio: Optional[int] = None,
norm_layer: nn.Module = nn.BatchNorm2d,
activation: nn.Module = nn.ReLU,
):
super().__init__()
self.stride = stride
self.out_channels = out_channels
mid_channels = in_channels * expansion_ratio
self.layers = NamedSequential(
pw=PointwiseConv2d(
in_channels,
mid_channels,
norm_layer=norm_layer,
activation=activation,
),
dw=DepthwiseConv2d(
mid_channels,
kernel_size=kernel_size,
stride=stride,
norm_layer=norm_layer,
activation=activation,
),
se=nn.Identity(),
bottleneck=nn.Conv2d(mid_channels, out_channels, 1),
)
if use_se:
self.layers.se = SqueezeExcitation(
mid_channels, reduction_ratio=se_reduction_ratio
)
def forward(self, x: torch.Tensor):
out = self.layers(x)
if x.shape == out.shape:
out += x
return out
@create_lazy_signature(exclude=["in_channels"])
class LazyInvertedBottleneck(LazyConv2dInChannelModule, InvertedBottleneck):
"""Lazily-initialized InvertedBottleneck module"""
|
nilq/baby-python
|
python
|
from rest_framework import generics, status
from rest_framework import viewsets
from rest_framework.exceptions import (
ValidationError
)
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from .models import (
Category, Recipe
)
from .serializers import (
CategorySerializer, RecipeSerializer,
)
class CategoryViewSet(viewsets.ModelViewSet):
permission_classes = (AllowAny,)
serializer_class = CategorySerializer
def get_queryset(self):
# list categories
queryset = Category.objects.all()
return queryset
def get_object(self):
if self.kwargs.get("pk"):
category = Category.objects.filter(pk=self.kwargs.get("pk")).first()
if not category:
                msg = 'Category with that id does not exist'
raise ValidationError(msg)
return category
def create(self, request):
# check if category already exists
category = Category.objects.filter(
name=request.data.get('name'),
)
if category:
msg='Category with that name already exists'
raise ValidationError(msg)
return super().create(request)
def destroy(self, request, *args, **kwargs):
category = Category.objects.filter(pk=self.kwargs["pk"]).first()
if not category:
            msg = 'Category with that id does not exist'
raise ValidationError(msg)
return super().destroy(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
category = Category.objects.filter(pk=self.kwargs["pk"]).first()
if not category:
            msg = 'Category with that id does not exist'
raise ValidationError(msg)
return super().update(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save()
class CategoryRecipes(generics.ListCreateAPIView):
permission_classes = (AllowAny,)
serializer_class = RecipeSerializer
def get_queryset(self):
if self.kwargs.get("category_pk"):
category = Category.objects.filter(pk=self.kwargs["category_pk"]).first()
if not category:
                msg = 'Category with that id does not exist'
raise ValidationError(msg)
queryset = Recipe.objects.filter(
category=category
)
return queryset
# def create(self, request, *args, **kwargs):
# serializer = self.get_serializer(data=request.data)
# if not serializer.is_valid():
# return Response(
# serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# category = Category.objects.get(pk=self.kwargs["category_pk"])
# item = Recipe.objects.create(
# name=serializer.data['name'],
# description=serializer.data['description'],
# ingredients=serializer.data['ingredients'],
# image=serializer.data['image'],
# directions=serializer.data['directions'],
# is_public=serializer.data['is_public'],
# category=category,
# )
# result = self.serializer_class(item)
# return Response(result.data, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
category = Category.objects.filter(pk=self.kwargs["category_pk"]).first()
if not category:
            msg = 'Category with that id does not exist'
raise ValidationError(msg)
serializer.save(category=category)
class SingleCategoryRecipe(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (AllowAny,)
serializer_class = RecipeSerializer
def get_queryset(self):
if self.kwargs.get("category_pk") and self.kwargs.get("pk"):
category = Category.objects.filter(pk=self.kwargs["category_pk"]).first()
if not category:
                msg = 'Category with that id does not exist'
raise ValidationError(msg)
queryset = Recipe.objects.filter(
pk=self.kwargs["pk"],
category=category
)
if len(queryset) == 0:
                msg = 'Recipe with that id does not exist'
raise ValidationError(msg)
return queryset
class RecipesViewSet(viewsets.ModelViewSet):
permission_classes = (AllowAny,)
serializer_class = RecipeSerializer
def get_queryset(self):
queryset = Recipe.objects.all()
return queryset
    # NOTE: permission_classes is AllowAny, so creation is currently open to everyone
def create(self, request, *args, **kwargs):
return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save()
class PublicRecipes(generics.ListAPIView):
permission_classes = (AllowAny,)
serializer_class = RecipeSerializer
def get_queryset(self):
        queryset = Recipe.objects.filter(is_public=True)  # only public recipes, matching PublicRecipesDetail
return queryset
class PublicRecipesDetail(generics.RetrieveAPIView):
permission_classes = (AllowAny,)
serializer_class = RecipeSerializer
def get_queryset(self):
queryset = Recipe.objects.all().filter(is_public=True)
return queryset
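# --- Usage sketch (not part of the original module) ---
# A hedged example of wiring these views into a urls.py with a DRF router;
# the URL prefixes and names below are assumptions, not the project's own.
# from django.urls import path
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'categories', CategoryViewSet, basename='category')
# router.register(r'recipes', RecipesViewSet, basename='recipe')
# urlpatterns = router.urls + [
#     path('categories/<int:category_pk>/recipes/',
#          CategoryRecipes.as_view(), name='category-recipes'),
#     path('categories/<int:category_pk>/recipes/<int:pk>/',
#          SingleCategoryRecipe.as_view(), name='category-recipe-detail'),
# ]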
|
nilq/baby-python
|
python
|
import shutil
from tokenizers.normalizers import NFKC
from autonmt.preprocessing import tokenizers
from autonmt.bundle import utils
from autonmt.bundle.utils import *
def normalize_file(input_file, output_file, normalizer, force_overwrite, limit=None):
if force_overwrite or not os.path.exists(output_file):
lines = read_file_lines(input_file, autoclean=True)
lines = lines if not limit else lines[:limit]
lines = lines if not normalizer else [normalizer(line) for line in lines]
write_file_lines(lines=lines, filename=output_file, insert_break_line=True, encoding="utf-8")
assert os.path.exists(output_file)
def pretokenize_file(input_file, output_file, lang, force_overwrite, **kwargs):
# Tokenize
if force_overwrite or not os.path.exists(output_file):
tokenizers.moses_tokenizer(input_file=input_file, output_file=output_file, lang=lang)
assert os.path.exists(output_file)
def encode_file(ds, input_file, output_file, lang, merge_vocabs, truncate_at, force_overwrite, **kwargs):
# Check if file exists
if force_overwrite or not os.path.exists(output_file):
# Apply preprocessing
# Copy file
if ds.subword_model in {None, "none"}:
shutil.copyfile(input_file, output_file)
elif ds.subword_model in {"bytes"}:
# Save file as UTF8 and make sure everything uses NFKC
lines = read_file_lines(input_file, autoclean=True)
lines = [NFKC().normalize_str(line) for line in lines]
lines = [" ".join([hex(x) for x in line.encode()]) for line in lines]
write_file_lines(lines=lines, filename=output_file, insert_break_line=True)
else:
# Select model
if merge_vocabs:
model_path = ds.get_vocab_file() + ".model"
else:
model_path = ds.get_vocab_file(lang=lang) + ".model"
# Encode files
tokenizers.spm_encode(spm_model_path=model_path, input_file=input_file, output_file=output_file)
# Truncate if needed
if truncate_at:
lines = read_file_lines(output_file, autoclean=True)
lines = [" ".join(line.split(' ')[:truncate_at]).strip() for line in lines]
write_file_lines(lines=lines, filename=output_file, insert_break_line=True)
        # Check that the output file exists
assert os.path.exists(output_file)
def decode_file(input_file, output_file, lang, subword_model, pretok_flag, model_vocab_path, force_overwrite,
remove_unk_hyphen=False, **kwargs):
if force_overwrite or not os.path.exists(output_file):
# Detokenize
if subword_model in {None, "none"}:
# Rename or copy files (tok==txt)
shutil.copyfile(input_file, output_file)
elif subword_model in {"bytes"}:
# Decode files
lines = read_file_lines(input_file, autoclean=True)
lines = [clean_file_line(bytes([int(x, base=16) for x in line.split(' ')])) for line in lines]
# Write files
write_file_lines(lines=lines, filename=output_file, insert_break_line=True)
else:
# Decode files
tokenizers.spm_decode(model_vocab_path + ".model", input_file=input_file, output_file=output_file)
# Remove the hyphen of unknown words when needed
if remove_unk_hyphen:
replace_in_file('▁', ' ', output_file)
# Detokenize with moses
if pretok_flag:
tokenizers.moses_detokenizer(input_file=output_file, output_file=output_file, lang=lang)
        # Check that the output file exists
assert os.path.exists(output_file)
def decode_lines(lines, lang, subword_model, pretok_flag, model_vocab_path, remove_unk_hyphen=False):
# Detokenize
if subword_model in {None, "none"}:
# Rename or copy files (tok==txt)
lines = lines
elif subword_model in {"bytes"}:
# Decode files
lines = [utils.clean_file_line(bytes([int(x, base=16) for x in line.split(' ')])) for line in lines]
else:
# Decode files
lines = tokenizers._spm_decode(lines, model_vocab_path + ".model")
# Remove the hyphen of unknown words when needed
if remove_unk_hyphen:
lines = [line.replace('▁', ' ') for line in lines]
# Detokenize with moses
if pretok_flag:
lines = tokenizers._moses_detokenizer(lines, lang=lang)
return lines
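# --- Illustration (not part of the original module) ---
# A minimal sketch of the "bytes" subword scheme used above: NFKC-normalise,
# encode to UTF-8, and write each byte as space-separated hex; decoding
# reverses the mapping exactly.
if __name__ == "__main__":
    line = NFKC().normalize_str("héllo")
    encoded = " ".join(hex(b) for b in line.encode())
    decoded = bytes(int(tok, base=16) for tok in encoded.split(" ")).decode()
    print(encoded)           # 0x68 0xc3 0xa9 0x6c 0x6c 0x6f
    print(decoded == line)   # True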
|
nilq/baby-python
|
python
|
"""PythonHere app."""
# pylint: disable=wrong-import-order,wrong-import-position
from launcher_here import try_startup_script
try:
try_startup_script() # run script entrypoint, if it was passed
except Exception as exc:
startup_script_exception = exc # pylint: disable=invalid-name
else:
startup_script_exception = None # pylint: disable=invalid-name
import asyncio
import os
from pathlib import Path
import sys
import threading
from typing import Any, Dict
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config, ConfigParser
from kivy.logger import Logger
from enum_here import ScreenName, ServerState
from exception_manager_here import install_exception_handler, show_exception_popup
from patches_here import monkeypatch_kivy
from server_here import run_ssh_server
from window_here import reset_window_environment
monkeypatch_kivy()
class PythonHereApp(App):
"""PythonHere main app."""
def __init__(self):
super().__init__()
self.server_task = None
self.settings = None
self.ssh_server_config_ready = asyncio.Event()
self.ssh_server_started = asyncio.Event()
self.ssh_server_connected = asyncio.Event()
self.ssh_server_namespace = {}
self.icon = "data/logo/logo-32.png"
@property
def upload_dir(self) -> str:
"""Path to the directory to use for uploaded data."""
root_dir = Path(self.user_data_dir or ".").resolve()
upload_dir = Path(root_dir) / "upload"
upload_dir.mkdir(exist_ok=True)
return str(upload_dir)
@property
def config_path(self) -> str:
"""Path to the application config file."""
root_dir = Path(self.user_data_dir or ".").resolve()
return str(root_dir / "config.ini")
def load_config(self) -> ConfigParser:
"""Returning the application configuration."""
Config.read(self.config_path) # Override the configuration file location
return super().load_config()
def build(self):
"""Initialize application UI."""
super().build()
install_exception_handler()
self.settings = self.root.ids.settings
self.ssh_server_namespace.update(
{
"app": self,
"root": self.root,
}
)
self.update_server_config_status()
if startup_script_exception:
Clock.schedule_once(
lambda _: show_exception_popup(startup_script_exception), 0
)
def run_app(self):
"""Run application and SSH server tasks."""
self.ssh_server_started = asyncio.Event()
self.server_task = asyncio.ensure_future(run_ssh_server(self))
return asyncio.gather(self.async_run_app(), self.server_task)
async def async_run_app(self):
"""Run app asynchronously."""
try:
await self.async_run(async_lib="asyncio")
Logger.info("PythonHere: async run completed")
except asyncio.CancelledError:
Logger.info("PythonHere: app main task canceled")
except Exception as exc:
Logger.exception(exc)
if self.server_task:
self.server_task.cancel()
if self.get_running_app():
self.stop()
await self.cancel_asyncio_tasks()
async def cancel_asyncio_tasks(self):
"""Cancel all asyncio tasks."""
tasks = [
task for task in asyncio.all_tasks() if task is not asyncio.current_task()
]
if tasks:
for task in tasks:
task.cancel()
await asyncio.wait(tasks, timeout=1)
def update_server_config_status(self):
"""Check and update value of the `ssh_server_config_ready`, update screen."""
def update():
if all(self.get_pythonhere_config().values()):
self.ssh_server_config_ready.set()
screen.update()
screen = self.root.ids.here_screen_manager
screen.current = ServerState.starting_server
self.root.switch_screen(ScreenName.here)
threading.Thread(name="update_server_config_status", target=update).start()
def get_pythonhere_config(self):
"""Return user settings for SSH server."""
return self.settings.get_pythonhere_config()
def update_ssh_server_namespace(self, namespace: Dict[str, Any]):
"""Update SSH server namespace."""
self.ssh_server_namespace.update(namespace)
def on_start(self):
"""App start handler."""
Logger.info("PythonHere: app started")
def on_stop(self):
"""App stop handler."""
Logger.info("PythonHere: app stopped")
def on_pause(self):
"""Pause mode request handler."""
return True
def on_ssh_connection_made(self):
"""New authenticated SSH client connected handler."""
Logger.info("PythonHere: new SSH client connected")
if not self.ssh_server_connected.is_set():
self.ssh_server_connected.set()
Logger.info("PythonHere: reset window environment")
self.ssh_server_namespace["root"] = reset_window_environment()
self.chdir(self.upload_dir)
def chdir(self, path: str):
"""Changes the working directory."""
Logger.info("PythonHere: change working directory to %s", path)
os.chdir(path)
sys.path.insert(0, path)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(PythonHereApp().run_app())
loop.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import random
#random.seed(1) # comment-out this line to change sequence each time
# Write a program that stores random DNA sequence in a string
# The sequence should be 30 nt long
# On average, the sequence should be 60% AT
# Calculate the actual AT fraction while generating the sequence
# Report the length, AT fraction, and sequence
seq = ''
at_count = 0
for i in range(30):
    n = random.randint(1, 10)
    # print(n, end=' ')  # debug trace; the sample run below omits these numbers
    if 1 <= n <= 3:
        seq += 'A'
        at_count += 1
    elif 4 <= n <= 6:
        seq += 'T'
        at_count += 1
    elif 7 <= n <= 8:
        seq += 'G'
    else:
        seq += 'C'
print(len(seq), at_count/len(seq), seq)
"""
python3 at_seq.py
30 0.6666666666666666 ATTACCGTAATCTACTATTAAGTCACAACC
"""
|
nilq/baby-python
|
python
|
import numpy as np
import typing as tp
import matplotlib.pyplot as plt
import pickle
import scipy.signal as signal
import shapely.geometry
import scipy.interpolate as interp
from taylor import PointAccumulator
from dataclasses import dataclass
def find_datapoints(image, start=0):
# _image = 255 - image
_image = image
window1 = signal.gaussian(50, 15)
window1_sum = window1.sum()
differentiator = PointAccumulator(num_lines=1)
x = np.linspace(0, 1, _image.shape[0])
for i in range(start, _image.shape[1]):
raw_signal = _image[:, i]
filtered_signal = signal.fftconvolve(raw_signal, window1, mode='same')/window1_sum
peaks = np.sort(signal.find_peaks(
filtered_signal,
prominence=5,
distance=100
)[0])
# peaks = sorted(tmp_peaks, key=lambda x: filtered_signal[x], reverse=True)[:4]
# yield i, filtered_signal[peaks]
if len(peaks) == 0:
continue
new_points = differentiator.add_point(i, peaks, look_back=3)
# Probably want to move away from generator. Use differentiator always
yield i, new_points # TODO: Return any number of points, and use separate method to filter
# yield i, peaks[:1] # TODO: Return any number of points, and use separate method to filter
        # Debug visualization: shows the raw/filtered column and detected peaks
        # on every iteration after the yield; comment out for non-interactive runs.
        fig, (ax1, ax2) = plt.subplots(2)
ax2.imshow(_image, cmap="gray")
ax2.axvline(i, color="r")
ax1.plot(raw_signal)
ax1.plot(filtered_signal, "--")
ax1.plot(peaks, filtered_signal[peaks], "x", linewidth=20)
plt.show()
plt.close(fig)
if __name__ == "__main__":
# contours = list(np.load("contours.npy", allow_pickle=True))
# take1(contours)
# take2(contours)
for contour_number in [3]:
contour_image = np.load(f"tmp_contours/image_contour{contour_number}.npy")
# plt.imshow(contour_image)
# plt.show()
# assert False
# print(contour_image.shape)
new_image = np.zeros(contour_image.shape)
point_list = []
x_list = []
y_list = []
for i, new_y in find_datapoints(contour_image, start=7300):
# point_list.append((i, new_y))
new_y = new_y[0]
new_image[int(new_y), i] = 255
x_list.append(i)
y_list.append(int(new_y))
fig, (ax1, ax2) = plt.subplots(2)
ax1.imshow(new_image)
x_arr = np.asarray(x_list, dtype=np.float_)
y_arr = np.asarray(y_list, dtype=np.float_)
y_arr -= y_arr.mean() # mean zero
y_arr *= -1 # flip
ax2.plot(x_arr, y_arr)
out_array = np.zeros((x_arr.size, 2))
out_array[:, 0] = x_arr
out_array[:, 1] = y_arr
np.save(f"tmp_lines/out_array{contour_number}", out_array)
plt.show()
# from scipy.signal import welch
# f, pxx = welch(y_arr, 1600e3)
# plt.loglog(f, pxx)
# plt.show()
# for i in range(100, contour_image.shape[1]):
# for i in range(100, 200):
# print(np.median(contour_image[i, :]))
|
nilq/baby-python
|
python
|
"""
Tests for the GeniusZone class
"""
import unittest
from unittest.mock import Mock
from geniushubclient.const import IMODE_TO_MODE, ZONE_MODE, ZONE_TYPE
from geniushubclient.zone import GeniusZone
class GeniusZoneDataStateTests(unittest.TestCase):
"""
Test for the GeniusZone Class, state data.
"""
_device_id = "Device Id"
_zone_name = "Zone Name"
raw_json = {
"iID": _device_id,
"strName": _zone_name,
"bIsActive": 0,
"bInHeatEnabled": 0,
"bOutRequestHeat": 0,
"fBoostSP": 0,
"fPV": 21.0,
"fPV_offset": 0.0,
"fSP": 14.0,
"iBoostTimeRemaining": 0,
"iFlagExpectedKit": 517,
"iType": ZONE_TYPE.OnOffTimer,
"iMode": ZONE_MODE.Off,
"objFootprint": {
"bIsNight": 0,
"fFootprintAwaySP": 14.0,
"iFootprintTmNightStart": 75600,
"iProfile": 1,
"lstSP": [{
"fSP": 16.0,
"iDay": 0,
"iTm": 0
}, {
"fSP": 14.0,
"iDay": 0,
"iTm": 23400
}, {
"fSP": 20.0,
"iDay": 0,
"iTm": 59700
}, {
"fSP": 14.0,
"iDay": 0,
"iTm": 75000
}, {
"fSP": 16.0,
"iDay": 0,
"iTm": 75600
}
],
"objReactive": {
"fActivityLevel": 0.0
}
},
"objTimer": [{
"fSP": 14.0,
"iDay": 0,
"iTm": -1
}],
"trigger": {
"reactive": 0,
"output": 0
},
"warmupDuration": {
"bEnable": "true",
"bEnableCalcs": "true",
"fRiseRate": 0.5,
"iLagTime": 2420,
"iRiseTime": 300,
"iTotalTime": 2720
},
"zoneReactive": {
"fActivityLevel": 0
},
"zoneSubType": 1
}
def setUp(self):
hub = Mock()
hub.api_version = 3
self.hub = hub
def test_when_bIsActive_is_false_then_state_bIsActive_false(self):
"Check that the bIsActive is correctly set to false"
self.raw_json["bIsActive"] = 0
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertFalse(genius_zone.data["_state"]["bIsActive"])
def test_when_bIsActive_is_true_then_state_bIsActive_true(self):
"Check that the bIsActive is correctly set to true"
self.raw_json["bIsActive"] = 1
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertTrue(genius_zone.data["_state"]["bIsActive"])
def test_when_bOutRequestHeat_is_false_then_output_false(self):
"Check that the bOutRequestHeat is correctly set to false"
self.raw_json["bOutRequestHeat"] = 0
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertEqual(genius_zone.data["output"], 0)
def test_when_bOutRequestHeat_is_true_then_output_true(self):
"Check that the bOutRequestHeat is correctly set to true"
self.raw_json["bOutRequestHeat"] = 1
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertEqual(genius_zone.data["output"], 1)
def test_when_iMode_set_then_state_mode_is_set_correctly(self):
"Check that the mode is set on the class"
for zone_mode, zone_mode_text in IMODE_TO_MODE.items():
with self.subTest(zone_mode=zone_mode, zone_mode_text=zone_mode_text):
self.raw_json["iMode"] = zone_mode
self.raw_json["zoneSubType"] = 1
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertEqual(genius_zone.data["mode"], zone_mode_text)
def test_when_iType_should_set_temperature_state_temperature_set_correctly(self):
"Check that the temperature is set for certain values of iType"
temperature = 20.0
self.raw_json["fPV"] = temperature
test_values = (
ZONE_TYPE.ControlSP,
ZONE_TYPE.TPI,
ZONE_TYPE.Manager
)
for zone_type in test_values:
with self.subTest(zone_type=zone_type):
self.raw_json["iType"] = zone_type
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertEqual(genius_zone.data["temperature"], temperature)
def test_when_iType_should_not_set_temperature_state_temperature_not_set(self):
"Check that the temperature is not set for certain values of iType"
self.raw_json["fPV"] = 20.0
test_values = (
ZONE_TYPE.OnOffTimer,
ZONE_TYPE.ControlOnOffPID,
ZONE_TYPE.Surrogate
)
for zone_type in test_values:
with self.subTest(zone_type=zone_type):
self.raw_json["iType"] = zone_type
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertFalse("temperature" in genius_zone.data)
def test_when_iType_should_set_setpoint_state_setpoint_set_correctly(self):
"Check that the setpoint is set for certain values of iType"
setpoint = 21.0
self.raw_json["fSP"] = setpoint
test_values = (
ZONE_TYPE.ControlSP,
ZONE_TYPE.TPI
)
for zone_type in test_values:
with self.subTest(zone_type=zone_type):
self.raw_json["iType"] = zone_type
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertEqual(genius_zone.data["setpoint"], setpoint)
def test_when_iType_should_not_set_setpoint_state_setpoint_not_set(self):
"Check that the setpoint is not set for certain values of iType"
self.raw_json["fSP"] = 21.0
test_values = (
ZONE_TYPE.Manager,
ZONE_TYPE.ControlOnOffPID,
ZONE_TYPE.Surrogate
)
for zone_type in test_values:
with self.subTest(zone_type=zone_type):
self.raw_json["iType"] = zone_type
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertFalse("setpoint" in genius_zone.data)
def test_when_iType_OnOffTimer_fSP_not_zero_setpoint_state_setpoint_set_true(self):
"""Check that the setpoint is set to true when iType is OnOffTimer
and fSP is not zero"""
self.raw_json["fSP"] = 1.0
self.raw_json["iType"] = ZONE_TYPE.OnOffTimer
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertTrue(genius_zone.data["setpoint"])
def test_when_iType_OnOffTimer_fSP_zero_setpoint_state_setpoint_set_false(self):
"""Check that the setpoint is set to false when iType is OnOffTimer
and fSP is zero"""
self.raw_json["fSP"] = 0.0
self.raw_json["iType"] = ZONE_TYPE.OnOffTimer
genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub)
self.assertFalse(genius_zone.data["setpoint"])
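# Standard unittest entry point so the suite can be run directly with python;
# added for convenience.
if __name__ == '__main__':
    unittest.main()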
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
path('allcomment/',views.allcomment, name = 'allcomment'),
path('allexpert/',views.allexpert, name = 'allexpert'),
path('apply/',views.apply, name = 'apply'),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import wx
import wx.xrc
import time
import pyperclip
import os
import sys
import platform
import data
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"PocLibrary", pos = wx.DefaultPosition, size = wx.Size( 300,150 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.Size( 300,150 ), wx.Size( 300,150 ) )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"请选择查询的模块:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2.Wrap( -1 )
bSizer1.Add( self.m_staticText2, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
m_comboBox1Choices = data.module_list
self.m_comboBox1 = wx.ComboBox( self, wx.ID_ANY, u"请选择!", wx.DefaultPosition, wx.Size( 150,-1 ), m_comboBox1Choices, 0 )
bSizer1.Add( self.m_comboBox1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.m_button1 = wx.Button( self, wx.ID_ANY, u"确定", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1.Add( self.m_button1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.m_button1.Bind(wx.EVT_BUTTON, self.select_module)
self.SetSizer( bSizer1 )
self.Layout()
self.Centre( wx.BOTH )
def select_module(self, event):
global module
module = self.m_comboBox1.GetValue()
if module in data.module_list:
win = MyFrame2(parent=None)
win.Show()
time.sleep(0.5)
self.Destroy()
else:
temp_win = MyFrame3(parent=None)
temp_win.Show()
def __del__( self ):
pass
###########################################################################
## Class MyFrame2
###########################################################################
class MyFrame2 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"PocLibrary - Produced by Coldsnap", pos = wx.DefaultPosition, size = wx.Size( 800,750 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.Size( 800,750 ), wx.Size( 800,750 ) )
wSizer1 = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"请选择查询的POC/EXP:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
wSizer1.Add( self.m_staticText3, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_comboBox2Choices = self.setchoices(module)
self.m_comboBox2 = wx.ComboBox( self, wx.ID_ANY, u"请选择!", wx.DefaultPosition, wx.Size( 500,-1 ), m_comboBox2Choices, 0 )
wSizer1.Add( self.m_comboBox2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button2 = wx.Button( self, wx.ID_ANY, u"确定", wx.DefaultPosition, wx.DefaultSize, 0 )
wSizer1.Add( self.m_button2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button2.Bind(wx.EVT_BUTTON, self.selectPoc)
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"漏洞信息:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
wSizer1.Add( self.m_staticText4, 0, wx.ALL, 5 )
self.m_textCtrl1 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(700, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE)
self.m_textCtrl1.Enable(True)
self.m_textCtrl1.SetMinSize(wx.Size(700, 200))
self.m_textCtrl1.SetMaxSize(wx.Size(700, 200))
wSizer1.Add(self.m_textCtrl1, 0, wx.ALL, 5)
self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"利用信息:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
wSizer1.Add( self.m_staticText5, 0, wx.ALL, 5 )
self.m_textCtrl2 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(400, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE)
self.m_textCtrl2.Enable(True)
self.m_textCtrl2.SetMinSize(wx.Size(700, 200))
self.m_textCtrl2.SetMaxSize(wx.Size(700, 200))
wSizer1.Add(self.m_textCtrl2, 0, wx.ALL, 5)
self.m_staticText71 = wx.StaticText( self, wx.ID_ANY, u"利用内容:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText71.Wrap( -1 )
wSizer1.Add( self.m_staticText71, 0, wx.ALL, 5 )
self.m_textCtrl3 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(700, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE)
self.m_textCtrl3.Enable(True)
self.m_textCtrl3.SetMinSize(wx.Size(700, 200))
self.m_textCtrl3.SetMaxSize(wx.Size(700, 200))
wSizer1.Add(self.m_textCtrl3, 0, wx.ALL, 5)
self.m_staticText9 = wx.StaticText( self, wx.ID_ANY, u"复制利用内容", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText9.Wrap( -1 )
wSizer1.Add( self.m_staticText9, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button7 = wx.Button( self, wx.ID_ANY, u"Copy", wx.DefaultPosition, wx.Size( 65,-1 ), 0 )
wSizer1.Add( self.m_button7, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button7.Bind(wx.EVT_BUTTON, self.copyCode)
self.m_staticText10 = wx.StaticText( self, wx.ID_ANY, u"重新选择模块", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText10.Wrap( -1 )
wSizer1.Add( self.m_staticText10, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button8 = wx.Button( self, wx.ID_ANY, u"Return", wx.DefaultPosition, wx.Size( 65,-1 ), 0 )
wSizer1.Add( self.m_button8, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button8.Bind(wx.EVT_BUTTON, self.back)
self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, u"退出程序", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText11.Wrap( -1 )
wSizer1.Add( self.m_staticText11, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button9 = wx.Button( self, wx.ID_ANY, u"Exit", wx.DefaultPosition, wx.Size( 65,-1 ), 0 )
wSizer1.Add( self.m_button9, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button9.Bind(wx.EVT_BUTTON, self.exit)
self.SetSizer( wSizer1 )
self.Layout()
self.Centre( wx.BOTH )
    # The module chosen in MyFrame1 is passed in so MyFrame2 can build matching choices
    def setchoices(self, module):
        # Map each module name to its choice list in data.py
        choices_by_module = {
            "Drupal": data.drupalchoice,
            "F5": data.f5choice,
            "Fastjson": data.fastjsonchoice,
            "Jboss": data.jbosschoice,
            "Nexus": data.nexuschoice,
            "Shiro": data.shirochoice,
            "Apache-Solr": data.solrchoice,
            "Spring": data.springchoice,
            "Struts2": data.struts2choice,
            "Tomcat": data.tomcatchoice,
            "Weblogic": data.weblogicchoice,
            "Linux-local": data.linuxchoice,
            "Webmin": data.webminchoice,
            "IIS": data.iischoice,
            "OA-System": data.oachoice,
            "IOT": data.iotchoice,
            "CMS": data.cmschoice,
            "Windows": data.winchioce,
            "WebFramework": data.webframechoice,
            "Others": data.otherchoice,
        }
        return choices_by_module.get(module)
    # Look up the concrete POC/EXP option selected in MyFrame2
    def selectPoc(self, event):
        choice = self.m_comboBox2.GetValue()  # renamed from "str" to avoid shadowing the built-in
        all_choices = (
            data.drupalchoice, data.f5choice, data.fastjsonchoice,
            data.jbosschoice, data.nexuschoice, data.shirochoice,
            data.solrchoice, data.springchoice, data.struts2choice,
            data.tomcatchoice, data.weblogicchoice, data.linuxchoice,
            data.webminchoice, data.iischoice, data.oachoice,
            data.iotchoice, data.cmschoice, data.winchioce,
            data.webframechoice, data.otherchoice,
        )
        if any(choice in choices for choices in all_choices):
            self.readfile(choice)
        else:
            temp_win = MyFrame3(parent=None)
            temp_win.Show()
    # On Windows, PyInstaller unpacks bundled resources into a temporary folder at runtime; return its path
def source_path(self, relative_path):
        # Running from a PyInstaller bundle?
        if getattr(sys, 'frozen', False):
            base_path = sys._MEIPASS  # only set in the frozen exe; not available when run from an IDE
else:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
    # Load the files that match the POC/EXP name passed back from MyFrame2
    def readfile(self, name):
        os_name = platform.system()
        if os_name == 'Windows':
            base = self.source_path('Library/') + module + "/"
        elif os_name == 'Darwin':
            base = os.getcwd() + "/Library/" + module + "/"
        else:
            return
        # The three panes show vulnerability info, usage info, and the payload itself
        with open(base + name + "_vul.txt", encoding="utf-8") as vuln_file:
            self.m_textCtrl1.SetValue(vuln_file.read())
        with open(base + name + ".txt", encoding="utf-8") as info_file:
            self.m_textCtrl2.SetValue(info_file.read())
        with open(base + name, encoding="utf-8") as code_file:
            self.m_textCtrl3.SetValue(code_file.read())
    # Event handler for the Copy button
def copyCode(self, event):
pyperclip.copy(self.m_textCtrl3.GetValue())
    # Event handler for the Return (back) button
def back(self, event):
win = MyFrame1(parent=None)
win.Show()
time.sleep(0.5)
self.Destroy()
    # Event handler for the Exit button
def exit(self, event):
time.sleep(0.5)
self.Destroy()
def __del__( self ):
pass
###########################################################################
## Class MyFrame3
###########################################################################
class MyFrame3 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 200,100 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.Size( 200,100 ), wx.Size( 200,100 ) )
bSizer3 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText19 = wx.StaticText( self, wx.ID_ANY, u"\n\n错误,请重新选择!", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText19.Wrap( -1 )
self.m_staticText19.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
bSizer3.Add( self.m_staticText19, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.BOTTOM|wx.RIGHT, 5 )
self.SetSizer( bSizer3 )
self.Layout()
self.Centre( wx.BOTH )
def __del__( self ):
pass
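# --- Launch sketch (not part of the original module) ---
# This file only defines the frames; a minimal, hedged entry point (assuming
# no separate launcher script exists elsewhere in the project) would be:
# if __name__ == '__main__':
#     app = wx.App(False)
#     MyFrame1(parent=None).Show()
#     app.MainLoop()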
|
nilq/baby-python
|
python
|
'''
'''
import os
import numpy as np
from provabgs import models as Models
def test_DESIspeculator():
''' script to test the trained speculator model for DESI
'''
# initiate desi model
Mdesi = Models.DESIspeculator()
# load test parameter and spectrum
test_theta = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.theta_test.npy')
test_logspec = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.logspectrum_fsps_test.npy')
for i in range(10):
print(1.-(Mdesi._emulator(test_theta[i]) - np.exp(test_logspec[i]))/np.exp(test_logspec[i]))
print('')
return None
if __name__=="__main__":
test_DESIspeculator()
|
nilq/baby-python
|
python
|
import datetime
import difflib
# import datefinder
from dateparser.search import search_dates
from dateutil.parser import parse
from SMELT.validators.twitter.tweets import get_tweets
from SMELT.Validation import Validator
# from twitterscraper import
import twint
def fetch_closest_matching_tweet(username, message, time):
    # Collect the user's tweets from the same calendar day
    tweets = []
    for tweet in get_tweets(username, pages=1):
        print(tweet['time'].date(), time.date())
        if tweet['time'].date() == time.date():
            tweets.append(tweet)
    messages = list(map(lambda x: x['text'], tweets))
    matches = difflib.get_close_matches(message, messages, cutoff=0.7)
    if not matches:
        # No sufficiently similar tweet on that date; previously this fell
        # through and returned the last tweet with confidence 1
        return None, 0
    text = matches[0]
    tweet = list(filter(lambda x: x['text'] == text, tweets))[0]
    conf = difflib.SequenceMatcher(None, text, message).ratio()
    return tweet, conf
class SimpleTwitterValidator(Validator):
display_name = ""
username = ""
body = ""
time = ""
conf = 0
failed = False
tweet = {}
tc = None
def __init__(self, image, **kwargs):
super().__init__(image, confidence=0.9, **kwargs)
if SimpleTwitterValidator.tc is None:
SimpleTwitterValidator.setup()
@staticmethod
def setup(config=None, user_list=()):
if config:
SimpleTwitterValidator.tc = config
else:
SimpleTwitterValidator.tc = twint.Config()
SimpleTwitterValidator.tc.Members_list = user_list
        # SimpleTwitterValidator.tc.Database  # no-op as written; left over from an unfinished config step
    def get_tweet_date(self):
        # Search the OCR'd screenshot text for anything that parses as a date;
        # dateparser's search_dates returns (matched_text, datetime) pairs.
        # (The earlier datefinder/parse() attempt was broken: datefinder was
        # never imported and parse() was called without arguments.)
        matches = search_dates(self.ocr.string) or []
        if not matches:
            raise ValueError('No date found in OCR text')
        return matches[0][1]
def handle(self):
print(self.ocr.lines)
username = self.ocr.lines[1].split('@')[-1]
message = ' '.join(self.ocr.chunks[1])
time = self.get_tweet_date()
print(time, username)
self.tweet, self.conf = fetch_closest_matching_tweet(username, message, time)
if self.tweet is None:
self.failed = True
def confidence(self):
return max(min(self.conf + 0.01, 1), 0)
def __str__(self):
return """
\rTWEET: %s
\rCONFIDENCE: %f
\rPASSING: %r
""" % (self.tweet, self.confidence(), self.passing())
|
nilq/baby-python
|
python
|
#
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Define the Collection Builder utilities for Landsat data products."""
import logging
import tarfile
from datetime import datetime
from pathlib import Path
from bdc_core.decorators.utils import working_directory
from ...config import Config
class LandsatProduct:
"""Define base class for handling Landsat data products."""
def __init__(self, scene_id: str):
"""Build a Landsat class."""
self.scene_id = scene_id
self._fragments = LandsatProduct.parse_scene_id(scene_id)
@property
def scene_fragments(self):
if self._fragments is None:
self._fragments = LandsatProduct.parse_scene_id(self.scene_id)
return self._fragments
@staticmethod
def parse_scene_id(scene_id: str):
"""Parse a Landsat Scene Identifier."""
fragments = scene_id.split('_')
if len(fragments) != 7:
raise ValueError('Invalid scene id Landsat')
return fragments
@property
def id(self) -> str:
"""Retrieve Landsat Collection ID on Brazil Data Cube."""
raise NotImplementedError()
@property
def level(self) -> int:
"""Retrieve Landsat Collection Level."""
raise NotImplementedError()
def satellite(self) -> str:
"""Retrieve scene satellite."""
part = self._fragments[0]
return part[-2:]
def tile_id(self) -> str:
"""Retrieve Landsat scene Path row."""
return self._fragments[2]
def source(self) -> str:
"""Retrieve Landsat source part from scene id."""
return self._fragments[0]
def sensing_date(self) -> datetime:
"""Retrieve Landsat scene sensing date."""
return datetime.strptime(self._fragments[3], '%Y%m%d')
    def get_band_map(self) -> dict:
        """Retrieve the band name mapping for the product."""
        raise NotImplementedError()
def google_path(self) -> Path:
"""Retrieve a formal path for Landsat on Google Provider.
Example:
>>> scene = LandsatDigitalNumber08('LC08_L1GT_044034_20130330_20170310_01_T2')
>>> print(str(scene.google_path()))
... 'LC08/01/044/034/LC08_L1GT_044034_20130330_20170310_01_T2'
"""
first_part = Path(self._fragments[0])
path = self._fragments[2][:3]
row = self._fragments[2][-3:]
path = first_part / '01' / path / row / self.scene_id
return path
def path(self, prefix=Config.DATA_DIR):
"""Retrieve relative path on Brazil Data Cube cluster.
Example:
>>> scene = LandsatDigitalNumber08('LC08_L1GT_044034_20130330_20170310_01_T2')
>>> print(str(scene.path()))
... '/gfs/Repository/Archive/LC8DN/2013-03/044034'
"""
year_month = self.sensing_date().strftime('%Y-%m')
scene_path = Path(prefix or '') / 'Repository/Archive' / self.id / year_month / self.tile_id()
return scene_path
def compressed_file(self):
"""Retrieve path to the compressed file (L1)."""
year_month = self.sensing_date().strftime('%Y-%m')
product_version = int(self._fragments[0][-2:])
if product_version == 8:
collection = 'LC8'
else:
collection = '{}{}'.format(self._fragments[0][:2], product_version)
scene_path = Path(Config.DATA_DIR) / 'Repository/Archive' / collection / year_month / self.tile_id()
return scene_path / '{}.tar.gz'.format(self.scene_id)
def compressed_file_bands(self):
relative_path = self.compressed_file().parent
files = [
relative_path / '{}_{}.TIF'.format(self.scene_id, band)
for band in self.get_band_map().values()
]
files.append(relative_path / '{}_ANG.txt'.format(self.scene_id))
files.append(relative_path / '{}_MTL.txt'.format(self.scene_id))
return files
def get_files(self):
"""Try to find of file names from Brazil Data Cube Cluster.
Note:
The scene must be published in order to retrieve the file list.
Example:
>>> scene = LandsatDigitalNumber08('LC08_L1TP_220069_20180618_20180703_01_T1')
>>> print(str(scene.path()))
... ['/gfs/Repository/Archive/LC8DN/2018-06/220069/LC08_L1TP_220069_20180618_20180703_01_T1_B1.TIF',
... '/gfs/Repository/Archive/LC8DN/2018-06/220069/LC08_L1TP_220069_20180618_20180703_01_T1_B2.TIF']
"""
scene_path = self.path()
scene_id_without_processing_date = '{}_*_{}*'.format(
'_'.join(self._fragments[:4]),
'_'.join(self._fragments[-2:])
)
logging.debug('Searching on {} with {}'.format(str(scene_path), scene_id_without_processing_date))
files = scene_path.glob(scene_id_without_processing_date)
return list([f for f in files if f.suffix.lower() == '.tif'])
class LandsatDigitalNumber08(LandsatProduct):
"""Landsat 8 Digital Number."""
id = 'LC8DN'
level = 1
def get_band_map(self) -> dict:
return dict(
coastal='B1', blue='B2', green='B3', red='B4', nir='B5', swir1='B6', swir2='B7',
quality='BQA', panchromatic='B8', cirrus='B9', tirs1='B10', tirs2='B11'
)
class LandsatSurfaceReflectance08(LandsatProduct):
"""Landsat 8 Surface Reflectance."""
id = 'LC8SR'
level = 2
def get_band_map(self) -> dict:
return dict(
coastal='sr_band1', blue='sr_band2', green='sr_band3', red='sr_band4', nir='sr_band5',
swir1='sr_band6', swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4'
)
class LandsatNBAR08(LandsatProduct):
"""Landsat 8 Nadir BRDF Adjusted Reflectance."""
id = 'LC8NBAR'
level = 3
def get_band_map(self) -> dict:
return dict(
blue='sr_band2', green='sr_band3', red='sr_band4', nir='sr_band5',
swir1='sr_band6', swir2='sr_band7', quality='pixel_qa'
)
class LandsatDigitalNumber07(LandsatProduct):
"""Landsat 7 Digital Number."""
id = 'L7DN'
level = 1
def get_band_map(self) -> dict:
return dict(
blue='B1', green='B2', red='B3', nir='B4', swir1='B5', tirs1='B6_VCID_1', tirs2='B6_VCID_2',
swir2='B7', panchromatic='B8', quality='BQA'
)
class LandsatSurfaceReflectance07(LandsatProduct):
"""Landsat 7 Surface Reflectance."""
id = 'L7SR'
level = 2
def get_band_map(self) -> dict:
return dict(
blue='sr_band1', green='sr_band2', red='sr_band3', nir='sr_band4', swir1='sr_band5',
swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4'
)
class LandsatDigitalNumber05(LandsatProduct):
"""Landsat 5 Digital Number."""
id = 'L5DN'
level = 1
def get_band_map(self) -> dict:
return dict(
blue='B1', green='B2', red='B3', nir='B4', swir1='B5',
tirs='B6', swir2='B7', quality='BQA'
)
class LandsatSurfaceReflectance05(LandsatProduct):
"""Landsat 5 Surface Reflectance."""
id = 'L5SR'
level = 2
def get_band_map(self) -> dict:
return dict(
blue='sr_band1', green='sr_band2', red='sr_band3', nir='sr_band4', swir1='sr_band5',
swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4'
)
class LandsatFactory:
"""Define a factory to identify a Landsat product based on scene identifier."""
map = dict(
l1=dict(),
l2=dict(),
l3=dict()
)
def register(self):
"""Initialize factory object."""
self.map['l1'][LandsatDigitalNumber05.id] = LandsatDigitalNumber05
self.map['l2'][LandsatSurfaceReflectance05.id] = LandsatSurfaceReflectance05
self.map['l1'][LandsatDigitalNumber07.id] = LandsatDigitalNumber07
self.map['l2'][LandsatSurfaceReflectance07.id] = LandsatSurfaceReflectance07
self.map['l1'][LandsatDigitalNumber08.id] = LandsatDigitalNumber08
self.map['l2'][LandsatSurfaceReflectance08.id] = LandsatSurfaceReflectance08
self.map['l3'][LandsatNBAR08.id] = LandsatNBAR08
def get_from_collection(self, collection: str):
"""Retrieve the respective Landsat driver from given collection."""
for drivers_by_level in self.map.values():
for driver_name in drivers_by_level:
if collection == driver_name:
return drivers_by_level[driver_name]
raise ValueError('Not found a valid driver for {}.'.format(collection))
def get_from_sceneid(self, scene_id: str, level=1) -> LandsatProduct:
"""Retrieve the respective Landsat driver from given scene id."""
fragments = LandsatProduct.parse_scene_id(scene_id)
drivers_by_level = self.map.get('l{}'.format(level)) or dict()
scene_satellite = int(fragments[0][-2:])
for key in drivers_by_level:
satellite = key[1]
if not satellite.isdigit():
satellite = key[2]
satellite = int(satellite)
if scene_satellite == satellite:
driver = drivers_by_level[key]
if driver.level == level:
return driver(scene_id)
raise ValueError('Not found a valid driver for {}'.format(scene_id))
factory = LandsatFactory()
def compress_landsat_scene(scene: LandsatProduct, data_dir: str):
"""Compress the Landsat files to tar.gz.
Args:
scene - Landsat Product
data_dir - Path to search for files
"""
try:
context_dir = Path(data_dir)
if not context_dir.exists() or not context_dir.is_dir():
raise IOError('Invalid directory to compress Landsat. "{}"'.format(data_dir))
compressed_file_path = Path(data_dir) / scene.compressed_file().name
files = scene.compressed_file_bands()
logging.debug('Compressing {}'.format(str(compressed_file_path)))
# Create compressed file and make available
with tarfile.open(compressed_file_path, 'w:gz') as compressed_file:
with working_directory(str(context_dir)):
for f in files:
compressed_file.add(f.name)
except BaseException:
logging.error('Could not compress {}.tar.gz'.format(scene.scene_id), exc_info=True)
raise
return compressed_file_path
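# --- Usage sketch (not part of the original module) ---
# Resolving a driver via the factory; note register() must run once before
# lookups (it is not called at import time).
# factory.register()
# scene = factory.get_from_sceneid('LC08_L1GT_044034_20130330_20170310_01_T2', level=1)
# print(scene.id, scene.tile_id(), scene.sensing_date().date())  # LC8DN 044034 2013-03-30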
|
nilq/baby-python
|
python
|
"""Custom CSV-related functionality."""
import csv
import os
def create_csv():
    """Create a new csv to store git-geo results.
    Delete any existing csv and then create a new one.
    Args:
        None
    Returns:
        None
    """
# delete csv if it already exists
filename = "git-geo-results.csv"
if os.path.exists(filename):
os.remove(filename)
# Create new csv file with column names
with open(filename, "w") as file:
fieldnames = ["pkg", "username", "location"]
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
def add_committer_to_csv(pkg, username, location):
"""Write committer info to existing csv file
Use to create dataset of location data for analysis.
Args:
pkg - package name
username - GitHub username
location - Geographic info from GitHub profile
Returns:
        None
    """
    with open("git-geo-results.csv", "a", newline="") as file:
fieldnames = ["pkg", "username", "location"]
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writerow({"pkg": pkg, "username": username, "location": location})
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, division, print_function, with_statement
from __future__ import unicode_literals
from tornado import ioloop, web, websocket, httpserver, concurrent
from collections import defaultdict
import mock
class DeepstreamHandler(websocket.WebSocketHandler):
connections = defaultdict(set)
received_messages = defaultdict(list)
sent_messages = defaultdict(list)
callbacks = defaultdict(mock.Mock)
def open(self):
self._path = self.request.path
self._messages = []
DeepstreamHandler.connections[self._path].add(self)
self._msg_future = None
self._close_future = None
def on_message(self, message):
DeepstreamHandler.received_messages[self._path].append(message)
if self._msg_future:
self._msg_future.set_result(message)
def write_message(self, message):
DeepstreamHandler.sent_messages[self._path].append(message)
return super(DeepstreamHandler, self).write_message(message)
def on_close(self):
DeepstreamHandler.connections[self._path].remove(self)
if self._close_future:
self._close_future.set_result(True)
def message_future(self):
self._msg_future = concurrent.Future()
return self._msg_future
def close_future(self):
self._close_future = concurrent.Future()
return self._close_future
def _connections(request_path):
return DeepstreamHandler.connections[request_path]
def _sent_messages(request_path):
return DeepstreamHandler.sent_messages[request_path]
def _received_messages(request_path):
return DeepstreamHandler.received_messages[request_path]
def _num_connection(request_path):
return len(_connections(request_path))
def _create_server(port, path):
application = web.Application([
(path, DeepstreamHandler),
])
server = httpserver.HTTPServer(application)
server.listen(port)
return server
def before_all(context):
context.uid_patcher = mock.patch("deepstreampy.utils.get_uid",
return_value="<UID>")
context.uid_patcher.start()
def after_all(context):
context.uid_patcher.stop()
def after_step(context, step):
if "the server sends the message" in step.name:
context.io_loop.call_later(0.03, context.io_loop.stop)
context.io_loop.start()
def before_scenario(context, scenario):
if ioloop.IOLoop.initialized():
context.io_loop = ioloop.IOLoop.current()
else:
context.io_loop = ioloop.IOLoop(make_current=True)
context.server = None
context.other_server = None
DeepstreamHandler.connections.clear()
DeepstreamHandler.received_messages.clear()
DeepstreamHandler.sent_messages.clear()
DeepstreamHandler.callbacks.clear()
context.create_server = _create_server
context.num_connections = _num_connection
context.connections = _connections
context.sent_messages = _sent_messages
context.received_messages = _received_messages
context.client = None
context.client_errors = []
context.event_callbacks = {}
context.has_callbacks = {}
context.snapshot_callbacks = {}
context.subscribe_callback = None
context.presence_callback = None
context.presence_query_callback = None
context.rpc_provide_callback = None
context.rpc_request_callback = None
context.listen_callback = None
context.rpc_response = None
context.records = {}
context.write_acknowledge = mock.Mock()
context.login_future = None
def after_scenario(context, scenario):
context.io_loop.clear_current()
context.io_loop.close(all_fds=True)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import json
import logging
from pathlib import Path
from questionary import prompt
from ... import constants as C
from ...core import display
from ...core.app import App
from ...core.arguments import get_args
from ...core.crawler import Crawler
from .open_folder_prompt import display_open_folder
logger = logging.getLogger(__name__)
def resume_session():
args = get_args()
output_path = args.resume or C.DEFAULT_OUTPUT_PATH
resumable_meta_data = []
for meta_file in Path(output_path).glob('**/' + C.META_FILE_NAME):
with open(meta_file, 'r', encoding="utf-8") as file:
data = json.load(file)
if 'session' in data and not data['session']['completed']:
resumable_meta_data.append(data)
# end if
# end with
# end for
metadata = None
if len(resumable_meta_data) == 1:
metadata = resumable_meta_data[0]
elif len(resumable_meta_data) > 1:
answer = prompt([
{
'type': 'list',
'name': 'resume',
'message': 'Which one do you want to resume?',
'choices': display.format_resume_choices(resumable_meta_data),
}
])
index = int(answer['resume'].split('.')[0])
metadata = resumable_meta_data[index - 1]
# end if
if not metadata:
print('No unfinished download to resume\n')
display.app_complete()
return
# end if
app = load_session_from_metadata(metadata)
assert isinstance(app.crawler, Crawler)
print('Resuming', app.crawler.novel_title)
print('Output path:', app.output_path)
app.initialize()
app.crawler.initialize()
if app.can_do('login') and app.login_data:
logger.debug('Login with %s', app.login_data)
app.crawler.login(*list(app.login_data))
# end if
app.start_download()
app.bind_books()
app.compress_books()
app.destroy()
display.app_complete()
display_open_folder(app.output_path)
# end def
def load_session_from_metadata(data) -> App:
app = App()
session_data = data['session']
app.output_path = session_data['output_path']
app.user_input = session_data['user_input']
app.login_data = session_data['login_data']
app.pack_by_volume = session_data['pack_by_volume']
app.output_formats = session_data['output_formats']
app.good_file_name = session_data['good_file_name']
app.no_append_after_filename = session_data['no_append_after_filename']
logger.info('Novel Url: %s', data['url'])
app.init_crawler(data['url'])
if not isinstance(app.crawler, Crawler):
raise Exception('No crawler found for ' + data['url'])
app.crawler.novel_title = data['title']
app.crawler.novel_author = data['author']
app.crawler.novel_cover = data['cover']
app.crawler.volumes = data['volumes']
app.crawler.chapters = data['chapters']
app.crawler.is_rtl = data['rtl']
app.chapters = [
chap for chap in data['chapters']
if chap['id'] in session_data['download_chapters']
]
logger.info('Number of chapters to download: %d', len(app.chapters))
logger.debug(app.chapters)
return app
# end def
|
nilq/baby-python
|
python
|
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from rest_framework import status
from perm.models import PerMisson
from perm.filters import PerMissonFilter
from application.models import Application
from users.models import tGroup
from django.contrib.auth import get_user_model
from perm.serializers import (
    PermListSerializer,
    PermDetailSerializer,
    PermCreateSerializer,
    PermListSimpleSerializer,
    PermUserSerializer,
    PermAppSerializer,
    PermtGroupSerializer,
    PermUpdateSerializer,
)
User = get_user_model()
# Pagination for permission listings
class PermissonPagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
page_query_param = 'page'
max_page_size = 100
# Permission viewset
class PermissonViewSet(viewsets.ModelViewSet):
queryset = PerMisson.objects.all()
serializer_class = PermDetailSerializer
pagination_class = PermissonPagination
filter_backends = (DjangoFilterBackend,)
filter_class = PerMissonFilter
def get_serializer_class(self):
if self.action == 'list':
return PermListSerializer
if self.action == 'create':
return PermCreateSerializer
if self.action == 'update':
return PermUpdateSerializer
return PermDetailSerializer
@action(detail=False, methods=['get'], name="get all permisson", url_path="getall")
def get_perm_all(self, request, pk=None):
permsqs = PerMisson.objects.all()
serializer = PermListSimpleSerializer(permsqs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all users", url_path="getusers")
def get_all_users(self, request, pk=None):
users = User.objects.all()
serializer = PermUserSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all apps", url_path="getapps")
def get_all_apps(self, request, pk=None):
apps = Application.objects.all()
serializer = PermAppSerializer(apps, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=['get'], name="get all groups", url_path="getgroups")
def get_all_tgroups(self, request, pk=None):
tgroups = tGroup.objects.all()
serializer = PermtGroupSerializer(tgroups, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=['get'], name='group outside user', url_path="getusers_out")
def get_outside_user(self, request, pk=None):
users = User.objects.exclude(granted_by_permissions__id=pk)
serializer = PermUserSerializer(users, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(detail=True, methods=['get'], name='group outside apps', url_path="getapps_out")
def get_outside_apps(self, request, pk=None):
apps = Application.objects.exclude(granted_by_permissions__id=pk)
serializer = PermAppSerializer(apps, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(detail=True, methods=['get'], name='group outside groups', url_path="getgroups_out")
def get_outside_tgroup(self, request, pk=None):
groups = tGroup.objects.exclude(granted_by_permissions__id=pk)
serializer = PermtGroupSerializer(groups, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
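# Assuming this viewset is registered with a DRF router (the registration
# is not shown here), the custom actions above map to endpoints such as:
#   GET .../getall/            -> get_perm_all
#   GET .../getusers/          -> get_all_users
#   GET .../<pk>/getusers_out/ -> get_outside_user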
|
nilq/baby-python
|
python
|
import gc
import json
import warnings
import flask_restful
from eventlet import greenthread
from injector import CallableProvider, inject
from flask import Blueprint, Flask
from flask.templating import render_template_string
from flask.views import View
from nose.tools import eq_
from flask_injector import request, FlaskInjector
def test_injections():
l = [1, 2, 3]
counter = [0]
def inc():
counter[0] += 1
def conf(binder):
binder.bind(str, to="something")
binder.bind(list, to=l)
app = Flask(__name__)
@app.route('/view1')
@inject(content=str)
def view1(content):
inc()
return render_template_string(content)
@inject(content=list)
class View2(View):
def dispatch_request(self):
inc()
return render_template_string('%s' % self.content)
@app.before_request
@inject(c=list)
def br(c):
inc()
eq_(c, l)
@app.after_request
@inject(c=list)
def ar(response_class, c):
inc()
eq_(c, l)
return response_class
@app.context_processor
@inject(c=list)
def cp(c):
inc()
eq_(c, l)
return {}
@app.teardown_request
@inject(c=list)
def tr(sender, exc=None, c=None):
inc()
eq_(c, l)
app.add_url_rule('/view2', view_func=View2.as_view('view2'))
FlaskInjector(app=app, modules=[conf])
with app.test_client() as c:
response = c.get('/view1')
eq_(response.get_data(as_text=True), "something")
with app.test_client() as c:
response = c.get('/view2')
eq_(response.get_data(as_text=True), '%s' % (l,))
eq_(counter[0], 10)
def test_resets():
app = Flask(__name__)
counter = [0]
class Scope(object):
def __init__(self, injector):
pass
def prepare(self):
pass
def cleanup(self):
counter[0] += 1
@app.route('/')
def index():
eq_(counter[0], 1)
return 'asd'
FlaskInjector(app, request_scope_class=Scope)
eq_(counter[0], 0)
with app.test_client() as c:
c.get('/')
eq_(counter[0], 1)
def test_memory_leak():
# The RequestScope holds references to GreenThread objects, which would
# cause a memory leak
app = Flask(__name__)
FlaskInjector(app)
@app.route('/')
def index():
return 'test'
def get_request():
with app.test_client() as c:
c.get('/')
green_thread = greenthread.spawn(get_request)
green_thread.wait()
# Delete green_thread so the GreenThread object is dereferenced
del green_thread
# Force run garbage collect to make sure GreenThread object is collected if
# there is no memory leak
gc.collect()
greenthread_count = len([
obj for obj in gc.get_objects()
if type(obj) is greenthread.GreenThread])
eq_(greenthread_count, 0)
def test_doesnt_raise_deprecation_warning():
app = Flask(__name__)
def provide_str():
return 'this is string'
def configure(binder):
binder.bind(str, to=CallableProvider(provide_str), scope=request)
@app.route('/')
@inject(s=str)
def index(s):
return s
FlaskInjector(app=app, modules=[configure])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with app.test_client() as c:
c.get('/')
eq_(len(w), 0, map(str, w))
def test_jinja_env_globals_support_injection():
app = Flask(__name__)
def configure(binder):
binder.bind(str, to='xyz')
@inject(s=str)
def do_something_helper(s):
return s
app.jinja_env.globals['do_something'] = do_something_helper
@app.route('/')
def index():
return render_template_string('{{ do_something() }}')
FlaskInjector(app=app, modules=[configure])
with app.test_client() as c:
eq_(c.get('/').get_data(as_text=True), 'xyz')
def test_error_handlers_support_injection():
app = Flask(__name__)
class CustomException(Exception):
pass
@app.route('/custom-exception')
def custom_exception():
raise CustomException()
@app.errorhandler(404)
@inject(s=str)
def handle_404(error, s):
return s, 404
@app.errorhandler(CustomException)
@inject(s=str)
def handle_custom_exception(error, s):
return s, 500
def configure(binder):
binder.bind(str, to='injected content')
FlaskInjector(app=app, modules=[configure])
with app.test_client() as c:
response = c.get('/this-page-does-not-exist')
eq_((response.status_code, response.get_data(as_text=True)),
(404, 'injected content'))
response = c.get('/custom-exception')
eq_((response.status_code, response.get_data(as_text=True)),
(500, 'injected content'))
def test_view_functions_arent_modified_globally():
# Connected to GH #6 "Doing multiple requests on a flask test client on an injected route
# fails for all but the first request"
# The code would modify view functions generated by View.as_view(), it wasn't an issue with
# views added directly to an application but if function was added to a blueprint and
# that blueprint was used in multiple applications it'd raise an error
class MyView(View):
pass
blueprint = Blueprint('test', __name__)
blueprint.add_url_rule('/', view_func=MyView.as_view('view'))
app = Flask(__name__)
app.register_blueprint(blueprint)
FlaskInjector(app=app)
app2 = Flask(__name__)
app2.register_blueprint(blueprint)
# it'd fail here
FlaskInjector(app=app2)
def test_view_args_and_class_args_are_passed_to_class_based_views():
class MyView(View):
def __init__(self, class_arg):
self.class_arg = class_arg
def dispatch_request(self, dispatch_arg):
return '%s %s' % (self.class_arg, dispatch_arg)
app = Flask(__name__)
app.add_url_rule('/<dispatch_arg>', view_func=MyView.as_view('view', class_arg='aaa'))
FlaskInjector(app=app)
client = app.test_client()
response = client.get('/bbb')
print(response.data)
eq_(response.data, b'aaa bbb')
def test_flask_restful_integration_works():
@inject(_int=int)
class HelloWorld(flask_restful.Resource):
def get(self):
return {'int': self._int}
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(HelloWorld, '/')
FlaskInjector(app=app)
client = app.test_client()
response = client.get('/')
data = json.loads(response.data.decode('utf-8'))
eq_(data, {'int': 0})
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from validator import Validator
class VimLParserLint(Validator):
__filetype__ = 'vim'
checker = 'vimlparser'
args = ''
regex = r"""
.+?:
(?P<lnum>\d+):
(?P<col>\d+):
\svimlparser:\s
(?P<text>
(
(
(?P<error>E)
|
(?P<warning>W)
)
(?P<code>\d+):\s
)?
.+
)"""
|
nilq/baby-python
|
python
|
a = 4.9
b = 9.8
sum1 = a + b
print('resultado:', sum1)
|
nilq/baby-python
|
python
|
from functools import reduce
from itertools import combinations
from operator import mul
from aocd import data as expense_report
entries = list(map(int, expense_report.splitlines()))
for part in (1, 2):
for combo in combinations(entries, part+1):
if sum(combo) == 2020:
print(f'Part {part}:', reduce(mul, combo))
break
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 18:50:45 2021
@author: patrick
"""
from .Facebook_Chat_Analysis import *
|
nilq/baby-python
|
python
|
"""Module for the base objects of the abstract argumentation frameworks."""
from .relation import RelationType
from .relation import Relation
from .premise import FallacyType
from .premise import Premise
from .graph import Graph
from .extension import Extension
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped, Quaternion
from mavros_msgs.srv import CommandBool, CommandTOL, SetMode, SetModeRequest
from mavros_msgs.msg import State
import time
from tf.transformations import quaternion_from_euler
flight_alt = 1.0 # (m)
class TakeOffLand():
def __init__(self, altitude = flight_alt):
rospy.init_node('takeoff_land') # creates the node
# Subscribers
self.state_sub = rospy.Subscriber("uav1/mavros/state", State, self.state_cb)
# Publishers
self.local_pose_pub = rospy.Publisher("uav1/mavros/setpoint_position/local", PoseStamped, queue_size=10)
# Clients
self.arm_client = rospy.ServiceProxy("uav1/mavros/cmd/arming", CommandBool)
self.land_client = rospy.ServiceProxy("uav1/mavros/cmd/land", CommandTOL)
self.current_state = None
self.des_z = altitude
self.rate = rospy.Rate(20)
self.arm()
def state_cb(self, msg):
self.current_state = msg
def arm(self):
# wait for connect
while not rospy.is_shutdown() and self.current_state == None:
rospy.loginfo("waiting for connection")
self.rate.sleep()
# must be streaming points before allowed to switch to offboard
pose = PoseStamped()
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = self.des_z
for i in range(100):
self.local_pose_pub.publish(pose)
self.rate.sleep()
# enable offboard mode and arm
last_request = rospy.get_time()
set_mode = rospy.ServiceProxy("uav1/mavros/set_mode", SetMode)
req = SetModeRequest()
req.custom_mode = "OFFBOARD"
while not rospy.is_shutdown() and (self.current_state.mode != req.custom_mode):
self.local_pose_pub.publish(pose)
if rospy.get_time() - last_request > 5.0: # check every 5 seconds
try:
set_mode.call(req)
except rospy.ServiceException as e:
print("Service did not process request: %s" % str(e))
last_request = rospy.get_time()
self.rate.sleep()
rospy.loginfo("Switched to offboard mode")
while not rospy.is_shutdown() and not self.current_state.armed:
if not self.current_state.armed and rospy.get_time() - last_request > 5.0:
if self.arm_client(True):
rospy.loginfo("Vehicle armed")
last_request = rospy.get_time()
self.rate.sleep()
def take_off_hover(self):
# define hover pose (set point)
pose = PoseStamped()
pose.header.stamp = rospy.get_rostime()
pose.header.frame_id = 'mavsetp'
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = self.des_z
q = quaternion_from_euler(0, 0, 0)
pose.pose.orientation = Quaternion(*q)
rospy.loginfo("Vehicle taking off")
# publish pose for however long we want to hover
while not rospy.is_shutdown():
self.local_pose_pub.publish(pose)
self.rate.sleep()
rospy.loginfo("Vehicle hovering")
if __name__ == "__main__":
takeoff_land = TakeOffLand()
takeoff_land.take_off_hover()
rospy.spin()
|
nilq/baby-python
|
python
|
from flask import ( g, redirect, url_for )
from tmc.db import get_db, make_dicts
# Get list of all industries available in the database.
def get_industries():
db = get_db()
try:
db.row_factory = make_dicts
query = db.execute(
'SELECT id as db_id, industry_name as Industry FROM industries ORDER BY industry_name ASC').fetchall()
return query
except TypeError:
#embed()
return False #Change this for something more meaningful -- warning/alert
|
nilq/baby-python
|
python
|
'''
Miscellaneous math functions.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
def matrix_sqrt(X=None, symmetric=False, inverse=False, eigs=None):
'''Returns the matrix square root of X.
Arguments:
`X` (square class::`numpy.ndarrray`)
`symmetric` (bool, default False):
If True, `X` is assumed to be symmetric, which speeds up
calculation of the square root.
`inverse` (bool, default False):
If True, computes the matrix square root of inv(X).
`eigs` (2-tuple):
`eigs` must be a 2-tuple whose first element is an array of
eigenvalues and whose second element is an ndarray of eigenvectors
(individual eigenvectors are in columns). If this argument is
provided, computation of the matrix square root is much faster. If
this argument is provided, the `X` argument is ignored (in this
case, it can be set to None).
Returns a class::`numpy.ndarray` `S`, such that S.dot(S) = X
'''
if eigs is not None:
(vals, V) = eigs
else:
(vals, V) = np.linalg.eig(X)
k = len(vals)
if inverse is False:
SRV = np.diag(np.sqrt(vals))
else:
SRV = np.diag(1. / np.sqrt(vals))
if symmetric:
return V.dot(SRV).dot(V.T)
else:
return V.dot(SRV).dot(np.linalg.inv(V))
def get_histogram_cdf_points(data, cdf_vals, ignore=None, mask=None):
'''Returns input values corresponding to the data's CDF values.
Arguments:
`data` (ndarray):
The data for which to determine the CDF values
`cdf_vals` (sequence of floats):
A sequence defining the CDF values for which the values of `data`
should be returned. Each value should be in the range [0, 1]. For
example, to get the values of `data` corresponding to the 1% lower
tail and 5% upper tail, this argument would be (0.01, 0.95).
`ignore` (numeric, default `None`):
A scalar value that should be ignored when computing histogram
points (e.g., a value that indicates bad data). If this value is
not specified, all data are used.
`mask` (ndarray, default `None`):
If provided, only elements of `data` where `mask` is nonzero
are used when computing the CDF points.
Return value:
A list specifying the values in `data` that correspond to the
associated CDF values in `cdf_vals`.
'''
data = data.ravel()
if mask is not None:
data = data[mask.ravel() != 0]
if len(data) == 0:
raise Exception('All pixels are masked.')
if ignore is not None and ignore in data:
data = data[np.where(data != ignore)]
if len(data) == 0:
raise Exception('No data to display after masking and ignoring.')
isort = np.argsort(data)
N = len(data)
return [data[isort[int(x * (N - 1))]] for x in cdf_vals]
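if __name__ == '__main__':
# A minimal, illustrative sanity check (not part of the original module):
# for a symmetric positive definite matrix, S should satisfy S.dot(S) ~= X.
A = np.array([[4., 1.], [1., 3.]])
S = matrix_sqrt(A, symmetric=True)
assert np.allclose(S.dot(S), A)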
|
nilq/baby-python
|
python
|
import tkinter as tk
def get_line_numbers():
output = ''
row, col = text_editor.index("end").split('.') # 'row' is the line count reported at the text widget's 'end' index
#print(int(row)-1)
for i in range(1, int(row)):
output += str(i) + '\n' # build a newline-separated string of line numbers
#print(output)
return output
def update_line_numbers(event=None):
line_numbers = get_line_numbers()
line_number_bar.config(state='normal')
line_number_bar.delete('1.0', 'end')
line_number_bar.insert('1.0', line_numbers)
line_number_bar.config(state='disabled')
def on_content_changed(event=None):
if text_editor.edit_modified():
update_line_numbers()
text_editor.edit_modified(False)
root = tk.Tk()
line_number_bar = tk.Text(root, width=2, padx=3, takefocus=1,font=('Arial',14,'normal'), border=0,background='DarkOliveGreen1', state='disabled', wrap='none')
line_number_bar.pack(side='left', fill='y')
text_editor = tk.Text(root,font=('Arial',14,'normal'))
text_editor.config(wrap='word', relief=tk.FLAT)
text_editor.pack(fill=tk.BOTH, expand=True)
text_editor.bind('<<Modified>>',on_content_changed)
#text_editor.edit_modified(False)
root.mainloop()
|
nilq/baby-python
|
python
|
# -*- encoding=utf8 -*-
__author__ = "srz_zumix"
import sys
sys.path.append(r"../pmbase")
from airtest.core.api import *
from pmbase import PmBase
auto_setup(__file__)
# adb = ADB()
# def update():
# print adb.shell('dumpsys battery')
sleep_mul = 1
pm = PmBase(sleep_mul)
pm.setup()
def pm_sleep(s):
pm.pm_sleep(s)
def touch_positive_button():
return pm.touch_positive_button()
def touch_oncemore_button():
return pm.touch_oncemore_button()
def touch_next_button():
if touch_positive_button():
return True
return touch_oncemore_button()
def is_quest_select():
return pm.is_quest_select()
def touch_quest_banner(lv):
return pm.touch_quest_banner(lv)
def touch_result():
return pm.touch_result()
def check_bar():
im = pm.exists_battle_symbol()
if im:
pos = (im[0], im[1])
touch(pos)
pm_sleep(10)
return True
return False
def is_wait_bar():
if check_bar():
if check_bar():
check_bar()
return True
return False
def wait_battle():
if not pm.is_result_bg():
if not is_wait_bar():
return
pm.step_result()
def auto_battle(lv):
# once
if touch_quest_banner(lv):
touch_positive_button()
pm_sleep(10)
else:
touch_next_button()
while True:
wait_battle()
if is_quest_select():
break
else:
touch_next_button()
def auto_select_battle(lv):
while True:
auto_battle(lv)
def main():
auto_select_battle(4)
main()
|
nilq/baby-python
|
python
|
# coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
"""
This is the SpiceBot AI system. Based On Chatty cathy
"""
from sopel.tools import Identifier
from sopel.config.types import StaticSection, ListAttribute, ValidatedAttribute
import os
import tempfile
import aiml
from .Database import db as botdb
from .Config import config as botconfig
from .Read import read as botread
from .Users import users as botusers
from sopel_modules.spicemanip import spicemanip
class SpiceBot_AI_MainSection(StaticSection):
extra = ListAttribute('extra')
gender = ValidatedAttribute('gender', default='female')
class SpiceBot_AI():
def __init__(self):
self.setup_ai()
self.braindirs = []
self.dict = {
"patterncounts": 0,
"filecounts": 0,
"sessioncache": {},
"files": {}
}
# Load AIML kernel
self.aiml_kernel = aiml.Kernel()
# aiml parser
self.aiml_parser = aiml.AimlParser.create_parser()
# Don't warn for no matches
self.aiml_kernel._verboseMode = False
# Learn responses
self.load_saved_brain()
self.load_brain()
# Load bot values
self.load_bot_values()
def setup_ai(self):
botconfig.define_section("SpiceBot_AI", SpiceBot_AI_MainSection, validate=False)
botconfig.config.aibrain = os.path.join(botconfig.homedir, botconfig.config.core.basename + '.aibrain.brn')
def load_saved_brain(self):
if os.path.isfile(botconfig.config.aibrain):
self.aiml_kernel.bootstrap(brainFile=botconfig.config.aibrain)
self.save_brain()
def load_brain(self):
braindirs = botread.get_config_dirs("SpiceBot_AI")
# learn directories
self.learn(braindirs)
self.save_brain()
def load_bot_values(self):
current_bot_db = botdb.get_plugin_value('SpiceBot_AI', 'brain') or None
if current_bot_db:
for predicate in list(current_bot_db.keys()):
predval = current_bot_db[predicate]
self.aiml_kernel.setBotPredicate(predicate, predval)
# sopel nick
self.aiml_kernel.setBotPredicate("nick", botconfig.nick)
# gender
self.aiml_kernel.setBotPredicate("gender", botconfig.SpiceBot_AI.gender.lower())
if botconfig.SpiceBot_AI.gender.lower() not in ["male", "female"]:
self.aiml_kernel.setBotPredicate("gendertype", "item")
else:
self.aiml_kernel.setBotPredicate("gendertype", botconfig.SpiceBot_AI.gender.lower())
def learn(self, braindirs):
for braindir in braindirs:
if braindir not in self.braindirs:
self.braindirs.append(braindir)
# Count matches
for pathname in os.listdir(braindir):
self.dict["filecounts"] += 1
aimlfile = os.path.join(braindir, pathname)
data = open(aimlfile).read()
count = data.count('pattern')
count = count / 2
self.dict["patterncounts"] += int(count)
tempbrain = tempfile.mkstemp()[1]
with open(tempbrain, 'w') as fileo:
fileo.write(
"<aiml version='1.0.1' encoding='UTF-8'>"
" <!-- std-startup.xml -->\n"
" <category>\n"
" <pattern>LOAD AIML B</pattern>\n"
" <template>\n"
" <learn>{}</learn>\n"
" </template>\n"
" </category>\n"
"</aiml>".format(os.path.join(braindir, "*.aiml"))
)
self.aiml_kernel.learn(tempbrain)
self.aiml_kernel.respond("LOAD AIML B")
def on_message(self, bot, trigger, message):
nick = Identifier(trigger.nick)
nick_id = botusers.get_nick_id(nick, True)
self.check_user_import(nick, nick_id)
message = self.bot_message_precipher(bot, trigger, message)
aiml_response = self.aiml_kernel.respond(message, nick_id)
if aiml_response:
aiml_response = self.bot_message_decipher(bot, trigger, aiml_response)
self.save_nick_session(nick, nick_id)
self.save_brain()
return aiml_response
def bot_message_precipher(self, bot, trigger, message):
# punctuation
puct_dict = {"!": "exclamationmark", ".": "period", "?": "questionmark", ",": "comma"}
for punctuation in list(puct_dict.keys()):
message = message.replace(punctuation, puct_dict[punctuation])
# bot items
for botitem in ["nick"]:
messagelist = spicemanip(message, "create")
for i in range(len(messagelist)):
if messagelist[i].upper() == str(eval("bot." + botitem)).upper():
messagelist[i] = str("bot" + botitem).upper()
message = spicemanip(messagelist, 0)
for triggeritem in ["nick", "sender"]:
messagelist = spicemanip(message, "create")
for i in range(len(messagelist)):
if messagelist[i].upper() == str(eval("trigger." + triggeritem)).upper():
messagelist[i] = str("trigger" + botitem).upper()
message = spicemanip(messagelist, 0)
return message
def bot_message_decipher(self, bot, trigger, aiml_response):
# bot items
for botitem in ["nick"]:
aiml_response = aiml_response.replace("bot" + botitem, str(eval("bot." + botitem)))
# trigger items
for triggeritem in ["nick", "sender"]:
aiml_response = aiml_response.replace("trigger" + triggeritem, str(eval("trigger." + triggeritem)))
# pronouns
botgendertype = self.aiml_kernel.getBotPredicate("gendertype")
pronounsdict = {
"male": {
"main": "he",
"possess": "his",
"self": "himself",
},
"female": {
"main": "her",
"possess": "hers",
"self": "herself",
},
"item": {
"main": "it",
"possess": "its",
"self": "itself",
},
"point": {
"main": "you",
"possess": "yours",
"self": "yourself",
},
"group": {
"main": "them",
"possess": "theirs",
"self": "themselves",
},
}
for pronounitem in list(pronounsdict[botgendertype].keys()):
aiml_response = aiml_response.replace("BOTPRONOUN" + pronounitem, pronounsdict[botgendertype][pronounitem])
triggergendertype = self.getPredicate("gender", trigger.nick)
if not triggergendertype or triggergendertype == "":
triggergendertype = "point"
for pronounitem in list(pronounsdict[triggergendertype].keys()):
aiml_response = aiml_response.replace("TRIGGERPRONOUN" + pronounitem, pronounsdict[triggergendertype][pronounitem])
aiml_response = "\x0315" + aiml_response + "\x03"
return aiml_response
def getPredicate(self, predicate, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
return self.aiml_kernel.getPredicate(predicate, nick_id)
def check_user_import(self, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
if nick_id not in list(self.dict["sessioncache"].keys()):
self.dict["sessioncache"][nick_id] = botdb.get_nick_value(nick, 'botai') or {}
for predicate in list(self.dict["sessioncache"][nick_id].keys()):
predval = self.dict["sessioncache"][nick_id][predicate]
self.aiml_kernel.setPredicate(predicate, predval, nick_id)
# defaults
if "nick" not in list(self.dict["sessioncache"][nick_id].keys()):
self.dict["sessioncache"][nick_id]["nick"] = nick
self.aiml_kernel.setPredicate("nick", nick, nick_id)
def save_nick_session(self, nick, nick_id=None):
if not nick_id:
nick = Identifier(nick)
nick_id = botusers.get_nick_id(nick, True)
sessionData = self.aiml_kernel.getSessionData(nick_id)
botdb.set_nick_value(nick, 'botai', sessionData)
def save_brain(self):
self.aiml_kernel.saveBrain(botconfig.config.aibrain)
botsessiondata = self.aiml_kernel._botPredicates
botdb.set_plugin_value('SpiceBot_AI', 'brain', botsessiondata)
botai = SpiceBot_AI()
|
nilq/baby-python
|
python
|
"""
TODO TESTS:
- Syntax errors,
- general tests
"""
from helper import (
ValueChecker,
FlaskValueCheckerSyntaxError,
FlaskValueCheckerValueError,
)
import random
import string
import pytest
import io
test_restriction_code = """
# some simple data for tests here
firstName : str/lenlim(5, 15) # a random comment
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
team : str/accept(["red", "blue", "yellow", "green", "orange"])
acceptTermsAndConditions : str/accept(['on'])/optional
someEdgeCase : str/accept(['on'])
"""
checker = ValueChecker(test_restriction_code)
sample_test_dict = {
"firstName": "Garyashver",
"email": "GaryBob@Dan.com",
"phone": "9120921022",
"age": "76",
"password": "12345678",
"team": "red",
"someEdgeCase": "on",
"needed_file": (io.BytesIO(b"something"), "file.txt"),
"optional_file": (io.BytesIO(b"something"), "other_file.txt"),
}
def create_sample_dict(modifications=None):
modifications = {} if modifications is None else modifications
test_dict = sample_test_dict.copy()
for key, value in modifications.items():
if value is None:
if key in test_dict:
del test_dict[key]
else:
test_dict[key] = value
return test_dict
def run_tests_for_param(param, tests, pre_func=None):
for test in tests:
pre_value, expected_output = test
if pre_func:
value = pre_func(pre_value)
else:
value = pre_value
test_dict = create_sample_dict({param: value})
errs = checker.check_for(test_dict)
bad_err_text = f"""
param : {param},
pre_value : {pre_value},
value : {value},
expected_output : {expected_output},
"""
if expected_output is None:
assert errs is None, bad_err_text
else:
assert errs[param] == expected_output, bad_err_text
def create_rand_text(length, max_len=None):
"""
Create random text of a specific length.
If max_len is specified, the text has a random
length between length and max_len.
"""
if max_len is not None:
length = random.randint(length, max_len)
to_ret_string = ""
for _ in range(length):
to_ret_string += random.choice(string.printable)
return to_ret_string
def test_simple_pass():
error = checker.check_for(sample_test_dict)
assert error is None
def test_simple_fail():
test_dict = create_sample_dict({"age": None})
errors = checker.check_for(test_dict)
assert errors is not None
fields = errors
assert "age" in fields
assert len(fields.items()) == 1
def test_optional_field():
test_dict = create_sample_dict({"middleName": "sarah"})
errors = checker.check_for(test_dict)
assert errors is None
test_dict = create_sample_dict({})
errors = checker.check_for(test_dict)
assert errors is None
def test_string_length_limits():
def pre_func(val):
if type(val) != tuple:
val = (val,)
return create_rand_text(*val)
# tests are run on the modif_param
modif_param = "firstName"
invalid_range_err = "string length must be between 5 and 15"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[(0, 4), invalid_range_err],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(16, 1000), invalid_range_err],
[(16, 1000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "middleName"
invalid_range_err = "string length must be between 5 and inf"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[(0, 4), invalid_range_err],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(15, 1000), None],
[(15, 1000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "lastName"
invalid_range_err = ""
# tests represent parameters, text_len, expected_output_error
tests = [
[2, None],
[3, None],
[(0, 4), None],
[5, None],
[(5, 15), None],
[(5, 15), None],
[(15, 1000), None],
[(15, 1000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
def test_string_accept():
modif_param = "team"
invalid_value_error = (
"value must be one from the list ['red', 'blue', 'yellow', 'green', 'orange']"
)
tests = [
["red", None],
["blue", None],
["Green", invalid_value_error],
["iojoidas", invalid_value_error],
["", invalid_value_error],
]
run_tests_for_param(modif_param, tests)
modif_param = "acceptTermsAndConditions"
invalid_value_error = "value should be 'on', or the field should not be submitted"
tests = [
["on", None],
[None, None],
["avcdscs", invalid_value_error],
["", invalid_value_error],
]
run_tests_for_param(modif_param, tests)
modif_param = "someEdgeCase"
invalid_value_error = "value should be 'on'"
tests = [
["on", None],
["avcdscs", invalid_value_error],
["", invalid_value_error],
[None, invalid_value_error],
]
run_tests_for_param(modif_param, tests)
def test_int_limits():
def pre_func(val):
if type(val) != tuple:
return val
return random.randint(*val)
# tests are run on the modif_param
modif_param = "age"
invalid_range_err = "value must be between 18.0 and 99.0"
# tests represent parameters, text_len, expected_output_error
tests = [
[2, invalid_range_err],
[3, invalid_range_err],
[-4, invalid_range_err],
[-7, invalid_range_err],
[(-1000, 17), invalid_range_err],
[18, None], # edge case
[(18, 99), None],
[(18, 99), None],
[99, None], # edge case
[(100, 1000), invalid_range_err],
[(100, 1000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "height"
invalid_range_err = "value must be between 1.0 and inf"
# tests represent parameters, text_len, expected_output_error
tests = [
[1, None], # edge case
[2, None],
[3, None],
[-4, invalid_range_err],
[-7, invalid_range_err],
[(-10000, 0), invalid_range_err],
[(15, 99), None],
[(15, 99), None],
[99, None],
[(100, 10000), None],
[(100, 10000), None],
]
run_tests_for_param(modif_param, tests, pre_func)
# tests are run on the modif_param
modif_param = "someNegativeFloat"
invalid_range_err = "value must be between -inf and 0.0"
# tests represent parameters, text_len, expected_output_error
tests = [
[0, None], # edge case
[(-10000, 0), None],
[(-10000, 0), None],
[(100, 10000), invalid_range_err],
[(100, 10000), invalid_range_err],
]
run_tests_for_param(modif_param, tests, pre_func)
def test_bad_syntax():
bad_syntax_1 = """
middleName : str/lenlim(5, inf)/optional
# bad syntax over here, end bracket is missing
firstName : str/lenlim(5, 15
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_1)
bad_syntax_2 = """
# bad syntax over here, 3 parameters instead of 2
firstName : str/lenlim(5, 15, 56)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
password : str/lenlim(8, 15)
phone : str/lenlim(8, 15)
age : int/lim(18, 99)
height : float/lim(1, inf)/optional
someNegativeFloat : float/optional/lim(-inf, 0)
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_2)
bad_syntax_3 = """
# bad syntax over here, 1 parameter instead of 2
firstName : str/lenlim(5)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_3)
bad_syntax_4 = """
# bad parameter name here
firstName : str/blablabla
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_4)
bad_syntax_5 = """
# bad parameter name here
firstName : str/accept([,])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_5)
bad_syntax_6 = """
# bad parameter name here
firstName : str/accept([abc)
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_6)
bad_syntax_7 = """
# bad parameter name here
firstName : str/accept(["abc'])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerSyntaxError):
checker = ValueChecker(bad_syntax_7)
bad_syntax_8 = """
# bad parameter name here
firstName : str/accept(["abc", 124])
middleName : str/lenlim(5, inf)/optional
lastName : str/optional
email : str
"""
with pytest.raises(FlaskValueCheckerValueError):
checker = ValueChecker(bad_syntax_8)
|
nilq/baby-python
|
python
|
import hashlib
string1 = 'Teste inicial'.encode('utf-8')
string2 = 'Teste inicial'.encode('utf-8')
hash1 = hashlib.new('ripemd160')
hash1.update(string1)
hash2 = hashlib.new('ripemd160')
hash2.update(string2)
print("-" * 60)
print(hash1.hexdigest())
print(hash2.hexdigest())
if hash1.digest() == hash2.digest():
print("\nA string 1 é igual a string 2")
else:
print("\nA string 1 é diferente a string 2")
|
nilq/baby-python
|
python
|
from django.conf.urls import url
import lessons.views
urlpatterns = (
url(r'^create/(?P<course_id>\d+)$', lessons.views.schedule_create_page,
name="lessons.views.schedule_create_page"),
url(r'^edit/(?P<lesson_id>\d+)$', lessons.views.schedule_edit_page,
name="lessons.views.schedule_edit_page"),
)
|
nilq/baby-python
|
python
|
import warnings
import numpy as np
from skimage.restoration import denoise_wavelet
def apply_rolling_window(mainchunk: np.array, meterchunk: np.array, window_size: int):
if not window_size:
raise Warning('Window size is not defined.')
indexer = np.arange(window_size)[None, :] + np.arange(len(mainchunk) - window_size + 1)[:, None]
mainchunk = mainchunk[indexer]
meterchunk = meterchunk[window_size - 1:]
return mainchunk, meterchunk
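# Worked example for the indexer above (a hedged, minimal check):
#   m, t = apply_rolling_window(np.arange(5), np.arange(5), 3)
# builds windows [0,1,2], [1,2,3], [2,3,4], so m has shape (3, 3) and
# t keeps the last three targets [2, 3, 4], one per window.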
def apply_midpoint_window(mainchunk: np.array, meterchunk: np.array, window_size: int):
if not window_size:
raise Warning('Window size is not defined.')
indexer = np.arange(window_size)[None, :] + np.arange(len(mainchunk) - window_size + 1)[:, None]
mainchunk = mainchunk[indexer]
midpoint = window_size // 2
meterchunk = meterchunk[midpoint: len(mainchunk) + midpoint]
return mainchunk, meterchunk
def apply_sequence_to_subsequence(mainchunk: np.array, meterchunk: np.array, sequence_window: int,
subsequence_window: int):
if not sequence_window:
raise Warning('Sequence window is not defined.')
if not subsequence_window:
warnings.warn('Subsequence window is not defined, so 20% of the sequence window was used.')
subsequence_window = int(sequence_window * 0.2)
upper_limit = (sequence_window + subsequence_window) // 2
lower_limit = (sequence_window - subsequence_window) // 2
sequence_indexer = np.arange(sequence_window)[None, :] + np.arange(len(mainchunk) - sequence_window + 1)[:, None]
mainchunk = mainchunk[sequence_indexer]
subsequence_indexer = np.arange(sequence_window)[lower_limit: upper_limit] + np.arange(len(mainchunk))[:, None]
meterchunk = meterchunk[subsequence_indexer]
return mainchunk, meterchunk
def apply_sequence_to_sequence(mainchunk: np.array, meterchunk: np.array, sequence_window: int):
if not sequence_window:
raise Warning('Sequence window is not defined.')
sequence_indexer = np.arange(sequence_window)[None, :] + np.arange(len(mainchunk) - sequence_window + 1)[:, None]
mainchunk = mainchunk[sequence_indexer]
meterchunk = meterchunk[sequence_indexer]
return mainchunk, meterchunk
def create_batches(mainchunk: np.array, meterchunk: np.array, seq_len: int):
ix = mainchunk.index
additional = seq_len - (len(ix) % seq_len)
mainchunk = np.append(mainchunk, np.zeros(additional))
meterchunk = np.append(meterchunk, np.zeros(additional))
mainchunk = np.reshape(mainchunk, (int(len(mainchunk) / seq_len), seq_len, 1))
meterchunk = np.reshape(meterchunk, (int(len(meterchunk) / seq_len), seq_len, 1))
mainchunk = np.transpose(mainchunk, (0, 2, 1))
meterchunk = np.transpose(meterchunk, (0, 2, 1))
return mainchunk, meterchunk
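# Shape note (the inputs are pandas Series here, since `.index` is used):
# a series of length 100 with seq_len=25 is padded with a full extra block
# of zeros (additional == seq_len when the length already divides evenly)
# and reshaped to (batches, 1, seq_len) -- (5, 1, 25) in that case.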
def replace_nans(mainchunk: np.array, meterchunk: np.array):
mainchunk.fillna(0, inplace=True)
meterchunk.fillna(0, inplace=True)
return mainchunk, meterchunk
def replace_nans_interpolation(mainchunk: np.array, meterchunk: np.array):
mainchunk.interpolate(method='linear', limit_direction='forward', inplace=True)
meterchunk.interpolate(method='linear', limit_direction='forward', inplace=True)
return mainchunk, meterchunk
def normalize_chunks(mainchunk: np.array, meterchunk: np.array, mmax: float):
if mmax is None:
mmax = mainchunk.max()
mainchunk = mainchunk / mmax
meterchunk = meterchunk / mmax
return mainchunk, meterchunk
def standardize_chunks(mainchunk: np.array, meterchunk: np.array, mains_mean: float,
mains_std: float, meter_mean: float, meter_std: float):
if mains_mean is None and mains_std is None:
mains_mean = mainchunk.mean()
mains_std = mainchunk.std()
if meter_mean is None and meter_std is None:
meter_mean = meterchunk.mean()
meter_std = meterchunk.std()
mainchunk = (mainchunk - mains_mean) / mains_std
meterchunk = (meterchunk - meter_mean) / meter_std
return mainchunk, meterchunk
def is_bad_chunk(chunk: np.array):
return (chunk == 0).all()
def align_chunks(mainchunk: np.array, meterchunk: np.array):
mainchunk = mainchunk[~mainchunk.index.duplicated()]
meterchunk = meterchunk[~meterchunk.index.duplicated()]
ix = mainchunk.index.intersection(meterchunk.index)
mainchunk = mainchunk[ix]
meterchunk = meterchunk[ix]
return mainchunk, meterchunk
def replace_with_zero_small_values(mainchunk: np.array, meterchunk: np.array, threshold: int):
mainchunk[mainchunk < threshold] = 0
meterchunk[meterchunk < threshold] = 0
return mainchunk, meterchunk
def denoise(mainchunk: np.array, meterchunk: np.array):
mainchunk = denoise_wavelet(mainchunk, wavelet='haar', wavelet_levels=3)
meterchunk = denoise_wavelet(meterchunk, wavelet='haar', wavelet_levels=3)
return mainchunk, meterchunk
def add_gaussian_noise(mainchunk: np.array, noise_factor: float = 0.1):
noise = noise_factor * np.random.normal(0, 1, mainchunk.shape)
mainchunk = mainchunk + noise
return mainchunk
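if __name__ == '__main__':
# Minimal smoke test (illustrative only): with no precomputed mmax,
# normalize_chunks divides both chunks by the mains maximum.
mains = np.array([0., 2., 4.])
meter = np.array([0., 1., 2.])
m, t = normalize_chunks(mains, meter, None)
assert m.max() == 1.0 and t.max() == 0.5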
|
nilq/baby-python
|
python
|
#pylint: disable=line-too-long,broad-except
"""Calculates total time from calendar events, grouped by an event attribute.
Usage:
calcatime -c <calendar_uri> [-d <domain>] -u <username> -p <password> <timespan>... [--by <event_attr>] [--include-zero] [--json] [--debug]
Options:
-h, --help Show this help
-V, --version Show command version
-c <calendar_uri> Calendar provider:server uri
↓ See Calendar Providers
-d <domain> Domain name
-u <username> User name
-p <password> Password
<timespan> Only include events in given time span
↓ See Timespan Options
--by=<event_attr> Group total times by given event attribute
↓ See Event Attributes
--include-zero Include zero totals in output
--json Output data to json; default is csv
--debug Extended debug logging
Examples:
$ calcatime -c "office365" -u "email@company.com" -p $password last week --json
Calendar Providers:
Microsoft Exchange: exchange:<server url>
Office365: office365[:<server url>]
default server url = outlook.office365.com
Timespan Options:
today
yesterday
week (current)
month (current)
year (current)
monday | mon
tuesday | tue
wednesday | wed
thursday | thu
friday | fri
saturday | sat
sunday | sun
last (can be used multiple times e.g. last last week)
next (can be used multiple times e.g. next next week)
Event Grouping Attributes:
category[:<regex_pattern>]
title[:<regex_pattern>]
"""
# python native modules
import sys
import re
import json
import calendar
from enum import Enum
from datetime import datetime, timedelta
from collections import namedtuple
from typing import Dict, List, Optional, Tuple, Iterator
# third-party modules
from docopt import docopt
__version__ = '0.5'
# Configs ---------------------------------------------------------------------
# default format used for outputting datetime values
DATETIME_FORMAT = '%Y-%m-%d'
# Data types ------------------------------------------------------------------
# tuple for command line arguments
Configs = namedtuple('Configs', [
'calendar_provider',
'username',
'password',
'range_start',
'range_end',
'domain',
'grouping_attr',
'include_zero',
'output_type'
])
# tuple for holding calendar event properties
# irrelevant of the calendar provider
CalendarEvent = namedtuple('CalendarEvent', [
'title',
'start',
'end',
'duration',
'categories'
])
# tuple for calendar provider configs
CalendarProvider = namedtuple('CalendarProvider', [
'name',
'prefix',
'server',
'supports_categories'
])
# calendar providers enum
class CalendarProviders(Enum):
"""Supported calendar providers"""
# microsoft exchange server, server url must be provided
Exchange: CalendarProvider = \
CalendarProvider(name='Microsoft Exchange',
prefix='exchange',
server='',
supports_categories=True)
# microsoft Office365, default url is provided
Office365: CalendarProvider = \
CalendarProvider(name='Office365',
prefix='office365',
server='outlook.office365.com',
supports_categories=True)
# Functions -------------------------------------------------------------------
def get_providers() -> List[CalendarProvider]:
"""Get list of supported providers."""
return [x.value for x in CalendarProviders]
def get_provider(connection_string: str) -> CalendarProvider:
"""Get provider configs from connection string."""
# determine calendar provider
if connection_string:
connstr = connection_string.lower()
for calprov in get_providers():
if calprov.prefix in connstr:
# grab server url from connection string
calserver = None
match = \
re.search(f'{calprov.prefix}:(.+)?', connstr, re.IGNORECASE)
if match:
calserver = match.group(1)
if not calprov.server and not calserver:
raise Exception('Calendar provider server url is required.')
# create provider configs
return CalendarProvider(
name=calprov.name,
prefix=calprov.prefix,
server=calserver or calprov.server,
supports_categories=calprov.supports_categories
)
raise Exception('Calendar provider is not supported.')
def parse_configs() -> Configs:
"""Parse command line arguments and return configs"""
# process command line args
args = docopt(__doc__, version='calcatime {}'.format(__version__))
# extended debug?
if args.get('--debug'):
import logging
from exchangelib.util import PrettyXmlHandler
logging.basicConfig(level=logging.DEBUG, handlers=[PrettyXmlHandler()])
# determine calendar provider
calprovider = get_provider(args.get('-c', None))
# determine credentials
username = args.get('-u', None)
password = args.get('-p', None)
if not username or not password:
raise Exception('Calendar access credentials are required.')
# get domain if provided
domain = args.get('-d', None)
# determine grouping attribute, set defaults if not provided
grouping_attr = args.get('--by', None)
if not grouping_attr:
if calprovider.supports_categories:
grouping_attr = 'category'
else:
grouping_attr = 'title'
# determine if zeros need to be included
include_zero = args.get('--include-zero', False)
# determine output type, defaults to csv
json_out = args.get('--json', False)
# determine requested time span
start, end = parse_timerange_tokens(
args.get('<timespan>', [])
)
return Configs(
calendar_provider=calprovider,
username=username,
password=password,
range_start=start,
range_end=end,
domain=domain,
grouping_attr=grouping_attr,
include_zero=include_zero,
output_type='json' if json_out else 'csv'
)
def parse_timerange_tokens(timespan_tokens: List[str]) -> Tuple[datetime, datetime]:
"""Return start and end of the range specified by tokens."""
# collect today info
today = datetime.today()
today_start = datetime(today.year, today.month, today.day, 0, 0)
today_end = today_start + timedelta(days=1)
# calculate this week start date
week_start = today_start - timedelta(days=today_start.weekday())
# count the number of times 'last' token is provided
# remove 7 days for each count
last_count = timespan_tokens.count('last')
last_offset = -7 * last_count
# count the number of times 'next' token is provided
# add 7 days for each count
next_count = timespan_tokens.count('next')
next_offset = 7 * next_count
offset = last_offset + next_offset
# now process the known tokens
if 'today' in timespan_tokens:
return (today_start + timedelta(days=offset),
today_end + timedelta(days=offset))
elif 'yesterday' in timespan_tokens:
return (today_start + timedelta(days=-1 + offset),
today_end + timedelta(days=-1 + offset))
elif 'week' in timespan_tokens:
return (week_start + timedelta(days=offset),
week_start + timedelta(days=7 + offset))
elif 'month' in timespan_tokens:
month_index = today.month + (-last_count + next_count)
month_index = month_index if month_index >= 1 else 12
month_start = datetime(today.year, month_index, 1)
last_day = calendar.monthrange(today.year, month_index)[1]
month_end = datetime(today.year, month_index, last_day)
return (month_start, month_end)
elif 'year' in timespan_tokens:
year_number = today.year + (-last_count + next_count)
year_start = datetime(year_number, 1, 1)
year_end = datetime(year_number + 1, 1, 1) + timedelta(-1)
return (year_start, year_end)
elif 'decade' in timespan_tokens:
raise NotImplementedError()
elif 'century' in timespan_tokens:
raise NotImplementedError()
elif 'millennium' in timespan_tokens:
raise NotImplementedError()
# process week days
for idx, day_names in enumerate(
zip(map(str.lower, list(calendar.day_name)),
map(str.lower, list(calendar.day_abbr)))):
if any(x in timespan_tokens for x in day_names):
range_start = week_start + timedelta(days=idx + offset)
range_end = week_start + timedelta(days=idx + 1 + offset)
return (range_start, range_end)
raise Exception('Can not determine time span.')
def collect_events(configs: Configs) -> List[CalendarEvent]:
"""Use calendar provider API to collect events within given range."""
# collect events from calendar
events: List[CalendarEvent] = []
provider = configs.calendar_provider
# if provider uses exchange api:
if provider.name == CalendarProviders.Exchange.name \
or provider.name == CalendarProviders.Office365.name:
events = get_exchange_events(
server=provider.server,
domain=configs.domain,
username=configs.username,
password=configs.password,
range_start=configs.range_start,
range_end=configs.range_end
)
# otherwise the api is not implemented
else:
raise Exception('Calendar provider API is not yet implemented.')
return events
def get_exchange_events(server: str,
domain: Optional[str],
username: str,
password: str,
range_start: datetime,
range_end: datetime) -> List[CalendarEvent]:
"""Connect to exchange calendar server and get events within range."""
# load exchange module if necessary
from exchangelib import Credentials, Configuration, Account, DELEGATE
from exchangelib import EWSDateTime, EWSTimeZone
# setup access
full_username = r'{}\{}'.format(domain, username) if domain else username
account = Account(
primary_smtp_address=username,
config=Configuration(server=server,
credentials=Credentials(full_username, password)),
autodiscover=False,
access_type=DELEGATE
)
# collect event information within given time range
events: List[CalendarEvent] = []
localzone = EWSTimeZone.localzone()
local_start = localzone.localize(EWSDateTime.from_datetime(range_start))
local_end = localzone.localize(EWSDateTime.from_datetime(range_end))
for item in account.calendar.filter( ##pylint: disable=no-member
start__range=(local_start, local_end)).order_by('start'):
events.append(
CalendarEvent(
title=item.subject,
start=item.start,
end=item.end,
duration=(item.end - item.start).seconds / 3600,
categories=item.categories
))
return events
def group_events(events: List[CalendarEvent],
configs: Configs)-> Dict[str, List[CalendarEvent]]:
"""Group events by given attribute."""
# group events
grouped_events: Dict[str, List[CalendarEvent]] = {}
group_attr = configs.grouping_attr
if events:
if group_attr.startswith('category:'):
_, pattern = group_attr.split(':')
if pattern:
grouped_events = \
group_by_pattern(events, pattern, attr='category')
elif group_attr == 'category':
grouped_events = \
group_by_category(events)
elif group_attr.startswith('title:'):
_, pattern = group_attr.split(':')
if pattern:
grouped_events = \
group_by_pattern(events, pattern, attr='title')
elif group_attr == 'title':
grouped_events = \
group_by_title(events)
return grouped_events
def group_by_title(
events: List[CalendarEvent]) -> Dict[str, List[CalendarEvent]]:
"""Group given events by event title."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
if event.title in grouped_events:
grouped_events[event.title].append(event)
else:
grouped_events[event.title] = [event]
return grouped_events
def group_by_category(events: List[CalendarEvent],
unknown_group='---') -> Dict[str, List[CalendarEvent]]:
"""Group given events by event category."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
if event.categories:
for cat in event.categories:
if cat in grouped_events:
grouped_events[cat].append(event)
else:
grouped_events[cat] = [event]
else:
if unknown_group in grouped_events:
grouped_events[unknown_group].append(event)
else:
grouped_events[unknown_group] = [event]
return grouped_events
def group_by_pattern(events: List[CalendarEvent],
pattern: str,
attr: str = 'title') -> Dict[str, List[CalendarEvent]]:
"""Group given events by given regex pattern and target attribute."""
grouped_events: Dict[str, List[CalendarEvent]] = {}
for event in events:
target_tokens = []
if attr == 'title':
target_tokens.append(event.title)
elif attr == 'category':
target_tokens = event.categories
if target_tokens:
for token in target_tokens or []:
match = re.search(pattern, token, flags=re.IGNORECASE)
if match:
matched_token = match.group()
if matched_token in grouped_events:
grouped_events[matched_token].append(event)
else:
grouped_events[matched_token] = [event]
break
return grouped_events
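# Example (hypothetical titles, for illustration): with pattern r'PROJ-\d+'
# and attr='title', events named 'PROJ-12 standup' and 'PROJ-12 review'
# both land under the 'PROJ-12' key, 'PROJ-34 triage' gets its own group,
# and events with no match are left out of the result entirely.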
def cal_total_duration(
grouped_events: Dict[str, List[CalendarEvent]]) -> Dict[str, float]:
"""Calculate total duration of events in each group."""
hours_per_group: Dict[str, float] = {}
for event_group, events in grouped_events.items():
total_duration = 0
for event in events:
total_duration += event.duration
hours_per_group[event_group] = total_duration
return hours_per_group
def calculate_and_dump(grouped_events: Dict[str, List[CalendarEvent]],
configs: Configs):
"""Calculate totals and dump event data."""
total_durations = cal_total_duration(grouped_events)
calculated_data: List[Dict] = []
for event_group in grouped_events:
if not configs.include_zero and total_durations[event_group] == 0:
continue
calculated_data.append({
'start': configs.range_start.strftime(DATETIME_FORMAT),
'end': configs.range_end.strftime(DATETIME_FORMAT),
'group': event_group,
'duration': total_durations[event_group]
})
if configs.output_type == 'json':
print(json.dumps(calculated_data))
elif configs.output_type == 'csv':
print('"start","end","group","duration"')
for data in calculated_data:
print(','.join([
'"{}"'.format(data['start']),
'"{}"'.format(data['end']),
'"{}"'.format(data['group']),
str(data['duration'])
]))
# Main ------------------------------------------------------------------------
def main():
"""Parse arguments, parse time span, get and organize events, dump data."""
# get configs
configs = parse_configs()
# collect events
events = collect_events(configs)
# groups events by attribute
grouped_events = group_events(events, configs)
# prepare and dump data
calculate_and_dump(grouped_events, configs)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
db_fun.py
This module contains helper functions for database entry creation.
"""
from models import Resource, Category
from datetime import datetime
def get_or_create(session, model, **kwargs):
"""
Determines if a given record already exists in the database.
Args:
session: The database session.
model: The model for the record.
**kwargs: The properties to set on the model. All of the
specified properties are used to determine if
the model already exists.
Returns:
Two values. The first value is a boolean
indicating if this item is a new record. The second
value will be the created/retrieved model.
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return False, instance
else:
instance = model(**kwargs)
return True, instance
def add_get_or_create(session, model, **kwargs):
"""
Gets or creates a record based on whether it already exists.
If it does not already exist, it will be created.
Args:
session: The database session.
model: The model to get or create.
**kwargs: The properties to set on the model. All of the
specified properties are used to determine if
the model already exists.
Returns:
Two values. The first value is a boolean
indicating if this item is a new record. The second
value will be the created/retrieved model.
"""
new_record, record = get_or_create(session, model, **kwargs)
if new_record:
session.add(record)
return new_record, record
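# Usage sketch (illustrative model and value): look up a Category by name
# and stage it for insertion only when it does not already exist.
#   created, category = add_get_or_create(session, Category, name='Housing')
# Note that filter_by(**kwargs) matches on every supplied keyword argument,
# so all passed properties participate in the lookup.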
def try_add_categories(session, record, category_names, create_categories=True):
"""
Attempts to add the list of provided categories to the resource.
Args:
session: The current database context.
record: The resource to update.
category_names: The list of category names to add
create_categories: If true, will create categories if they don't already exist.
If false, will skip over listed categories that don't already exist.
Defaults to true.
"""
for category_name in category_names:
normalized_name = category_name.strip()
# Are we allowing categories to be created?
if create_categories:
# Try to look up the name of the provided category,
# get/create as necessary
new_category, category_record = add_get_or_create(session,
Category,
name=normalized_name)
else:
# Only look up the category - return None
# if we don't have one
category_record = session.query(Category). \
filter(Category.name == normalized_name). \
first()
# Make sure we got something back and we're not double-adding
if category_record and not category_record in record.categories:
record.categories.append(category_record)
def get_or_create_resource(session, rad_record, lazy=True, create_categories=True):
"""
Checks to see if a resource already exists in the database
and adds it if it does not exist (or is forced to by use of
the lazy argument).
Args:
session: The current database session.
rad_record: The RadRecord to be added.
lazy: If false, forces the record to be added even if it is a duplicate.
Defaults to true.
create_categories: If true, will create categories if they don't already exist.
If false, will skip over listed categories that don't already exist.
Defaults to true.
Returns:
Two values. The first value is a boolean
indicating if a new record was created. The second
value will be the created/updated model.
"""
# Just create a new record always if we're lazy-loading. This avoids
# weirdness in which we're partially updating an item.
if lazy:
new_record = True
record = Resource(name=rad_record.name.strip())
session.add(record)
else:
new_record, record = get_or_create(session, Resource, name=rad_record.name.strip())
record.last_updated = datetime.utcnow()
if new_record:
record.date_created = datetime.utcnow()
if new_record or not lazy:
# See if we have just a normal address field - if not,
# manually construct one by joining all available
# fields with commas
new_address = ''
if hasattr(rad_record, 'address') and \
rad_record.address is not None and \
rad_record.address != '' and \
not rad_record.address.isspace():
new_address = rad_record.address.strip()
else:
new_address = ", ".join(a.strip() for a in [rad_record.street,
rad_record.city, rad_record.state,
rad_record.zipcode, rad_record.country]
if a is not None and a != '' and not a.isspace())
# Address issue 131 - if we're updating an existing
# record, and are changing the address (using a lowercase comparison),
# invalidate the existing geocoding information.
if not new_record and \
record.address is not None and \
record.address.lower() != new_address.lower():
record.latitude = None
record.longitude = None
record.location = None
# Now set the new address
if new_address != '' and not new_address.isspace():
record.address = new_address
else:
record.address = None
# Try to parse out the date_verified field if it's provided
if rad_record.date_verified is not None and \
len(rad_record.date_verified) > 0 and \
not rad_record.date_verified.isspace():
# Try to parse it out using 'YYYY-MM-DD'
try:
record.date_verified = datetime.strptime(rad_record.date_verified,
'%Y-%m-%d').date()
except ValueError:
# Parsing error, clear it out
record.date_verified = None
else:
# Not provided - clear it out
record.date_verified = None
# Copy over all the other fields verbatim
record.organization = rad_record.organization
record.description = rad_record.description
record.email = rad_record.email
record.phone = rad_record.phone
record.fax = rad_record.fax
record.url = rad_record.url
record.hours = rad_record.hours
record.source = rad_record.source
record.npi = rad_record.npi
record.notes = rad_record.notes
record.visible = rad_record.visible
# Do we have a list of category names?
# Failing that, do we have a single category name?
if hasattr(rad_record, 'category_names') and \
rad_record.category_names is not None and \
len(rad_record.category_names) > 0:
# Use the list of category names
try_add_categories(session, record, rad_record.category_names, create_categories)
elif hasattr(rad_record, 'category_name') and \
rad_record.category_name is not None and \
not rad_record.category_name.isspace():
# Use the single category name
try_add_categories(session, record, [rad_record.category_name], create_categories)
session.add(record)
# Flush the session because otherwise we won't pick up
# duplicates with UNIQUE constraints (such as in category names)
# until we get an error trying to commit such duplicates
# (which is bad)
session.flush()
return new_record, record
|
nilq/baby-python
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import math
import re
import signal
from typing import Dict, List, Tuple
import numpy as np
import psutil
MEMORY_SIZE_UNITS = {"K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}
# we use 4 bytes for the block size, which means each block can contain
# up to 4294967296 (2**32) records
BLOCK_SIZE_BIT = 32
def get_node_address() -> str:
"""
Get the IP address used by Ray.
"""
pids = psutil.pids()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
for arglist in proc.cmdline():
for arg in arglist.split(" "):
if arg.startswith("--node-ip-address"):
addr = arg.split("=")[1]
return addr
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
raise Exception("can't find any ray process")
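# Illustrative behavior (hypothetical command line, not from this module):
# a Ray worker started with "--node-ip-address=10.0.0.5" among its arguments
# makes the scan above return "10.0.0.5"; if no Ray process is found, the
# Exception above is raised.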
def register_exit_handler(func):
atexit.register(func)
signal.signal(signal.SIGTERM, func)
signal.signal(signal.SIGINT, func)
def random_split(df, weights, seed=None):
"""
Randomly split the Spark DataFrame or Koalas DataFrame into the given parts.
:param df: the Spark DataFrame or Koalas DataFrame
:param weights: list of doubles as weights with which to split the df.
Weights will be normalized if they don't sum up to 1.0.
:param seed: the seed for sampling.
:return: a list of DataFrames of the same type as the input.
"""
# convert to Spark DataFrame
df, is_spark_df = convert_to_spark(df)
splits = df.randomSplit(weights, seed)
if is_spark_df:
return splits
else:
# convert back to koalas DataFrame
import databricks.koalas as ks # pylint: disable=C0415
return [ks.DataFrame(split) for split in splits]
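# Illustrative usage (hypothetical DataFrame `df`, not from this module):
#   train, test = random_split(df, [0.8, 0.2], seed=42)
# A Spark input returns Spark DataFrames; a Koalas input is converted to
# Spark for the split and each piece is converted back to Koalas.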
def _df_helper(df, spark_callback, koalas_callback):
try:
import pyspark # pylint: disable=C0415
except Exception:
pass
else:
if isinstance(df, pyspark.sql.DataFrame):
return spark_callback(df)
try:
import databricks.koalas as ks # pylint: disable=C0415
except Exception:
pass
else:
if isinstance(df, ks.DataFrame):
return koalas_callback(df)
raise Exception(f"The type: {type(df)} is not supported, only support "
"pyspark.sql.DataFrame and databricks.koalas.DataFrame")
def df_type_check(df):
"""
Check whether the df is a Spark DataFrame or a Koalas DataFrame.
:return: True for a Spark DataFrame or a Koalas DataFrame.
:raise: Exception when it is neither a Spark DataFrame nor a Koalas DataFrame.
"""
return _df_helper(df, lambda d: True, lambda d: True)
def convert_to_spark(df):
"""
Do nothing if the df is a Spark DataFrame; convert it to a Spark DataFrame
if it is a Koalas DataFrame. Raise an Exception otherwise.
:return: a pair of (converted df, whether it is a Spark DataFrame)
"""
return _df_helper(df, lambda d: (d, True), lambda d: (d.to_spark(), False))
def parse_memory_size(memory_size: str) -> int:
"""
Parse a human-readable memory size into bytes.
Adapted from: https://stackoverflow.com/a/60708339
:param memory_size: human-readable memory size
:return: the size in bytes as an int
"""
memory_size = memory_size.strip().upper()
if re.search(r"B", memory_size):
# discard "B"
memory_size = re.sub(r"B", "", memory_size)
try:
return int(memory_size)
except ValueError:
pass
if not re.search(r" ", memory_size):
memory_size = re.sub(r"([KMGT]+)", r" \1", memory_size)
number, unit = [item.strip() for item in memory_size.split()]
return int(float(number) * MEMORY_SIZE_UNITS[unit])
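# Illustrative conversions implied by the rules above (not tests from this
# module):
#   parse_memory_size("300MB") -> 300 * 2**20
#   parse_memory_size("1.5GB") -> int(1.5 * 2**30)
#   parse_memory_size("2048B") -> 2048
# Note that a bare number without a trailing "B" (e.g. "2048") falls through
# to the unit-splitting branch and fails to unpack.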
def divide_blocks(
blocks: List[int],
world_size: int,
shuffle: bool = False,
shuffle_seed: int = None) -> Dict[int, List[int]]:
"""
Divide the blocks into world_size partitions, and return the selected block
indexes and sample counts for every rank
:param blocks: the blocks and each item is the given block size
:param world_size: total world size
:param shuffle: whether shuffle the blocks before divide
:param shuffle_seed: the shuffle seed
:return: a dict, the key is the world rank, and the value is a list of pair of block index
and the samples selected in that block
"""
if len(blocks) < world_size:
raise Exception("do not have enough blocks to divide")
results = {}
# number of blocks per rank
num_blocks_per_rank = int(math.ceil(len(blocks) * 1.0 / world_size))
# number of samples per rank
num_samples_per_rank = int(math.ceil(sum(blocks) * 1.0 / world_size))
# total number of blocks
total_num_blocks = num_blocks_per_rank * world_size
# global block indexes
global_indexes = list(range(len(blocks)))
# add extra blocks to make it evenly divisible
if len(global_indexes) != total_num_blocks:
global_indexes += global_indexes[: (total_num_blocks - len(global_indexes))]
assert len(global_indexes) == total_num_blocks
if shuffle_seed:
np.random.seed(shuffle_seed)
else:
np.random.seed(0)
if shuffle:
np.random.shuffle(global_indexes)
def select(index: int, current_size: int, selected: List[Tuple[int, int]]) -> int:
block_size = blocks[index]
tmp = current_size + block_size
if tmp < num_samples_per_rank:
selected.append((index, block_size))
current_size = tmp
elif tmp >= num_samples_per_rank:
selected.append((index, (num_samples_per_rank - current_size)))
current_size = num_samples_per_rank
return current_size
for rank in range(world_size):
indexes = global_indexes[rank: total_num_blocks: world_size]
assert len(indexes) == num_blocks_per_rank
samples_cur_rank = 0
selected_indexes = []
for i in indexes:
samples_cur_rank = select(i, samples_cur_rank, selected_indexes)
if samples_cur_rank == num_samples_per_rank:
break
while samples_cur_rank < num_samples_per_rank:
index = np.random.choice(global_indexes, size=1)[0]
samples_cur_rank = select(index, samples_cur_rank, selected_indexes)
assert samples_cur_rank == num_samples_per_rank
results[rank] = selected_indexes
return results
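# Worked example derived from the logic above (not from this module):
# divide_blocks([10, 10, 10, 10], world_size=2) gives
# num_blocks_per_rank=2 and num_samples_per_rank=20, so with shuffle=False:
#   {0: [(0, 10), (2, 10)], 1: [(1, 10), (3, 10)]}
# i.e. each rank receives exactly num_samples_per_rank samples from its
# strided block indexes.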
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
from setuptools import setup, find_packages
from urllib.parse import urlparse
with open('requirements.txt', 'r') as f:
install_requires = []
dependency_links = []
append_version = '-' + str(sys.maxsize)
requirements = [ line.strip() for line in f ]
for requirement in requirements:
name = urlparse(requirement)
if name.scheme and name.netloc:
install_requires.append(name.fragment.replace('egg=', ''))
dependency_links.append(requirement + append_version)
else:
install_requires.append(requirement)
setup(name="kochira",
version="0.0",
description="kochira",
author="",
author_email="",
url="",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="kochira",
install_requires=install_requires,
dependency_links=dependency_links,
entry_points="""\
[console_scripts]
kochira = kochira:main
"""
)
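# Illustrative mapping performed by the requirements loop above (the lines
# are hypothetical, not from the real requirements.txt):
#   "requests"                                -> install_requires: "requests"
#   "git+https://example.com/pkg.git#egg=pkg" -> install_requires: "pkg" and
#       dependency_links: the URL with "-<sys.maxsize>" appended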
|
nilq/baby-python
|
python
|
from flask import session, request
from flask_restful import Resource, reqparse, inputs, abort
from api.common.database import database
from api.common.utils import checkTag, checkTime, checkTel
import json
import requests
'''
### sendOfflineCapsule
Use this method to send an offline capsule.
HTTP Request Method: **POST**
| Field | Type | Required | Description |
|---------------|---------|----------|----------------------------------------------------------------|
| sender_name | String | Yes | Sender's name. |
| sender_tel | String | Yes | Sender's telephone number. |
| receiver_name | String | Yes | Receiver's name. |
| receiver_tel | String | Yes | Receiver's telephone number. |
| receiver_addr | String | Yes | Receiver's address. |
| capsule_tag | String | Yes | The tag ID attached on the envelope. |
| period | String | Yes | The period of time capsule. Must be `half-year` or `one-year`. |
| seal | Boolean | Yes | Whether the seal is required. |
'''
parser = reqparse.RequestParser()
parser.add_argument('sender_name', type = str, required = True)
parser.add_argument('sender_tel', type = str, required = True)
parser.add_argument('receiver_name', type = str, required = True)
parser.add_argument('receiver_tel', type = str, required = True)
parser.add_argument('receiver_addr', type = str, required = True)
parser.add_argument('capsule_tag', type = str, required = True)
parser.add_argument('period', type = str, required = True, choices = ('half-year', 'one-year'))
parser.add_argument('seal', type = inputs.boolean, required = True)
class sendOfflineCapsule(Resource):
def post(self):
if checkTime() != 0:
abort(416, message = "Event is not ongoing.")
args = parser.parse_args()
if not checkTel(args["sender_tel"]) or not checkTel(args["receiver_tel"]):
abort(400, message = "Invalid telephone number.")
if not checkTag(args["capsule_tag"]):
abort(400, message = "Invalid capsule tag.")
if not database.getTagStatus(args["capsule_tag"]):
abort(409, message = "The capsule tag already exists.")
database.addOfflineCapsule(args["sender_name"], args["sender_tel"], args["receiver_name"], args["receiver_tel"], args["receiver_addr"], args["capsule_tag"], args["period"], args["seal"])
return {
"receiver_name": args["receiver_name"],
"count": database.getStatisticsByTel(args["receiver_tel"])
}
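# Illustrative request body for the sendOfflineCapsule resource (values are
# hypothetical; field names follow the parser above):
# {
#     "sender_name": "Alice", "sender_tel": "13800000000",
#     "receiver_name": "Bob", "receiver_tel": "13900000000",
#     "receiver_addr": "No.1 Example Road", "capsule_tag": "TAG001",
#     "period": "half-year", "seal": true
# }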
|
nilq/baby-python
|
python
|
""" Read a set of input files for the child oids
and generate a SQL file that queries for the master
records changed by those OID. This one uses an IN
clause instead of the simple query to test relative
performance of the two
I am using the therory that runing the commands
directly from psql should yield
the highest achivable performance since they should have
optimized the command line client.
"""
import sys
import os
def quote(s):
return "\'" + s + "\'"
MaxInItems = 500
# Process input file reading line by line.
# Break it up into chunks and generate
# a psql file with separate insert statements
# for each chunk
def processFile(fname, fout):
fin = open(fname)
hdr = fin.readline()
buf = []
insStr = "INSERT INTO omap(chiloid, chiltbl, paroid, partbl) VALUES"
while True:
dline = fin.readline().strip()
if dline:
flds = dline.split(",")
#print("flds=", flds)
partbl = flds[0]
paroid = flds[1]
chiltbl = flds[2]
chiloid = flds[3]
buf.append(quote(chiloid))
if (len(buf) > MaxInItems) or (not dline):
if len(buf) > 0:
fout.write("SELECT DISTINCT paroid, partbl FROM omap WHERE omap.chiloid IN ( ");
sout = ", ".join(buf)
fout.write(sout)
fout.write(" );\n")
buf = []
else:
break
def printMsg():
print("Usage: python generateInQueries.py inFiName outFiName")
# MAIN
if len(sys.argv) < 3:
raise ValueError('not enough parameters')
foutName = sys.argv[2]
fout = open(foutName, "w")
fout.write("\\c oidmap\n\o data/log/in_query.RESULTS.txt\n")
fnameIn = sys.argv[1]
print ("fnameIn=", fnameIn, "foutName=", foutName)
if not os.path.isfile(fnameIn):
printMsg()
raise ValueError("Could not find file " + str(fnameIn))
processFile(fnameIn, fout)
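# Shape of one generated statement (OIDs are hypothetical):
#   SELECT DISTINCT paroid, partbl FROM omap WHERE omap.chiloid IN ( 'oid-1', 'oid-2' );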
|
nilq/baby-python
|
python
|
""""""
import os
import sys
import uuid
import bz2
import pickle
import traceback
import zlib
import json
from abc import ABC
from copy import copy
from typing import Any, Callable
from logging import INFO, ERROR
from datetime import datetime
from vnpy.trader.constant import Interval, Direction, Offset, Status, OrderType
from vnpy.trader.object import BarData, TickData, OrderData, TradeData
from vnpy.trader.utility import virtual, append_data, extract_vt_symbol, get_underlying_symbol
from .base import StopOrder
from vnpy.component.cta_grid_trade import CtaGrid, CtaGridTrade
from vnpy.component.cta_position import CtaPosition
from vnpy.component.cta_policy import CtaPolicy
class CtaTemplate(ABC):
"""CTA策略模板"""
author = ""
parameters = []
variables = []
# Dict mapping order id -> info about that order
# key: the order id
# value: the order info related to that contract
active_orders = {}
def __init__(
self,
cta_engine: Any,
strategy_name: str,
vt_symbol: str,
setting: dict,
):
""""""
self.cta_engine = cta_engine
self.strategy_name = strategy_name
self.vt_symbol = vt_symbol
self.inited = False  # whether initialization has finished
self.trading = False  # whether trading has started
self.pos = 0  # net position / position delta
self.entrust = 0  # pending-order flag: 0 none, 1 LONG, -1 SHORT
self.tick_dict = {}  # latest tick received in on_tick, per symbol
self.active_orders = {}
# Copy a new variables list here to avoid duplicate insert when multiple
# strategy instances are created with the same strategy class.
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.variables.insert(2, "pos")
self.variables.insert(3, "entrust")
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
def get_parameters(self):
"""
Get strategy parameters dict.
"""
strategy_parameters = {}
for name in self.parameters:
strategy_parameters[name] = getattr(self, name)
return strategy_parameters
def get_variables(self):
"""
Get strategy variables dict.
"""
strategy_variables = {}
for name in self.variables:
strategy_variables[name] = getattr(self, name)
return strategy_variables
def get_data(self):
"""
Get strategy data.
"""
strategy_data = {
"strategy_name": self.strategy_name,
"vt_symbol": self.vt_symbol,
"class_name": self.__class__.__name__,
"author": self.author,
"parameters": self.get_parameters(),
"variables": self.get_variables(),
}
return strategy_data
def get_positions(self):
""" 返回持仓数量"""
pos_list = []
if self.pos > 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "long",
"volume": self.pos
})
elif self.pos < 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "short",
"volume": abs(self.pos)
})
return pos_list
@virtual
def on_timer(self):
pass
@virtual
def on_init(self):
"""
Callback when strategy is inited.
"""
pass
@virtual
def on_start(self):
"""
Callback when strategy is started.
"""
pass
@virtual
def on_stop(self):
"""
Callback when strategy is stopped.
"""
pass
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
@virtual
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
pass
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
@virtual
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
def buy(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send buy order to open a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def sell(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send sell order to close a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK sell委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def short(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send short order to open as short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK short委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
def cover(self, price: float, volume: float, stop: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send cover order to close a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK cover委托')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
order_type=order_type,
order_time=order_time,
grid=grid)
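# Illustrative call (hypothetical price/volume/symbol, not from this file):
#   vt_orderids = self.buy(price=4100.0, volume=1, vt_symbol='rb2105.SHFE')
# buy/sell/short/cover all funnel into send_order below with the matching
# Direction/Offset pair.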
def send_order(
self,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool = False,
order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None,
grid: CtaGrid = None
):
"""
Send a new order.
"""
# For compatibility with the cta_strategy template: when vt_symbol is not
# specified, fall back to the strategy's configured vt_symbol
if vt_symbol == '':
vt_symbol = self.vt_symbol
if not self.trading:
return []
vt_orderids = self.cta_engine.send_order(
strategy=self,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop=stop,
order_type=order_type
)
if order_time is None:
order_time = datetime.now()
for vt_orderid in vt_orderids:
d = {
'direction': direction,
'offset': offset,
'vt_symbol': vt_symbol,
'price': price,
'volume': volume,
'order_type': order_type,
'traded': 0,
'order_time': order_time,
'status': Status.SUBMITTING
}
if grid:
d.update({'grid': grid})
grid.order_ids.append(vt_orderid)
self.active_orders.update({vt_orderid: d})
if direction == Direction.LONG:
self.entrust = 1
elif direction == Direction.SHORT:
self.entrust = -1
return vt_orderids
def cancel_order(self, vt_orderid: str):
"""
Cancel an existing order.
"""
if self.trading:
return self.cta_engine.cancel_order(self, vt_orderid)
return False
def cancel_all(self):
"""
Cancel all orders sent by strategy.
"""
if self.trading:
self.cta_engine.cancel_all(self)
def is_upper_limit(self, symbol):
"""Whether the symbol is at its limit-up price"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_up is None or tick.limit_up == 0:
return False
return tick.bid_price_1 == tick.limit_up
def is_lower_limit(self, symbol):
"""Whether the symbol is at its limit-down price"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_down is None or tick.limit_down == 0:
return False
return tick.ask_price_1 == tick.limit_down
def write_log(self, msg: str, level: int = INFO):
"""
Write a log message.
"""
self.cta_engine.write_log(msg=msg, strategy_name=self.strategy_name, level=level)
def write_error(self, msg: str):
"""write error log message"""
self.write_log(msg=msg, level=ERROR)
def get_engine_type(self):
"""
Return whether the cta_engine is backtesting or live trading.
"""
return self.cta_engine.get_engine_type()
def load_bar(
self,
days: int,
interval: Interval = Interval.MINUTE,
callback: Callable = None,
):
"""
Load historical bar data for initializing strategy.
"""
if not callback:
callback = self.on_bar
self.cta_engine.load_bar(self.vt_symbol, days, interval, callback)
def load_tick(self, days: int):
"""
Load historical tick data for initializing strategy.
"""
self.cta_engine.load_tick(self.vt_symbol, days, self.on_tick)
def put_event(self):
"""
Put a strategy data event for UI update.
"""
if self.inited:
self.cta_engine.put_strategy_event(self)
def send_email(self, msg):
"""
Send email to default receiver.
"""
if self.inited:
self.cta_engine.send_email(msg, self)
def sync_data(self):
"""
Sync strategy variables value into disk storage.
"""
if self.trading:
self.cta_engine.sync_strategy_data(self)
class CtaFutureTemplate(CtaTemplate):
"""
Futures contract template
"""
price_tick = 1  # minimum price increment of the instrument
symbol_size = 10  # contract multiplier of the instrument
margin_rate = 0.1  # margin rate of the instrument
volumn_tick = 1  # minimum tradable volume increment
# order type
order_type = OrderType.LIMIT
cancel_seconds = 120  # order-cancel timeout (seconds)
activate_market = False
# capital settings
max_invest_rate = 0.1  # maximum position ratio (0~1)
max_invest_margin = 0  # capital cap; 0 means unlimited
max_invest_pos = 0  # one-direction position cap; 0 means unlimited
# whether running in backtest mode
backtesting = False
# field names for the decision-process log
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price', 'margin',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos', 'short_pos']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
self.position = None  # position management component
self.policy = None  # transaction/policy component
self.gt = None  # grid trading component
self.klines = {}  # kline components: kline_name -> kline
self.price_tick = 1  # minimum price increment of the instrument
self.symbol_size = 10  # contract multiplier of the instrument
self.margin_rate = 0.1  # margin rate of the instrument
self.volumn_tick = 1  # minimum tradable volume increment
self.cancel_seconds = 120  # order-cancel timeout (seconds)
self.activate_market = False
self.order_type = OrderType.LIMIT
self.backtesting = False
self.cur_datetime: datetime = None  # current tick time
self.cur_tick: TickData = None  # latest tick of vt_symbol
self.cur_price = None  # current price (dominant contract vt_symbol)
self.account_pos = None  # account position info for vt_symbol
self.last_minute = None  # last minute seen, for per-minute logic in on_tick
self.display_bars = True
super().__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# position management module
self.position = CtaPosition(strategy=self)
self.position.maxPos = sys.maxsize
# grid persistence module
self.gt = CtaGridTrade(strategy=self)
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
self.volumn_tick = self.cta_engine.get_volume_tick(self.vt_symbol)
if self.activate_market:
self.write_log(f'{self.strategy_name}使用市价单委托方式')
self.order_type = OrderType.MARKET
else:
if not self.backtesting:
self.cancel_seconds = 10
self.write_log(f'实盘撤单时间10秒')
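# Illustrative settings dict (hypothetical values; only keys listed in
# self.parameters take effect):
#   strategy.update_setting({'backtesting': True, 'activate_market': False})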
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
Save kline data to the cache
:param kline_names: usually the keys of self.klines
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# resolve the save path
save_path = self.cta_engine.get_data_path()
# name of the cache file
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
Load kline data from the cache
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# restore klines one by one
for kline_name in kline_names:
# cached kline instance
cache_kline = klines.get(kline_name, None)
# kline instance of the current strategy
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# temporarily keep the current callback
cb_on_bar = strategy_kline.cb_on_bar
# cached instance data => current instance data
strategy_kline.__dict__.update(cache_kline.__dict__)
# latest end time across all klines
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# re-bind the kline's strategy and on_bar callback
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据,最新bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
def init_policy(self):
self.write_log(u'init_policy(),初始化执行逻辑')
if self.policy:
self.policy.load()
def init_position(self):
"""
Initialize positions
Use grid persistence to load opened long/short grids and update state
:return:
"""
self.write_log(u'init_position(),初始化持仓')
changed = False
if len(self.gt.up_grids) <= 0:
self.position.short_pos = 0
# load opened short grids from the grid JSON
short_grids = self.gt.load(direction=Direction.SHORT, open_status_filter=[True])
if len(short_grids) == 0:
self.write_log(u'没有持久化的空单数据')
self.gt.up_grids = []
else:
self.gt.up_grids = short_grids
for sg in short_grids:
if len(sg.order_ids) > 0 or sg.order_status:
self.write_log(f'重置委托状态:{sg.order_status},清除委托单:{sg.order_ids}')
sg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in sg.order_ids]
sg.order_ids = []
changed = True
self.write_log(u'加载持仓空单[{},价格:{},数量:{}手,开仓时间:{}'
.format(self.vt_symbol, sg.open_price,
sg.volume, sg.open_time))
self.position.short_pos = round(self.position.short_pos - sg.volume, 7)
self.write_log(u'持久化空单,共持仓:{}手'.format(abs(self.position.short_pos)))
if len(self.gt.dn_grids) <= 0:
# load opened long grids from the grid JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in lg.order_ids]
lg.order_ids = []
changed = True
self.write_log(u'加载持仓多单[{},价格:{},数量:{}手, 开仓时间:{}'
.format(self.vt_symbol, lg.open_price, lg.volume, lg.open_time))
self.position.long_pos = round(self.position.long_pos + lg.volume, 7)
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
self.position.pos = round(self.position.long_pos + self.position.short_pos, 7)
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
if changed:
self.gt.save()
self.display_grids()
def get_positions(self):
"""
Get the strategy's current positions (refactored; uses the dominant contract)
:return: [{'vt_symbol': symbol, 'direction': direction, 'volume': volume}]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
pos_list.append({'vt_symbol': self.vt_symbol,
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': g.open_price})
if abs(self.position.short_pos) > 0:
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
pos_list.append({'vt_symbol': self.vt_symbol,
'direction': 'short',
'volume': abs(g.volume - g.traded_volume),
'price': g.open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_trade(self, trade: TradeData):
"""交易更新"""
self.write_log(u'{},交易更新:{},当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['margin'] = trade.price * trade.volume * self.cta_engine.get_margin_rate(trade.vt_symbol)
dist_record['symbol'] = trade.vt_symbol
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新,{}'.format(self.cur_datetime, order.__dict__))
if order.vt_orderid in self.active_orders:
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
# an opening order was cancelled
self.on_order_open_canceled(order)
elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
# a closing order was cancelled
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if order.offset == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
Order fully filled
:param order:
:return:
"""
self.write_log(u'{},委托单:{}全部完成'.format(order.time, order.vt_orderid))
order_info = self.active_orders[order.vt_orderid]
# find the grid associated with this vt_orderid
grid = order_info.get('grid', None)
if grid is not None:
# remove the current order id
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
# all of the grid's orders have completed
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
# closing finished (cover, sell)
if order.offset != Offset.OPEN:
grid.open_status = False
grid.close_status = True
if grid.volume < order.traded:
self.write_log(f'网格平仓数量{grid.volume},小于委托单成交数量:{order.volume},修正为:{order.volume}')
grid.volume = order.traded
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
# opening finished (buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
# the grid's orders are only partially completed
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
grid.traded_volume = round(grid.traded_volume, 7)
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
# remove from the strategy's active orders
self.write_log(f'委托单{order.vt_orderid}完成,从活动订单中移除')
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
An opening order was cancelled
:param order:
:return:
"""
self.write_log(u'委托开仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
# update the pending-order entry in place: volume and retry count
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 委托信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分开仓:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = grid.traded_volume
grid.traded_volume = 0
grid.open_status = True
self.write_log(f'开仓完成,grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'委托平仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
# update the pending-order entry in place: volume and retry count
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分平仓成交:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = round(grid.volume - grid.traded_volume, 7)
grid.traded_volume = 0
if grid.volume <= 0:
grid.volume = 0
grid.open_status = False
self.write_log(f'强制全部平仓完成')
else:
self.write_log(f'平仓委托中,撤单完成,部分成交,减少持仓grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
self.write_log(f'停止单触发:{stop_order.__dict__}')
def grid_check_stop(self):
"""
Check each grid for stop-loss/take-profit (based on the index price)
:return:
"""
if self.entrust != 0:
return
if not self.trading and not self.inited:
self.write_error(u'当前不允许交易')
return
# check long grids one by one for stop-loss/take-profit:
long_grids = self.gt.get_opened_grids(direction=Direction.LONG)
for g in long_grids:
if g.stop_price > 0 and g.stop_price > self.cur_price and g.open_status and not g.order_status:
# invoke the closing module
self.write_log(u'{} {}当前价:{} 触发多单止损线{},开仓价:{},v:{}'.
format(self.cur_datetime,
self.vt_symbol,
self.cur_price,
g.stop_price,
g.open_price,
g.volume))
if self.grid_sell(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
# short-grid stop-loss check
short_grids = self.gt.get_opened_grids(direction=Direction.SHORT)
for g in short_grids:
if g.stop_price > 0 and g.stop_price < self.cur_price and g.open_status and not g.order_status:
# grid stop triggered
self.write_log(u'{} {}当前价:{} 触发空单止损线:{}, 开仓价:{},v:{}'.
format(self.cur_datetime, self.vt_symbol, self.cur_price, g.stop_price,
g.open_price, g.volume))
if self.grid_cover(g):
self.write_log(u'空单止盈/止损委托成功')
else:
self.write_error(u'委托空单平仓失败')
def grid_buy(self, grid):
"""
Open a long position (transactional)
:return:
"""
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
vt_orderids = self.buy(vt_symbol=self.vt_symbol,
price=buy_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务多单,开仓价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price, grid.stop_price))
self.gt.dn_grids.append(grid)
self.gt.save()
return True
else:
self.write_error(u'创建{}事务多单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_short(self, grid):
"""
Open a short position (transactional)
:return:
"""
if self.backtesting:
short_price = self.cur_price - self.price_tick
else:
short_price = self.cur_tick.bid_price_1
vt_orderids = self.short(vt_symbol=self.vt_symbol,
price=short_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务空单,事务开空价:{},当前价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, self.cur_price, grid.volume, grid.close_price,
grid.stop_price))
self.gt.up_grids.append(grid)
self.gt.save()
return True
else:
self.write_error(u'创建{}事务空单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_sell(self, grid):
"""
Close a long position (transactional)
1. Triggered by stop-loss/take-profit closing
:param grid: the grid to close
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
"""
self.account_pos = self.cta_engine.get_position(
vt_symbol=self.vt_symbol,
direction=Direction.NET)
if self.account_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.vt_symbol))
return False
"""
# send the sell order
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
# send the close-long order
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
"""
if self.account_pos.volume <= 0:
self.write_error(u'当前{}的净持仓:{},不能平多单'
.format(self.vt_symbol,
self.account_pos.volume))
return False
if self.account_pos.volume < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.vt_symbol,
self.account_pos.volume,
grid.volume))
grid.volume = self.account_pos.volume
"""
vt_orderids = self.sell(
vt_symbol=self.vt_symbol,
price=sell_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def grid_cover(self, grid):
"""
Close a short position (transactional)
1. Triggered by stop-loss/take-profit closing
:param grid: the grid to close
:return:
"""
self.write_log(u'执行事务平空仓位:{}'.format(grid.to_json()))
"""
self.account_pos = self.cta_engine.get_position(
vt_symbol=self.vt_symbol,
direction=Direction.NET)
if self.account_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.vt_symbol))
return False
"""
# send the order
if self.backtesting:
cover_price = self.cur_price + self.price_tick
else:
cover_price = self.cur_tick.ask_price_1
# send the cover order
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
"""
if self.account_pos.volume >= 0:
self.write_error(u'当前{}的净持仓:{},不能平空单'
.format(self.vt_symbol,
self.account_pos.volume))
return False
if abs(self.account_pos.volume) < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.vt_symbol,
self.account_pos.volume,
grid.volume))
grid.volume = abs(self.account_pos.volume)
"""
vt_orderids = self.cover(
price=cover_price,
vt_symbol=self.vt_symbol,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'空单平仓委托失败')
return False
else:
self.write_log(u'空单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def cancel_all_orders(self):
"""
Override: cancel all in-flight orders
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
# only handle unfilled limit orders
if order_status in [Status.SUBMITTING, Status.NOTTRADED] and order_type == OrderType.LIMIT:
if over_seconds > self.cancel_seconds or force:  # unfilled past the configured timeout
self.write_log(u'超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_log(u'撤单失败,更新状态为撤单成功')
order_info.update({'status': Status.CANCELLED})
self.active_orders.update({vt_orderid: order_info})
if order_grid and vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
continue
# handle orders whose status is 'cancelled'
elif order_status == Status.CANCELLED:
self.write_log(u'委托单{}已成功撤单,删除{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if reopen:
# cancelled opening orders need to be re-submitted
if order_info['offset'] == Offset.OPEN:
self.write_log(u'超时撤单后,重新开仓')
# short-opening order
if order_info['direction'] == Direction.SHORT:
if self.backtesting:
short_price = self.cur_price - self.price_tick
else:
short_price = self.cur_tick.bid_price_1
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开空委托,开空价{},v:{}'.format(order_vt_symbol, short_price, order_volume))
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderid:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': short_price})
else:
self.write_error(u'撤单后,重新委托开空仓失败')
else:
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开多委托,开多价{},v:{}'.format(order_vt_symbol, buy_price, order_volume))
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': buy_price})
else:
self.write_error(u'撤单后,重新委托开多仓失败')
else:
# a close-long order
if order_info['direction'] == Direction.SHORT:
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
self.write_log(u'重新提交{}平多委托,{},v:{}'.format(order_vt_symbol, sell_price, order_volume))
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平多仓失败')
# a close-short order
else:
if self.backtesting:
cover_price = self.cur_price + self.price_tick
else:
cover_price = self.cur_tick.ask_price_1
self.write_log(u'重新提交{}平空委托,委托价{},v:{}'.format(order_vt_symbol, cover_price, order_volume))
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平空仓失败')
else:
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0 \
and not order_grid.open_status \
and not order_grid.order_status \
and order_grid.traded_volume == 0:
self.write_log(u'移除从未开仓成功的委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
# remove orders that were cancelled
for vt_orderid in canceled_ids:
self.write_log(f'活动订单撤单成功,移除{vt_orderid}')
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
self.account_pos = self.cta_engine.get_position(vt_symbol=self.vt_symbol, direction=Direction.NET)
if self.account_pos:
self.write_log(
f'账号{self.vt_symbol}持仓:{self.account_pos.volume}, 冻结:{self.account_pos.frozen}, 盈亏:{self.account_pos.pnl}')
up_grids_info = ""
for grid in list(self.gt.up_grids):
if not grid.open_status and grid.order_status:
up_grids_info += f'平空中: [已平:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
up_grids_info += f'委托单号:{grid.order_ids}'
continue
if grid.open_status and not grid.order_status:
up_grids_info += f'持空中: [数量:{grid.volume}, 开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status:
up_grids_info += f'开空中: [已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
up_grids_info += f'委托单号:{grid.order_ids}'
dn_grids_info = ""
for grid in list(self.gt.dn_grids):
if not grid.open_status and grid.order_status:
dn_grids_info += f'平多中: [已平:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
continue
if grid.open_status and not grid.order_status:
dn_grids_info += f'持多中: [数量:{grid.volume}, 开仓价:{grid.open_price},开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status:
dn_grids_info += f'开多中: [已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
dn_grids_info += f'委托单号:{grid.order_ids}'
if len(up_grids_info) > 0:
self.write_log(up_grids_info)
if len(dn_grids_info) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前 {}价格:{}, 委托状态:{}'
.format(self.cur_datetime, self.vt_symbol, self.cur_price, self.entrust))
if len(self.active_orders) > 0:
self.write_log('当前活动订单:{}'.format(self.active_orders))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
if policy:
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(json.dumps(policy.to_json(), indent=2, ensure_ascii=False)))
def save_dist(self, dist_data):
"""
Save the strategy decision-process records to a csv file
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
if 'margin' not in dist_data:
dist_data.update({'margin': dist_data.get('price', 0) * dist_data.get('volume',
0) * self.cta_engine.get_margin_rate(
dist_data.get('symbol', self.vt_symbol))})
if 'datetime' not in dist_data:
dist_data.update({'datetime': self.cur_datetime})
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
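# Illustrative dist_data payload (hypothetical values; keys follow
# dist_fieldnames above):
#   self.save_dist({'operation': 'buy', 'price': 4100.0, 'volume': 1,
#                   'symbol': self.vt_symbol})
# Missing 'datetime', 'margin', 'long_pos' and 'short_pos' entries are
# filled in automatically before the row is appended to the csv.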
def save_tns(self, tns_data):
"""
Save long/short transaction records to a csv file for later analysis
:param tns_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
class CtaSpotTemplate(CtaTemplate):
"""
Spot trading template
"""
asset_symbol = "" # 资产币 BTCUSDT => BTC
quote_symbol = "" # 定价币 BTCUSDT => USDT
price_tick = 0.01 # 商品的最小价格跳动
symbol_size = 1 # 商品得合约乘数
margin_rate = 1 # 商品的保证金
volumn_tick = 0.01 # 商品最小成交数量
# 委托类型
order_type = OrderType.LIMIT
cancel_seconds = 120 # 撤单时间(秒)
activate_market = False
# 资金相关
max_invest_rate = 0.1 # 最大仓位(0~1) asset / virtual_quote
max_invest_margin = 0 # 资金上限 0,不限制 virtual_quote
max_invest_pos = 0 # 单向头寸数量上限 0,不限制 asset
# 是否回测状态
backtesting = False
# 逻辑过程日志
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price', 'margin',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
# vt_symbol => symbol, exchange
self.symbol, self.exchange = extract_vt_symbol(vt_symbol)
self.position = None  # position management component
self.policy = None  # transaction/policy component
self.gt = None  # grid trading component
self.klines = {}  # kline components: kline_name -> kline
self.price_tick = 0.01  # minimum price increment of the instrument
self.symbol_size = 1  # contract multiplier of the instrument
self.margin_rate = 1  # margin rate of the instrument
self.volumn_tick = 0.01  # minimum tradable volume increment
self.cancel_seconds = 120  # order-cancel timeout (seconds)
self.activate_market = False
self.order_type = OrderType.LIMIT
self.backtesting = False
self.cur_datetime: datetime = None  # current tick time
self.cur_tick: TickData = None  # latest tick of vt_symbol
self.cur_price = None  # current price (dominant contract vt_symbol)
self.asset_pos = None  # current position info of asset_symbol
self.quote_pos = None  # current position info of quote_symbol
self.last_minute = None  # last minute seen, for per-minute logic in on_tick
self.display_bars = True
super().__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# position management module
self.position = CtaPosition(strategy=self)
self.position.maxPos = sys.maxsize
# grid persistence module
self.gt = CtaGridTrade(strategy=self)
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
self.volumn_tick = self.cta_engine.get_volume_tick(self.vt_symbol)
# check that asset symbol + quote symbol match vt_symbol
if self.symbol != f'{self.asset_symbol}{self.quote_symbol}':
raise Exception(f'{self.vt_symbol}与{self.asset_symbol}+{self.quote_symbol}不匹配')
if self.activate_market:
self.write_log(f'{self.strategy_name}使用市价单委托方式')
self.order_type = OrderType.MARKET
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
Save kline data to the cache
:param kline_names: usually the keys of self.klines
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# resolve the save path
save_path = self.cta_engine.get_data_path()
# name of the cache file
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
Load kline data from the cache
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# restore klines one by one
for kline_name in kline_names:
# cached kline instance
cache_kline = klines.get(kline_name, None)
# kline instance of the current strategy
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# temporarily keep the current callback
cb_on_bar = strategy_kline.cb_on_bar
# cached instance data => current instance data
strategy_kline.__dict__.update(cache_kline.__dict__)
# latest end time across all klines
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# re-bind the kline's strategy and on_bar callback
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据,最新bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
def init_policy(self):
self.write_log(u'init_policy(),初始化执行逻辑')
if self.policy:
self.policy.load()
def init_position(self):
"""
Initialize positions
Use grid persistence to load opened long grids and update state
:return:
"""
self.write_log(u'init_position(),初始化持仓')
changed = False
if len(self.gt.dn_grids) <= 0:
# load opened long grids from the grid JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
[self.cancel_order(vt_orderid) for vt_orderid in lg.order_ids]
lg.order_ids = []
changed = True
self.write_log(u'加载持仓多单[{},价格:{},数量:{}手, 开仓时间:{}'
.format(lg.vt_symbol, lg.open_price, lg.volume, lg.open_time))
self.position.long_pos = round(self.position.long_pos + lg.volume, 7)
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
self.position.pos = round(self.position.long_pos + self.position.short_pos, 7)
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
if changed:
self.gt.save()
self.display_grids()
def get_positions(self):
"""
Get the strategy's current positions (refactored; uses the dominant contract)
:return: [{'vt_symbol': symbol, 'direction': direction, 'volume': volume}]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
pos_list.append({'vt_symbol': f'{self.asset_symbol}.{self.exchange.value}',
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': g.open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_trade(self, trade: TradeData):
"""交易更新"""
self.write_log(u'{},交易更新:{},当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['margin'] = trade.price * trade.volume * self.cta_engine.get_margin_rate(trade.vt_symbol)
dist_record['symbol'] = trade.vt_symbol
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新,{}'.format(self.cur_datetime, order.__dict__))
if order.vt_orderid in self.active_orders:
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
                # The opening order was cancelled
self.on_order_open_canceled(order)
elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
                # The closing order was cancelled
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if order.offset == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
订单全部成交
:param order:
:return:
"""
self.write_log(u'{},委托单:{}全部完成'.format(order.time, order.vt_orderid))
order_info = self.active_orders[order.vt_orderid]
        # Locate the corresponding grid via vt_orderid
grid = order_info.get('grid', None)
if grid is not None:
            # Remove the current order id
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
            # All of the grid's orders have been executed
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
                # Closing completed (cover, sell)
if order.offset != Offset.OPEN:
grid.open_status = False
grid.close_status = True
                    if grid.volume < order.traded:
                        self.write_log(f'网格平仓数量{grid.volume},小于委托单成交数量:{order.traded},修正为:{order.traded}')
                        grid.volume = order.traded
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
                # Opening completed (buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
            # Only part of the grid's orders have been executed
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
grid.traded_volume = round(grid.traded_volume, 7)
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
        # Remove from the strategy's active orders
self.write_log(f'委托单{order.vt_orderid}完成,从活动订单中移除')
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
委托开仓单撤销
:param order:
:return:
"""
self.write_log(u'委托开仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
        # Update the pending order record directly: traded volume and retry count
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 委托信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分开仓:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = grid.traded_volume
grid.traded_volume = 0
grid.open_status = True
self.write_log(f'开仓完成,grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'委托平仓单撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
        # Update the pending order record directly: traded volume and retry count
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'{} 订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
grid = old_order.get('grid', None)
pre_status = old_order.get('status', Status.NOTTRADED)
if pre_status == Status.CANCELLED:
self.write_log(f'当前状态已经是{Status.CANCELLED},不做调整处理')
return
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order.traded > 0:
pre_traded_volume = grid.traded_volume
grid.traded_volume = round(grid.traded_volume + order.traded, 7)
self.write_log(f'撤单中部分平仓成交:{order.traded} + 原已成交:{pre_traded_volume} => {grid.traded_volume}')
if len(grid.order_ids) == 0:
grid.order_status = False
if grid.traded_volume > 0:
pre_volume = grid.volume
grid.volume = round(grid.volume - grid.traded_volume, 7)
grid.traded_volume = 0
if grid.volume <= 0:
grid.volume = 0
grid.open_status = False
self.write_log(f'强制全部平仓完成')
else:
self.write_log(f'平仓委托中,撤单完成,部分成交,减少持仓grid.volume {pre_volume} => {grid.volume}')
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
self.write_log(f'停止单触发:{stop_order.__dict__}')
def grid_check_stop(self):
"""
网格逐一止损/止盈检查 (根据指数价格进行止损止盈)
:return:
"""
if self.entrust != 0:
return
        if not self.trading or not self.inited:
self.write_error(u'当前不允许交易')
return
        # Check each long grid for stop-loss/take-profit:
long_grids = self.gt.get_opened_grids(direction=Direction.LONG)
for g in long_grids:
if g.stop_price > 0 and g.stop_price > self.cur_price and g.open_status and not g.order_status:
                # Invoke the closing module
self.write_log(u'{} {}当前价:{} 触发多单止损线{},开仓价:{},v:{}'.
format(self.cur_datetime,
g.vt_symbol,
self.cur_price,
g.stop_price,
g.open_price,
g.volume))
if self.grid_sell(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
def grid_buy(self, grid):
"""
事务开多仓
:return:
"""
if self.backtesting:
buy_price = self.cur_price + self.price_tick
else:
buy_price = self.cur_tick.ask_price_1
if self.quote_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.quote_symbol))
return False
vt_orderids = self.buy(vt_symbol=self.vt_symbol,
price=buy_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'创建{}事务多单,开仓价:{},数量:{},止盈价:{},止损价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price, grid.stop_price))
self.gt.save()
return True
else:
self.write_error(u'创建{}事务多单,委托失败,开仓价:{},数量:{},止盈价:{}'
.format(grid.type, grid.open_price, grid.volume, grid.close_price))
return False
def grid_sell(self, grid):
"""
事务平多单仓位
1.来源自止损止盈平仓
:param 平仓网格
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
if self.asset_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(self.asset_symbol))
return False
        # Determine the sell price
if self.backtesting:
sell_price = self.cur_price - self.price_tick
else:
sell_price = self.cur_tick.bid_price_1
        # Deduct any partially traded volume before sending the close order
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.volume = round(grid.volume, 7)
grid.traded_volume = 0
if self.asset_pos.volume <= 0:
self.write_error(u'当前{}的净持仓:{},不能平多单'
.format(self.asset_symbol,
self.asset_pos.volume))
return False
if self.asset_pos.volume < grid.volume:
self.write_error(u'当前{}的净持仓:{},不满足平仓目标:{}, 强制降低'
.format(self.asset_symbol,
self.asset_pos.volume,
grid.volume))
grid.volume = self.asset_pos.volume
vt_orderids = self.sell(
vt_symbol=self.vt_symbol,
price=sell_price,
volume=grid.volume,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=grid)
        if len(vt_orderids) == 0:
            self.write_error(u'多单平仓委托失败')
            return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
def cancel_all_orders(self):
"""
重载撤销所有正在进行得委托
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
            # Only handle unfilled limit orders
if order_status in [Status.SUBMITTING, Status.NOTTRADED] and order_type == OrderType.LIMIT:
                if over_seconds > self.cancel_seconds or force:  # unfilled beyond the configured timeout
self.write_log(u'超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_log(u'撤单失败,更新状态为撤单成功')
order_info.update({'status': Status.CANCELLED})
self.active_orders.update({vt_orderid: order_info})
if order_grid and vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
continue
            # Handle orders whose status is already 'cancelled'
elif order_status == Status.CANCELLED:
self.write_log(u'委托单{}已成功撤单,删除{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0 \
and not order_grid.open_status \
and not order_grid.order_status \
and order_grid.traded_volume == 0:
self.write_log(u'移除从未开仓成功的委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
        # Remove the cancelled orders
for vt_orderid in canceled_ids:
self.write_log(f'活动订单撤单成功,移除{vt_orderid}')
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
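        # Lifecycle summary of the logic above: SUBMITTING/NOTTRADED limit orders
        # past cancel_seconds are marked CANCELLING and a cancel request is sent;
        # entries already CANCELLED are purged, and a grid that never opened and
        # has no remaining orders is removed from the grid trader.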
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
        self.asset_pos = self.cta_engine.get_position(vt_symbol=f'{self.asset_symbol}.{self.exchange.value}', direction=Direction.NET)
if self.asset_pos:
self.write_log(
f'账号{self.asset_symbol}持仓:{self.asset_pos.volume}, 冻结:{self.asset_pos.frozen}')
self.quote_pos = self.cta_engine.get_position(vt_symbol=f'{self.quote_symbol}.{self.exchange.value}', direction=Direction.NET)
if self.quote_pos:
self.write_log(
f'账号{self.quote_symbol}持仓:{self.quote_pos.volume}, 冻结:{self.quote_pos.frozen}')
dn_grids_info = ""
for grid in list(self.gt.dn_grids):
if grid.close_status and not grid.open_status and grid.order_status:
dn_grids_info += f'平多中: {grid.vt_symbol}[已平:{grid.traded_volume} => 目标:{grid.volume}, 平仓价格:{grid.close_price},委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
                    dn_grids_info += f'委托单号:{grid.order_ids}\n'
continue
if grid.open_status and not grid.order_status and not grid.close_status:
dn_grids_info += f'持多中: {grid.vt_symbol}[数量:{grid.volume}, 开仓价格:{grid.open_price},开仓时间:{grid.open_time}]\n'
continue
if not grid.open_status and grid.order_status and not grid.close_status:
dn_grids_info += f'开多中: {grid.vt_symbol}[已开:{grid.traded_volume} => 目标:{grid.volume}, 委托时间:{grid.order_time}]\n'
if len(grid.order_ids) > 0:
                    dn_grids_info += f'委托单号:{grid.order_ids}\n'
if len(dn_grids_info) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前 {}价格:{}, 委托状态:{}'
.format(self.cur_datetime, self.vt_symbol, self.cur_price, self.entrust))
if len(self.active_orders) > 0:
self.write_log('当前活动订单数:{}'.format(len(self.active_orders))) #json.dumps(self.active_orders, indent=2, ensure_ascii=False)))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
if policy:
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(json.dumps(policy.to_json(), indent=2, ensure_ascii=False)))
def save_dist(self, dist_data):
"""
保存策略逻辑过程记录=》 csv文件按
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
            if 'margin' not in dist_data:
                dist_data.update({'margin': dist_data.get('price', 0) * dist_data.get('volume', 0)
                                            * self.cta_engine.get_margin_rate(dist_data.get('symbol', self.vt_symbol))})
if 'datetime' not in dist_data:
dist_data.update({'datetime': self.cur_datetime})
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
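    # A dist_record row typically carries: datetime, volume, price, margin,
    # symbol, operation, long_pos and short_pos (populated in on_trade above).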
def save_tns(self, tns_data):
"""
保存多空事务记录=》csv文件,便于后续分析
:param tns_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
class MultiContractPolicy(CtaPolicy):
"""多合约Policy,记录持仓"""
def __init__(self, strategy=None, **kwargs):
super().__init__(strategy, **kwargs)
self.debug = kwargs.get('debug', False)
self.positions = {} # vt_symbol: net_pos
def from_json(self, json_data):
"""将数据从json_data中恢复"""
super().from_json(json_data)
self.positions = json_data.get('positions')
def to_json(self):
"""转换至json文件"""
j = super().to_json()
j['positions'] = self.positions
return j
def on_trade(self, trade: TradeData):
"""更新交易"""
pos = self.positions.get(trade.vt_symbol)
if pos is None:
pos = 0
pre_pos = pos
if trade.direction == Direction.LONG:
pos = round(pos + trade.volume, 7)
elif trade.direction == Direction.SHORT:
pos = round(pos - trade.volume, 7)
self.positions.update({trade.vt_symbol: pos})
if self.debug and self.strategy:
self.strategy.write_log(f'{trade.vt_symbol} pos:{pre_pos}=>{pos}')
self.save()
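        # Illustrative bookkeeping (hypothetical symbol and numbers): starting flat,
        # a LONG trade of 2.0 yields {'BTCUSDT.BINANCE': 2.0}; a following SHORT
        # trade of 5.0 yields {'BTCUSDT.BINANCE': -3.0}. Volumes round to 7 decimals.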
class MultiContractTemplate(CtaTemplate):
"""多合约交易模板"""
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
self.policy = None
self.cur_datetime = None
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.policy = MultiContractPolicy(strategy=self, debug=True)
def sync_data(self):
"""同步更新数据"""
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def on_trade(self, trade: TradeData):
"""成交回报事件处理"""
self.policy.on_trade(trade)
def get_positions(self):
""" 获取策略所有持仓详细"""
pos_list = []
for vt_symbol, pos in self.policy.positions.items():
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'long' if pos >= 0 else 'short',
'volume': pos})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'{}当前持仓:{}'.format(self.strategy_name, pos_list))
return pos_list
def on_order(self, order: OrderData):
pass
def on_init(self):
self.inited = True
def on_start(self):
self.trading = True
def on_stop(self):
self.trading = False
# Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
testDir = '/home/pcorrado/Cardiac-DL-Segmentation-Paper/test'
modelBasePath = '/home/pcorrado/Cardiac-DL-Segmentation-Paper/Cardiac-Segmentation-4D-Flow/TrainedModels'
modelPaths = ['model_{}_layers_frozen'.format(l) for l in [4,8,12,14,15]]
modelPaths.append('modelUnfrozen')
modelName = 'FCN_sa_level5_filter16_22333_batch20_iter10000_lr0.001'
numLayers = [4,8,12,14,15,0]
if __name__ == '__main__':
for ii in range(len(modelPaths)):
os.system('python3 common/deploy_network.py --data_dir {0} '
'--model_path {1}/{2}/{3}/{3}.ckpt-10000'.format(testDir, modelBasePath, modelPaths[ii], modelName))
for data in sorted(os.listdir(testDir)):
data_dir = os.path.join(testDir, data)
os.system('mv {0}/seg_sa.nii.gz {0}/sa_label_{1}.nii.gz'.format(data_dir, numLayers[ii]))
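# Expected outputs (derived from the loop above): each subject directory under
# testDir ends up with sa_label_4.nii.gz, sa_label_8.nii.gz, sa_label_12.nii.gz,
# sa_label_14.nii.gz, sa_label_15.nii.gz and sa_label_0.nii.gz (0 = unfrozen model).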
# coding:utf-8
from lxml import etree
import requests
import config
def checkProxyType(selfip, proxies):
    '''
    Detect the proxy's anonymity level; the information published by free proxy
    sites is unreliable, so verify it ourselves.
    :param selfip: our own public IP
    :param proxies: the proxy to test
    :return: 0 elite (high anonymity), 1 anonymous, 2 transparent, 3 invalid proxy
    '''
try:
r = requests.get(url='https://incloak.com/ip/', headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies)
print(r.text)
# if r.ok:
# root = etree.HTML(r.text)
# ip = root.xpath('.//center[2]/table/tr[3]/td[2]')[0].text
# http_x_forwared_for = root.xpath('.//center[2]/table/tr[8]/td[2]')[0].text
# http_via = root.xpath('.//center[2]/table/tr[9]/td[2]')[0].text
# # print ip,http_x_forwared_for,http_via,type(http_via),type(http_x_forwared_for)
# if ip==selfip:
# return 3
# if http_x_forwared_for is None and http_via is None:
# return 0
# if http_via != None and http_x_forwared_for.find(selfip)== -1:
# return 1
#
# if http_via != None and http_x_forwared_for.find(selfip)!= -1:
# return 2
# return 3
except Exception as e:
print(str(e))
return 3
if __name__ == '__main__':
ip = '61.132.241.109'
port = '808'
proxies = {"http": "http://%s:%s" % (ip, port), "https": "http://%s:%s" % (ip, port)}
checkProxyType(None, proxies)