text stringlengths 38 1.54M |
|---|
import csv
import statistics
#This function iterates over quant_bootstraps.tsv and creates a map of the confidence interval for each transcript
def evaluateCI(inputDir):
    """Map each transcript ID to its (lower, upper) ~95% confidence interval.

    Reads input/<inputDir>/quant_bootstraps.tsv, where each column is one
    transcript: the first row holds the transcript ID and the remaining rows
    are bootstrap samples.  The interval is mean +/- 2 * sample stddev.

    Returns:
        dict mapping transcript ID -> (mean - 2*sd, mean + 2*sd)
    """
    trCIMap = dict()
    with open('input/' + inputDir + '/quant_bootstraps.tsv') as tsv:
        # zip(*reader) transposes rows into per-transcript columns; no need to
        # materialize the whole reader in a throwaway list first.
        for trID, *samples in zip(*csv.reader(tsv, dialect="excel-tab")):
            bootstrapData = [float(x) for x in samples]
            mean = statistics.mean(bootstrapData)
            # Pass the precomputed mean so stdev does not recompute it.
            sd = statistics.stdev(bootstrapData, xbar=mean)
            trCIMap[trID] = (mean - 2 * sd), (mean + 2 * sd)
    return trCIMap
#This is a utility function to calculate the mean and standard deviation of each column.
def get_mean_sd(inputDir):
    """Map each transcript ID to the (mean, stddev) of its bootstrap samples.

    Reads input/<inputDir>/quant_bootstraps.tsv (one column per transcript,
    first row is the transcript ID, remaining rows are bootstrap samples).

    Returns:
        dict mapping transcript ID -> (mean, sample standard deviation)
    """
    txp_mean_sd_map = dict()
    with open('input/' + inputDir + '/quant_bootstraps.tsv') as tsv:
        # Transpose rows into columns directly; the intermediate list the
        # original built around csv.reader was unnecessary.
        for trID, *samples in zip(*csv.reader(tsv, dialect="excel-tab")):
            bootstrapData = [float(x) for x in samples]
            mean = statistics.mean(bootstrapData)
            sd = statistics.stdev(bootstrapData, xbar=mean)
            txp_mean_sd_map[trID] = mean, sd
    return txp_mean_sd_map
#This function iterates over the regenerated quant_bootstraps_new.tsv and creates a map of the confidence interval for each transcript
def evaluateCI_new_quant(inputDir):
    """Map each transcript ID to its (lower, upper) ~95% confidence interval.

    Same computation as evaluateCI, but reads the regenerated bootstrap file
    output/quant_bootstraps_new.tsv.  NOTE: inputDir is accepted for signature
    parity with evaluateCI but is not used by the hard-coded path.

    Returns:
        dict mapping transcript ID -> (mean - 2*sd, mean + 2*sd)
    """
    trCIMap = dict()
    with open('output/quant_bootstraps_new.tsv') as tsv:
        # Transpose rows into per-transcript columns without materializing
        # the full reader into a temporary list.
        for trID, *samples in zip(*csv.reader(tsv, dialect="excel-tab")):
            bootstrapData = [float(x) for x in samples]
            mean = statistics.mean(bootstrapData)
            sd = statistics.stdev(bootstrapData, xbar=mean)
            trCIMap[trID] = (mean - 2 * sd), (mean + 2 * sd)
    return trCIMap
|
def parallel_resistance(r1, r2, r3):
    """Equivalent resistance of three resistors connected in parallel.

    Uses the product-over-sum form: (r1*r2*r3) / (r1*r2 + r2*r3 + r1*r3).
    """
    return (r1 * r2 * r3) / ((r1 * r2) + (r2 * r3) + (r1 * r3))


# Read the three resistor values and report the combined parallel resistance.
r1 = float(input("resistor1: "))
r2 = float(input("resistor2: "))
r3 = float(input("resistor3: "))
print(parallel_resistance(r1, r2, r3))
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-01-15 07:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the 'profile' field from the award app's Post model."""

    # Must run after the previous award migration so post.profile exists to remove.
    dependencies = [
        ('award', '0003_auto_20200115_1005'),
    ]

    operations = [
        # Drop the 'profile' column from the 'post' table.
        migrations.RemoveField(
            model_name='post',
            name='profile',
        ),
    ]
|
#!/usr/bin/env python
# coding: utf8
"""
A simple example for training a part-of-speech tagger with a custom tag map.
To allow us to update the tag map with our custom one, this example starts off
with a blank Language class and modifies its defaults. For more details, see
the documentation:
* Training: https://spacy.io/usage/training
* POS Tagging: https://spacy.io/usage/linguistic-features#pos-tagging
Compatible with: spaCy v2.0.0+
"""
from __future__ import unicode_literals, print_function
import plac
import random
from pathlib import Path
import spacy
import tqdm
from tagger_sv import input_fn
from spacy.gold import GoldParse
# TRAIN_DATA = input_fn("./data/parser/talbanken-stanford-train.conll")
# NOTE(review): input_fn is assumed to yield (text, {'tags': [...]}) pairs as
# consumed by nlp.update() in main() below -- confirm against tagger_sv.
TRAIN_DATA = input_fn("./data/parser/text.txt")
# You need to define a mapping from your data's part-of-speech tag names to the
# Universal Part-of-Speech tag set, as spaCy includes an enum of these tags.
# See here for the Universal Tag Set:
# http://universaldependencies.github.io/docs/u/pos/index.html
# You may also specify morphological features for your tags, from the universal
# scheme.
# Mapping from the Stockholm-Umeå Corpus (SUC) tags to the Universal POS tags.
# Keys are SUC (Stockholm-Umeå Corpus) tags; values give the Universal POS tag.
# NOTE(review): 'CONJ' was renamed 'CCONJ' in UD v2 / newer spaCy versions --
# confirm the spaCy version in use still accepts 'CONJ'.
TAG_MAP = {
    "AB": {'pos': 'ADV'},
    "DT": {'pos': 'DET'},
    "HA": {'pos': 'ADV'},
    "HD": {'pos': 'DET'},
    "HP": {'pos': 'PRON'},
    "HS": {'pos': 'PRON'},
    "IE": {'pos': 'PART'},
    "IN": {'pos': 'X'},
    "JJ": {'pos': 'ADJ'},
    "KN": {'pos': 'CONJ'},
    "NN": {'pos': 'NOUN'},
    "PC": {'pos': 'ADJ'},
    "PL": {'pos': 'PART'},
    "PM": {'pos': 'NOUN'},
    "PN": {'pos': 'PRON'},
    "PP": {'pos': 'ADP'},
    "PS": {'pos': 'PRON'},
    "RG": {'pos': 'NUM'},
    "RO": {'pos': 'NUM'},
    "SN": {'pos': 'CONJ'},
    "VB": {'pos': 'VERB'},
    "UO": {'pos': 'X'},
    # Punctuation classes: sentence-final (MAD), sentence-internal (MID), paired (PAD).
    "MAD": {'pos': 'PUNCT'},
    "MID": {'pos': 'PUNCT'},
    "PAD": {'pos': 'PUNCT'}
}
# Usually you'll read this in, of course. Data formats vary. Ensure your
# strings are unicode and that the number of tags assigned matches spaCy's
# tokenization. If not, you can always add a 'words' key to the annotations
# that specifies the gold-standard tokenization, e.g.:
# ("Eatblueham", {'words': ['Eat', 'blue', 'ham'] 'tags': ['V', 'J', 'N']})
# TRAIN_DATA = [
# ("Jag gillar gröna ägg", {'tags': ['NN', 'VB', 'JJ', 'NN']}),
# ("Ät blå skinka", {'tags': ['VB', 'JJ', 'NN']})
# ]
@plac.annotations(
    lang=("ISO Code of language to use", "option", "l", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int))
def main(lang='en', output_dir=None, n_iter=1):
    """Create a new model, set up the pipeline and train the tagger. In order to
    train the tagger with a custom tag map, we're creating a new Language
    instance with a custom vocab.
    """
    nlp = spacy.blank(lang)
    # add the tagger to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    tagger = nlp.create_pipe('tagger')
    # Add the tags. This needs to be done before you start training.
    for tag, values in TAG_MAP.items():
        tagger.add_label(tag, values)
    nlp.add_pipe(tagger)

    optimizer = nlp.begin_training(lambda: [])
    for i in range(n_iter):
        # random.shuffle(TRAIN_DATA)
        losses = {}
        for text, annotations in tqdm.tqdm(TRAIN_DATA):
            nlp.update([text], [annotations], sgd=optimizer, losses=losses)
        print("n_iter: {},\tloss: {}".format(i, losses['tagger']))

    # test the trained model
    test_text = "Jag gillar blåa ägg"
    doc = nlp(test_text)
    print('Tags', [(t.text, t.tag_, t.pos_) for t in doc])

    # BUG FIX: the original called spacy.load(output_dir) *before* saving the
    # model (and even when output_dir was None), which crashes.  Save first,
    # then reload once as a sanity check -- all guarded by output_dir.
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)
        # test the saved model
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        doc = nlp2(test_text)
        print('Tags', [(t.text, t.tag_, t.pos_) for t in doc])


if __name__ == '__main__':
    plac.call(main)
# Expected output:
# [
# ('Jag', 'NN', 'NOUN'),
# ('gillar', 'VB', 'VERB'),
# ('blåa', 'JJ', 'ADJ'),
# ('ägg', 'NN', 'NOUN')
# ]
|
"""
RL-Scope: Cross-Stack Profiling for Deep Reinforcement Learning Workloads
"""
# NOTE: setup.py is based on the one from tensorflow.
from os.path import join as _j, abspath as _a, exists as _e, dirname as _d, basename as _b
import fnmatch
import argparse
from glob import glob
import shlex
import os
import re
import subprocess
import pprint
import textwrap
import sys
from setuptools import find_packages
from setuptools import setup
# from distutils.command.build_py import build_py as _build_py
# from distutils.command.clean import clean as _clean
from distutils.spawn import find_executable
ROOT = _d(os.path.realpath(__file__))
# HACK: Make it so we can import logging stuff.
sys.path.insert(0, ROOT)
from rlscope.profiler.rlscope_logging import logger
from rlscope.profiler import rlscope_logging
from rlscope import py_config
PROJECT_NAME = 'rlscope'
def pprint_msg(dic, prefix=' '):
    """
    Give logger.info a string for neatly printing a dictionary.
    Usage:
        logger.info(pprint_msg(arbitrary_object))
    """
    formatted = pprint.pformat(dic)
    return "\n" + textwrap.indent(formatted, prefix=prefix)
def get_files_by_ext(root, rm_prefix=None):
    """Group every file under *root* by its file extension.

    Args:
        root: directory to walk recursively.
        rm_prefix: if given, a leading "<rm_prefix>/" is stripped from each path.

    Returns:
        dict mapping extension (None for files without one) -> list of paths.
    """
    files_by_ext = dict()
    for path in each_file_recursive(root):
        ext = file_extension(path)
        if rm_prefix is not None:
            path = re.sub(r'^{prefix}/'.format(prefix=rm_prefix), '', path)
        # setdefault replaces the explicit "if ext not in dict: dict[ext] = []" dance.
        files_by_ext.setdefault(ext, []).append(path)
    return files_by_ext
def file_extension(path):
    """Return the final extension of *path* (text after the last '.'), or None."""
    match = re.search(r'\.(?P<ext>[^.]+)$', path)
    if match is None:
        return None
    return match.group('ext')
def each_file_recursive(root_dir):
    """Yield the path of every file underneath *root_dir*.

    Raises ValueError when *root_dir* is not an existing directory.
    """
    if not os.path.isdir(root_dir):
        raise ValueError("No such directory {root_dir}".format(root_dir=root_dir))
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            yield _j(dirpath, filename)
def cmd_debug_msg(cmd, env=None, dry_run=False):
    """Format a shell command, its cwd, and an optional environment for logging.

    Args:
        cmd: an argv list (each token shell-quoted) or a pre-built string.
        env: optional dict of environment variables to display.
        dry_run: annotate the header to show the command will not be run.

    Returns:
        A multi-line string suitable for logger output.
    """
    # isinstance instead of `type(cmd) == list` so list subclasses format too.
    if isinstance(cmd, list):
        cmd_str = " ".join([shlex.quote(str(x)) for x in cmd])
    else:
        cmd_str = cmd
    lines = []
    if dry_run:
        lines.append("> CMD [setup.py] [dry-run]:")
    else:
        lines.append("> CMD [setup.py]:")
    lines.extend([
        " $ {cmd}".format(cmd=cmd_str),
        " PWD={pwd}".format(pwd=os.getcwd()),
    ])
    if env is not None and len(env) > 0:
        env_vars = sorted(env.keys())
        lines.append(" Environment:")
        for var in env_vars:
            lines.append(" {var}={val}".format(
                var=var,
                val=env[var]))
    return '\n'.join(lines)
# def log_cmd(cmd, env=None, dry_run=False):
# string = cmd_debug_msg(cmd, env=env, dry_run=dry_run)
#
# logging.info(string)
def print_cmd(cmd, files=sys.stdout, env=None, dry_run=False):
    """Print the formatted command message to one or more streams and flush.

    Args:
        files: a single file-like object, or a set/list of them.

    BUG FIX: the original contained an unreachable inner branch
    (`type(files) in [list]` nested inside a `not in [set, list]` check);
    normalization is now a single step.
    """
    string = cmd_debug_msg(cmd, env=env, dry_run=dry_run)
    if type(files) not in [set, list]:
        files = [files]
    for f in files:
        print(string, file=f)
        f.flush()
# Find the Protocol Compiler: honour an explicit $PROTOC override when it
# points at an existing file, otherwise fall back to whatever `protoc` is on
# PATH (protoc may end up None if none is installed).
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
    protoc = os.environ['PROTOC']
else:
    protoc = find_executable("protoc")

PYTHON_SRC_DIR = "python"

# First line of the module docstring doubles as the short description in setup().
DOCLINES = __doc__.lstrip().rstrip().split('\n')

# https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/specification.html
REQUIREMENTS_TXT = _j(py_config.ROOT, "requirements.txt")
def read_requirements(requirements_txt):
    """Parse a requirements.txt: drop comments and blank lines, keep the rest."""
    requires = []
    with open(requirements_txt) as f:
        for raw_line in f:
            entry = re.sub(r'#.*', '', raw_line.rstrip())
            if re.search(r'^\s*$', entry):
                continue
            requires.append(entry)
    return requires
def read_version(version_txt):
    """Return the contents of *version_txt* with trailing whitespace removed."""
    with open(version_txt) as f:
        return f.read().rstrip()
#
# NOTE: this is identical to requirements found in requirements.txt.
# (please keep in sync)
#
REQUIRED_PACKAGES = read_requirements(REQUIREMENTS_TXT)

# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
#
# NOTE: We must change the RLSCOPE_VERSION every time we upload to pip.
RLSCOPE_VERSION = py_config.read_rlscope_version()

# NOTE: dependencies for building docker images are defined in dockerfiles/requirements.txt
# DOCKER_PACKAGES = [
#     'PyYAML >= 5.1',
#     'absl-py >= 0.1.6',
#     'cerberus >= 1.3',
#     'docker >= 3.7.2',
# ]
def get_cuda_version():
    """Determine the CUDA version we are building for.
    NOTE: C++ components have a hard CUDA version dependency.
    If CUDA environment variable is defined:
        Use $CUDA
    Elif /usr/local/cuda is a symlink to /usr/local/cuda-${CUDA_VERSION}:
        Use $CUDA_VERSION
    Else:
        Use 10.1 (default)
    """
    if 'CUDA' in os.environ:
        return os.environ['CUDA']
    if os.path.islink('/usr/local/cuda'):
        cuda_path = os.path.realpath('/usr/local/cuda')
        match = re.search(r'^cuda-(?P<cuda_version>.*)', os.path.basename(cuda_path))
        return match.group('cuda_version')
    # Default:
    return '10.1'
def get_pip_package_version():
    """
    Mimic how pytorch specifies cuda dependencies:
    e.g.
        torch==1.7.1+cu110
    For CUDA 11.0
    """
    # "10.1" -> "101", appended as a "+cuNNN" local version suffix.
    cuda_version = get_cuda_version()
    cu_version = cuda_version.replace('.', '')
    return "{rlscope_version}+cu{cu_version}".format(
        rlscope_version=RLSCOPE_VERSION,
        cu_version=cu_version,
    )
# python3 requires wheel 0.26
if sys.version_info.major == 3:
    REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
    REQUIRED_PACKAGES.append('wheel')

# pylint: disable=line-too-long
# Entry points installed for every install mode (develop and wheel installs).
CONSOLE_SCRIPTS = [
    'rls-prof = rlscope.scripts.cuda_api_prof:main',
    'rls-plot = rlscope.parser.calibration:main_plot',
    'rls-run = rlscope.scripts.analyze:main',
    'rls-util-sampler = rlscope.scripts.utilization_sampler:main',
    'rls-dump-proto = rlscope.scripts.dump_proto:main',
    'rls-calibrate = rlscope.parser.calibration:main_run',
    # Running various experiments for RL-Scope paper.
    # Used by artifact evaluation.
    'rls-run-expr = rlscope.scripts.run_expr:main',
    'rls-bench = rlscope.scripts.bench:main',
    'rls-quick-expr = rlscope.scripts.quick_expr:main',
    # Not yet ready for "prime time"...
    'rls-generate-plot-index = rlscope.scripts.generate_rlscope_plot_index:main',
]
# pylint: enable=line-too-long

# Only install these inside the docker development environment
# (or, when "python setup.py develop" is called).
DEVELOPMENT_SCRIPTS = [
    # NOTE: we don't install rls-analyze wrapper script; we instead depend on source_me.sh
    # (develop_rlscope in container) to add rls-test to PATH.
    # Python / C++ unit test runner.
    'rls-unit-tests = rlscope.scripts.rls_unit_tests:main',
    'rlscope-is-development-mode = rlscope.scripts.cpp.cpp_binary_wrapper:rlscope_is_development_mode'
]

# NOTE: the presence of this on "PATH" tells us whether rlscope was installed using a wheel file,
# or using "python setup.py develop" (i.e., don't install it, so it's not on PATH).
PRODUCTION_SCRIPTS = [
    'rls-analyze = rlscope.scripts.cpp.cpp_binary_wrapper:rls_analyze',
    # Wrapper around C++ unit tests.
    # Useful if we wish to run unit tests with the wheel file.
    # 'rls-test = rlscope.scripts.cpp.cpp_binary_wrapper:rls_test',
    # 'rlscope-pip-installed = rlscope.scripts.cpp.cpp_binary_wrapper:rlscope_pip_installed',
]
def find_files(pattern, root):
    """Yield every path under *root* whose basename matches glob *pattern*."""
    for dirpath, _dirs, filenames in os.walk(root):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def generate_proto(source, require=True, regenerate=False):
    """Invokes the Protocol Compiler to generate a _pb2.py from the given
    .proto file. Does nothing if the output already exists and is newer than
    the input.

    source: path of the .proto file.
    require: when False and *source* does not exist, return silently.
    regenerate: when True, recompile even if the output is up to date.
    """
    if not require and not os.path.exists(source):
        return
    output = source.replace(".proto", "_pb2.py").replace("../src/", "")
    # Rebuild when forced, when the output is missing, or when the .proto is newer.
    if regenerate or (
        not os.path.exists(output) or
        (os.path.exists(source) and
         os.path.getmtime(source) > os.path.getmtime(output))):
        logger.debug("Generating %s..." % output)
        if not os.path.exists(source):
            sys.stderr.write("Can't find required file: %s\n" % source)
            sys.exit(-1)
        # protoc is resolved at module import ($PROTOC or PATH lookup).
        if protoc is None:
            sys.stderr.write(
                "protoc is not installed nor found in ../src. Please compile it "
                "or install the binary package.\n")
            sys.exit(-1)
        # protoc -I$PWD --python_out=. prof_protobuf/*.proto
        # protoc_command = [protoc, "-I.", "--python_out=.", "{dir}/*.proto".format(
        protoc_command = [protoc, "-I.", "--python_out=.", source]
        print_cmd(protoc_command)
        if subprocess.call(protoc_command) != 0:
            sys.exit(-1)
# Directory holding the .proto schema files compiled by generate_proto().
PROTOBUF_DIR = 'rlscope/protobuf'
proto_files = list(find_files('*.proto', PROTOBUF_DIR))

POSTGRES_SQL_DIR = 'postgres'
THIRD_PARTY_DIR = 'third_party'

# PYTHON_PACKAGE_DIRS = [_j(PYTHON_SRC_DIR, direc) \
#     for direc in find_packages(where=PYTHON_SRC_DIR)]
# Only the top-level 'rlscope' package is shipped.
PYTHON_PACKAGE_DIRS = [
    'rlscope',
]
PACKAGE_DIRS = PYTHON_PACKAGE_DIRS
# PACKAGE_DIRS = PYTHON_PACKAGE_DIRS + \
#     [
#         PROTOBUF_DIR,
#         POSTGRES_SQL_DIR,
#         THIRD_PARTY_DIR,
#     ]
def main():
    """setup.py entry point: parse RL-Scope-specific CLI flags, regenerate
    protobuf modules, compute package_data and console_scripts, then hand off
    to setuptools.setup()."""
    #
    # Parse command line arguments.
    #
    parser = argparse.ArgumentParser("Install RL-Scope python module", add_help=False)
    parser.add_argument('setup_cmd', nargs='?', default=None, help="setup.py command (e.g., develop, install, bdist_wheel, etc.)")
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--help', '-h', action='store_true')
    parser.add_argument('--debug-skip-cpp', action='store_true',
                        help="(Internal debugging) Don't include librlscope.so")
    args, extra_argv = parser.parse_known_args()

    def is_development_mode():
        # "python setup.py develop" is the development install.
        return args.setup_cmd == 'develop'

    def is_production_mode():
        return not is_development_mode()

    # Remove any arguments that were parsed using argparse.
    # e.g.,
    #   ['setup.py', 'bdist_wheel', '--debug'] =>
    #   ['setup.py', 'bdist_wheel']
    setup_argv = [sys.argv[0]]
    if args.setup_cmd is not None:
        setup_argv.append(args.setup_cmd)
    if args.help:
        setup_argv.append('--help')
    setup_argv.extend(extra_argv)
    # setuptools reads sys.argv directly, so hand it the filtered argv.
    sys.argv = setup_argv

    if args.debug:
        rlscope_logging.enable_debug_logging()
    else:
        rlscope_logging.disable_debug_logging()

    logger.debug("setup_argv = {argv}".format(argv=sys.argv))
    logger.debug("> Using protoc = {protoc}".format(protoc=protoc))
    logger.debug(pprint.pformat({
        'proto_files': proto_files,
        'PACKAGE_DIRS': PACKAGE_DIRS,
    }))

    with open("README.md", "r") as fh:
        long_description = fh.read()

    def _proto(base):
        # Path of a .proto file inside rlscope/protobuf.
        return _j(PROTOBUF_DIR, base)

    # Regenerate *_pb2.py modules whenever an actual setup command runs.
    if args.setup_cmd is not None:
        generate_proto(_proto('pyprof.proto'), regenerate=True)
        generate_proto(_proto('unit_test.proto'), regenerate=True)
        generate_proto(_proto('rlscope_prof.proto'), regenerate=True)

    rlscope_ext = get_files_by_ext('rlscope', rm_prefix='rlscope')
    logger.debug("rlscope_ext = {msg}".format(
        msg=pprint_msg(rlscope_ext),
    ))

    package_data = {
        'rlscope': [
            # NOTE: we avoid using glob(..) patterns like "**/*.py" here since
            # we need to make one for each directory level...
            # we really just want to glob for "all python files",
            # which we do using each_file_recursive(...).
        ],
    }
    # Only ship these file types from the pure-python tree.
    keep_ext = {'cfg', 'ini', 'py', 'proto'}
    for ext in set(rlscope_ext.keys()).intersection(keep_ext):
        package_data['rlscope'].extend(rlscope_ext[ext])

    if is_production_mode() and not args.debug_skip_cpp:
        # If there exist files in rlscope/cpp/**/*
        # assume that we wish to package these into the wheel.
        cpp_files = glob(_j(ROOT, 'rlscope', 'cpp', '**', '*'))
        # Keep all rlscope/cpp/**/* files regardless of extension.
        cpp_ext = get_files_by_ext('rlscope/cpp', rm_prefix='rlscope')
        logger.debug("cpp_ext = \n{msg}".format(
            msg=pprint_msg(cpp_ext),
        ))
        if len(cpp_files) == 0:
            logger.error(textwrap.dedent("""\
            Looks like you're trying to build a python wheel for RL-Scope, but you haven't built the C++ components yet (i.e., librlscope.so, rls-analyze).
            To build a python wheel, run this:
            $ cd {root}
            $ BUILD_PIP=yes bash ./setup.sh
            """.format(
                root=py_config.ROOT,
            ).rstrip()))
            sys.exit(1)
        for ext, paths in cpp_ext.items():
            package_data['rlscope'].extend(paths)

    logger.debug("package_data = \n{msg}".format(
        msg=pprint_msg(package_data),
    ))

    console_scripts = []
    console_scripts.extend(CONSOLE_SCRIPTS)
    if is_production_mode():
        console_scripts.extend(PRODUCTION_SCRIPTS)
    else:
        console_scripts.extend(DEVELOPMENT_SCRIPTS)
    # logger.info("entry_points: {msg}".format(msg=pprint_msg(console_scripts)))

    if args.help:
        # Print both argparse usage AND setuptools setup.py usage info.
        parser.print_help()

    setup(
        name=PROJECT_NAME,
        version=get_pip_package_version(),
        description=DOCLINES[0],
        long_description=long_description,
        url='https://github.com/UofT-EcoSystem/rlscope',
        download_url='https://github.com/UofT-EcoSystem/rlscope/tags',
        author='James Gleeson',
        author_email='jgleeson@cs.toronto.edu',
        # Contained modules and scripts.
        packages=PACKAGE_DIRS,
        entry_points={
            'console_scripts': console_scripts,
        },
        install_requires=REQUIRED_PACKAGES,
        # tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
        # # https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
        # # These requirements are only installed if these features are enabled when installing the pip package.
        # #
        # # $ pip install 'rlscope[docker]'
        # #
        # # Q: Does this work with .whl files?
        # extras_require={
        #     'docker': DOCKER_PACKAGES,
        # },
        package_data=package_data,
        classifiers=[
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Topic :: Scientific/Engineering',
            'Topic :: Scientific/Engineering :: Mathematics',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
            'Topic :: Software Development',
            'Topic :: Software Development :: Libraries',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'License :: OSI Approved :: Apache Software License',
        ],
        license='Apache 2.0',
        keywords='rlscope ml profiling tensorflow machine learning reinforcement learning',
    )


if __name__ == '__main__':
    main()
|
from setuptools import setup,find_packages
# Package metadata for the Veda CLI (vedainit / vedactl entry points).
setup(
    name='Veda',
    version='0.1',
    packages=find_packages(),
    author='shihua',
    description='Veda Module',
    install_requires=['ansible', 'clickhouse-driver'],
    entry_points={
        'console_scripts': [
            'vedainit = Veda.scripts.cli_init:vedainit',
            'vedactl = Veda.scripts.cli:veda',
        ],
    },
)
from django.shortcuts import render
from creative.models import Sheet1,Worksheet,EquipmentInfo2,EquipmentOrg
# Create your views here.
|
import os
import warnings
from subprocess import call
import requests
from tqdm import tqdm
from .create import create_embedding_database
# Default location for the downloaded GloVe archive (a ~2 GB zip).
DEFAULT_GLOVE_DOWNLOAD_PATH = os.path.join('/', 'tmp', 'glove', 'glove.zip')
def download(target_path=None, url=None):
    """Download GloVe Common Crawl 840B tokens.
    (840B tokens, 2.2M vocab, cased, 300d vectors, 2.03 GB download)
    args:
        target_path: where to save
            default /tmp/glove/glove.zip
        url: where to download from
            default http://nlp.stanford.edu/data/glove.840B.300d.zip
    """
    if target_path is None:
        target_path = DEFAULT_GLOVE_DOWNLOAD_PATH
    # BUG FIX: create the directory of the *actual* target path (the original
    # always created the default path's directory, so a custom target_path in
    # a missing directory would fail).  exist_ok replaces the try/except.
    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    if url is None:
        url = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
    response = requests.get(url, stream=True)
    with open(target_path, 'wb') as f:
        pbar = tqdm(
            unit="KB", total=int(response.headers['Content-Length']) // 1024)
        for chunk in response.iter_content(chunk_size=1024):
            # Skip keep-alive chunks, which are empty.
            if chunk:
                pbar.update(len(chunk) // 1024)
                f.write(chunk)
def create(embeddings_file=None, path_to_db=None, keep_file=True):
    """Create the embedding database, downloading the GloVe archive if needed.

    Args:
        embeddings_file: path to the GloVe archive; when None, use the default
            download location (downloading first if it is absent).
        path_to_db: where to create the DB; defaults to ~/glove.
        keep_file: when False, delete the archive after the DB is built.
    """
    if embeddings_file is None:
        if os.path.exists(DEFAULT_GLOVE_DOWNLOAD_PATH):
            embeddings_file = DEFAULT_GLOVE_DOWNLOAD_PATH
        else:
            warnings.warn('''GloVe embeddings file path not specified,
archive not found at default path.
Commencing 2.03GB download.
File will be deleted after DB is created.
default download path is:
{}'''.format(DEFAULT_GLOVE_DOWNLOAD_PATH))
            download()
            embeddings_file = DEFAULT_GLOVE_DOWNLOAD_PATH
            keep_file = False
    if path_to_db is None:
        path_to_db = os.path.expanduser(os.path.join('~', 'glove'))
    create_embedding_database(embeddings_file, path_to_db)
    if keep_file is False:
        # FIX: portable deletion instead of shelling out to `rm -f`; mirror
        # rm -f's tolerance of a missing file.
        try:
            os.remove(embeddings_file)
        except FileNotFoundError:
            pass
|
from django.contrib import admin
from . models import *
# Register your models here.
# Register every shop model with the admin site (same order as before).
for _model in (
    Customer,
    Supplier,
    Country,
    Item,
    Category,
    Promotion,
    Report,
    Document,
    Expense,
    PaymentOption,
    Order,
    OrderItem,
    Payment,
):
    admin.site.register(_model)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/15 12:06 上午
# @Author : lambert
# @Site :
# @File : Permutations.py
# @Software: PyCharm
class Solution:
    """Recursive permutation generator."""

    def permute(self, nums: list):
        """Return all permutations of *nums* as a list of lists."""
        # Base case: a single element has exactly one permutation.
        if len(nums) == 1:
            return [nums]
        permutations = []
        for idx in range(len(nums)):
            remainder = nums[:idx] + nums[idx + 1:]
            # Append the picked element to every permutation of the rest.
            for partial in self.permute(remainder):
                partial.append(nums[idx])
                permutations.append(partial)
        return permutations


print(Solution().permute([1, 2, 3]))
|
# coding: utf-8
# In[1]:
import pandas as pd
import sys
sys.path.append('../feature engineer/')
from feature_combine import feature_combine
# In[5]:
def nan_process():
    """Fetch the combined features and fill every NaN with the sentinel -1."""
    print('NaN process ...'.center(50, '*'))
    train, test, y = feature_combine()
    print(train.shape, test.shape)
    filled_train = train.fillna(-1)
    filled_test = test.fillna(-1)
    return filled_train, filled_test, y
# In[6]:
#train, test = nan_process()
|
from VotingMachine import VotingMachine as VoMa
from Candidate import Candidate as Can
import numpy
import pickle
def main():
    """Load (or hard-code) candidates and preferences, run the voting machine,
    then print outcomes, happiness statistics, and possible manipulations."""
    # -------------------------------------------------
    # ------ Candidate and Preference Creation --------
    # -------------------------------------------------
    manual_input = False
    if manual_input:
        # candidates
        a = Can('A')
        b = Can('B')
        c = Can('C')
        d = Can('D')
        e = Can('E')
        candidates = [a, b, c, d, e]
        # create preference matrix (rows become columns after .T)
        preferences = numpy.array([
            [a, e, c, d, b],
            [a, c, b, d, e],
            [c, b, d, e, a],
            [b, d, c, e, a],
            [b, d, c, e, a],
            [d, a, b, c, e]
        ]).T
    else:
        # load candidates and preferences from file
        with open('data/candidates.pck', 'rb') as pref_file:
            candidates = pickle.load(pref_file)
        with open('data/preferences_prob.pck', 'rb') as pref_file:
            preferences = numpy.array(pickle.load(pref_file)).T

    # ------------------------------------------------
    # ------ Run voting and get manipulations --------
    # ------------------------------------------------
    # create voting machine instance
    voting_machine = VoMa(preferences, candidates)
    # get manipulations from voting machine
    manipulations = voting_machine.get_manipulations()

    # ---------------------------
    # ------ OUTPUT CODE --------
    # ---------------------------
    # number of voters == columns of the (candidates x voters) matrix
    num_voters = preferences.shape[1]
    mani_per_vs = {}
    mani_happ_per_vs = {}
    debug_flag = True  # will print debug output if set to true
    manipulation_output = False  # will print list of manipulations if set to true
    if debug_flag:
        print("Original Preferences")
        print(preferences.T)
        print("Original Outcome")
        # FIX: iterate directly instead of `for _, vs in enumerate(...)`.
        for vs in voting_machine.outcomes:
            print(str(vs) + ': ' + str(voting_machine.outcomes[vs]))
        print("Original happiness")
        print(voting_machine.happinesses)
        print("-------")
        print("Original happiness sum")
        for vs in voting_machine.happinesses.keys():
            print(vs)
            print(sum(voting_machine.happinesses[vs]))
    if manipulation_output:
        print("\nManipulations:")
        for m in manipulations:
            print("----")
            print(m.strategy + ": " + m.voting_scheme + ", Voter " + str(m.voter))
            print("New voter preference")
            print(m.strategic_preference.T)
            print("New outcome")
            print(m.strategic_outcome)
            print("New Happiness: ")
            print(m.strategic_overall_happiness)
            print("New happiness sum")
            print(sum(m.strategic_overall_happiness))
            print("")
    # Tally the manipulation count and total happiness per voting scheme.
    for m in manipulations:
        happiness_sum = sum(m.strategic_overall_happiness)
        # FIX: `in` instead of `.keys().__contains__(...)`.
        if m.voting_scheme in mani_per_vs:
            mani_per_vs[m.voting_scheme] += 1
            mani_happ_per_vs[m.voting_scheme] += happiness_sum
        else:
            mani_per_vs[m.voting_scheme] = 1
            mani_happ_per_vs[m.voting_scheme] = happiness_sum
    num_all_mani = len(manipulations)
    # Print the results
    print("Manipulations overall: \n{}".format(num_all_mani))
    # FIX: the enumerate() index was never used.
    for key in mani_per_vs:
        print("------------")
        print(key)
        print("manipulations: {}".format(mani_per_vs[key]))
        print("score: {}".format(mani_per_vs[key] / num_voters))
        print("avg. happiness after manipulation:")
        print(mani_happ_per_vs[key] / mani_per_vs[key])


if __name__ == '__main__':
    main()
|
import game_framework
import title_state
from pico2d import *
# State identifier used by game_framework.
name = "StartState"
# Module-level state: splash image handle and elapsed-time accumulator.
image = None
logo_time = 0.0
def enter():
    """Enter the credit screen: open the canvas, load the logo, reset save data."""
    global image
    # BUG FIX: `json` was used below but never imported at module level,
    # causing a NameError at runtime; import it locally here.
    import json
    open_canvas()
    image = load_image('resource/kpu_credit.png')
    # Reset the money save file.  `with` guarantees the files are closed.
    first_money_data = {'money': 300 }
    with open('data/money_data.txt', 'w') as f:
        json.dump(first_money_data, f)
    # Read the existing player info records.
    with open('data/player_info_data.txt', 'r') as f:
        info_data = json.load(f)
    info_data.append({'stage': 1, 'score1': 0, 'score2': 0, 'score3': 0, 'money': 300, 'item_life': False, 'item_stop': False })
    # Write the updated player info back.
    with open('data/player_info_data.txt', 'w') as f:
        json.dump(info_data, f)
def exit():
    """Leave the credit screen, releasing the logo image."""
    global image
    del image
def update(frame_time):
    """Accumulate display time; after ~1 second, switch to the title state."""
    global logo_time
    if (logo_time > 1.0):
        logo_time = 0
        # game_framework.quit()
        game_framework.push_state(title_state)
    # NOTE(review): delay/increment assumed to run every frame (standard
    # pico2d splash pattern) -- source indentation was ambiguous; confirm.
    delay(0.01)
    logo_time += 0.01
def draw(frame_time):
    """Render one frame: clear the canvas, draw the logo, present it."""
    global image
    clear_canvas()
    # NOTE(review): (400, 300) assumes an 800x600 canvas center -- confirm
    # the open_canvas() size used by this project.
    image.draw(400, 300)
    update_canvas()
def handle_events(frame_time):
    """Drain the event queue; the credit screen ignores all input."""
    # FIX: the original bound the result to an unused local (`events`) and
    # followed it with a redundant `pass`.
    get_events()
def pause():
    """No-op: the credit screen has no pausable state."""
    pass


def resume():
    """No-op counterpart to pause()."""
    pass
|
from datetime import datetime
# Expects input like "learn python:01.01.2030" (goal, colon, DD.MM.YYYY date).
user_input = input("Enter your goal with a deadline separated by colon\n")
input_list = user_input.split(":")
goal = input_list[0]
deadline = input_list[1]
deadline_date = datetime.strptime(deadline, "%d.%m.%Y")
today_date = datetime.today()
# Calculate how long from now till the deadline.
# FIX: the original computed this difference twice back to back.
time_till = deadline_date - today_date
hours_till = int(time_till.total_seconds() / 60 / 60)
print(f"Dear user! Time remaining for your goal: {goal} is {hours_till} hours")
|
from django.db import models
# Create your models here.
class StudentsInfoManager(models.Manager):
    """Custom manager for StudentsInfo that hides soft-deleted rows."""

    def all(self):
        # Override the default queryset to exclude logically-deleted students.
        return super().filter(is_delete=False)

    def create_student(self, id, name, age):
        """Build, populate, and persist a new StudentsInfo row; return it."""
        student = self.model()
        student.sid = id
        student.sname = name
        student.sage = age
        # Default gender 0 == 'male' (see StudentsInfo.GENDER_CHOICES).
        student.sgender = 0
        student.is_delete = False
        # student.image = True
        student.save()
        return student
class StudentsInfo(models.Model):
    """Student record with soft-delete support (managed via StudentsInfoManager)."""

    GENDER_CHOICES = (
        (0, 'male'),
        (1, 'female')
    )
    # verbose_name values are user-facing Chinese labels (student id, name, age, gender...).
    sid = models.IntegerField(primary_key=True, verbose_name='学号')
    sname = models.CharField(max_length=20, verbose_name='姓名', unique=True)
    sage = models.IntegerField(verbose_name='年龄')
    sgender = models.BooleanField(choices=GENDER_CHOICES, default=0, verbose_name='性别')
    # You are trying to add a non-nullable field 'sdate' to studentsinfo without a default; we can't do that (the database needs something to populate existing rows).
    # sdate = models.DateField(verbose_name='入学日期')
    # scomment = models.CharField
    is_delete = models.BooleanField(default=False, verbose_name='删除个人')
    image = models.ImageField(upload_to='test02', verbose_name='图片', null=True)

    def old(self):
        # Computed admin column: the student's age minus 20.
        return self.sage-20
    old.short_description = '二十几岁'
    old.admin_order_field = 'sage'

    class Meta:
        db_table = 'students'  # explicit database table name
        verbose_name = '学生表'  # name shown on the admin site
        verbose_name_plural = verbose_name  # plural name shown on the admin site

    # String shown for each row on the admin site.
    def __str__(self):
        return self.sname

    students = StudentsInfoManager()
class CardsInfoManager(models.Manager):
    """Custom manager for CardsInfo that hides soft-deleted rows."""

    def all(self):
        # Exclude logically-deleted cards from the default queryset.
        return super().filter(is_delete=False)

    def create_card(self, id, name, date, student):
        """Build, populate, and persist a new CardsInfo row; return it."""
        card = self.model()
        card.cid = id
        card.cname = name
        card.cdate = date
        card.cstudent = student
        card.is_delete = False
        card.save()
        return card
class CardsInfo(models.Model):
    """Student card record; each card belongs to one StudentsInfo row."""

    # verbose_name values are user-facing Chinese labels (card no., card name, reg. date...).
    cid = models.IntegerField(primary_key=True, verbose_name='卡号')
    cname = models.CharField(max_length=20, verbose_name='什么卡', unique=True)
    cdate = models.DateField(verbose_name='注册卡日期')
    cstudent = models.ForeignKey(StudentsInfo, on_delete=models.CASCADE, verbose_name='卡外键')
    is_delete = models.BooleanField(default=False, verbose_name='逻辑删除')

    def pub_date(self):
        # Registration date formatted as "YYYY年MM月DD日" for the admin list.
        return self.cdate.strftime('%Y年%m月%d日')
    pub_date.short_description = '办理日期'

    def c_sid(self):
        # Owning student's id, shown as a computed admin column.
        return self.cstudent.sid
    c_sid.short_description = '所属学生id'
    c_sid.admin_order_field = 'cid'

    class Meta:
        db_table = 'cards'
        # Names shown on the admin site (singular and plural).
        verbose_name = '卡名称'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.cname

    cards = CardsInfoManager()
|
import requests
import requests.exceptions
from bs4 import BeautifulSoup
import re
from money_parser import price_str
# Desktop Chrome/Edge User-Agent so scraped sites serve normal desktop markup.
headers={"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3829.0 Safari/537.36 Edg/77.0.197.1'}
def get_names_and_prices_of_products_on_given_page(page_url, header, write_to_file, element_for_name, name_class, element_for_price, price_class, postage_element="", postage_element_two=""):
    """Scrape one page and print (and optionally save) product names/prices.

    page_url      -- URL of the page to scrape.
    header        -- HTTP header dict sent with the request.
    write_to_file -- when truthy, append results to NameAndPriceContent.txt.
    element_for_name / name_class   -- tag + class/ID holding the product name.
    element_for_price / price_class -- tag + class/ID holding the product price.
    postage_element / postage_element_two -- optional tag + class/ID for postage.
    """
    try:
        # Bug fix: the function previously ignored its ``header`` argument
        # and always used the module-level ``headers`` dict.
        page = requests.get(page_url, headers=header)
        soup = BeautifulSoup(page.content, 'lxml')
        strings_from_site = set()
        banner = f"\n---------------------------------------------------------------------------------\n{page_url}\n---------------------------------------------------------------------------------"
        strings_from_site.add(banner)
        print(banner)
        if not postage_element and not postage_element_two:
            for product_name, product_price in zip(soup.find_all(element_for_name, name_class, recursive=True), soup.find_all(element_for_price, price_class, recursive=True)):
                if product_name and product_price:
                    line = f"\"{' '.join(product_name.get_text().strip().split())}\" has a price of £{float(price_str(product_price.get_text()))}"
                    strings_from_site.add(line)
                    print(line)
                elif not product_name or not product_price:
                    print(f"Soup was unable to find the specified tag '<{element_for_name}>' with an ID or Class of '.{name_class}' and '<{element_for_price}>' with an ID or Class of '.{price_class}'")
        else:
            for product_name, product_price, postage_price in zip(soup.find_all(element_for_name, name_class, recursive=True), soup.find_all(element_for_price, price_class, recursive=True), soup.find_all(postage_element, postage_element_two, recursive=True)):
                if product_name and product_price:
                    line = f"\"{' '.join(product_name.get_text().strip().split())}\" has a price of £{float(price_str(product_price.get_text()))} with {postage_price.get_text()}"
                    strings_from_site.add(line)
                    print(line)
                elif not product_name or not product_price:
                    print(f"Soup was unable to find the specified tag '<{element_for_name}>' with an ID or Class of '.{name_class}' and '<{element_for_price}>' with an ID or Class of '.{price_class}' and '<{postage_element}>' with an ID or Class of '.{postage_element_two}'")
        if write_to_file:
            # Open the output file once instead of once per line; ``with``
            # closes it automatically (the old explicit f.close() was redundant).
            with open("NameAndPriceContent.txt", "a+", encoding="utf8") as f:
                for thing in strings_from_site:
                    f.write(thing + "\n")
    except Exception as exception:
        # Best-effort scraper: report the failure and carry on.
        print(f"{exception} has been thrown")
def parse_given_xml_for_urls(xml_file, write_to_file, element_for_name, name_class, element_for_price, price_class, postage_element="null", postage_element_two="null"):
    """Parse a sitemap XML file and scrape every <loc> URL found in it."""
    try:
        with open(xml_file) as fp:
            sitemap = BeautifulSoup(fp, 'lxml-xml')
            for url_tag in sitemap.find_all("loc"):
                get_names_and_prices_of_products_on_given_page(
                    url_tag.get_text(), headers, write_to_file,
                    element_for_name, name_class,
                    element_for_price, price_class,
                    postage_element, postage_element_two)
    except Exception as exception:
        print("{} thrown".format(exception))
def get_user_input():
    """Interactively collect scraper settings and run the scrape.

    Bug fix: the "incorrect option" branch referenced ``user_option`` before
    assignment (raising NameError instead of printing the message); it now
    reports the raw ``option`` value.
    """
    print("If the script exits without warning after picking website etc, this means one of two things:\n1. The site uses Javascript to dynamically build it's pages, which means we cannot request that page unless you know the specific URL it uses to do so.\n2. The tags/classes you entered do not exist on the page you specified, thus crashing the script..")
    option = input("Choose an option:\n1: Import from XML\n2. Import from single URL\n")
    if int(option) == 1:
        print('You have chosen to import based on a generated XML file.\n------------------')
        user_option = input('Input the name of the XML file produced by https://www.xml-sitemaps.com/: ')
    elif int(option) == 2:
        print('You have chosen to import based on a single URL.\n----------------------')
        user_option = input("What is the URL you would like to import the Products/Prices of?\n")
    else:
        # Was f"{user_option} ..." - NameError, since user_option is only
        # assigned in the two valid branches above.
        print(f"{option} is an incorrect option")
        return
    user_confirm = input("Would you like to output this to a file, or not? Y or N\n")
    if user_confirm == 'Y':
        user_boolean = True
    elif user_confirm == 'N':
        user_boolean = False
    else:
        print(f"{user_confirm} is an incorrect option")
        return
    try:
        element_name_one = input("Please enter the HTML element that contains the Products Name (e.g: p):\n")
        element_name_two = input("Please enter the class or ID this element has:\n")
        price_element = input("Please enter the HTML element that contains the Products Price (e.g: price):\n")
        price_element_two = input("Please enter the class or ID this element has:\n")
        postage = input("Does this site have postage price: Y or N\n")
        if postage == 'Y':
            postage_element = input("Please enter the HTML element that contains the Products Postage Price (e.g: postage):\n")
            postage_element_two = input("Please enter the class or ID this element has:\n")
        else:
            postage_element = ""
            postage_element_two = ""
        # Dispatch to the XML-driven or single-URL scraper.
        if int(option) == 1:
            parse_given_xml_for_urls(user_option, user_boolean, element_name_one, element_name_two, price_element, price_element_two, postage_element, postage_element_two)
        elif int(option) == 2:
            get_names_and_prices_of_products_on_given_page(user_option, headers, user_boolean, element_name_one, element_name_two, price_element, price_element_two, postage_element, postage_element_two)
    except Exception as exception:
        print(f"{exception} has been thrown")
# Script entry point: run the interactive prompt.
get_user_input()
"""urlconf for the base application"""
from django.conf.urls import url, patterns
# NOTE(review): ``patterns()`` and string view references were removed in
# Django 1.10, so this urlconf only works on Django <= 1.9. Porting requires
# importing the view callables from radiator.views and using a plain list.
# Confirm the project's pinned Django version before changing this.
urlpatterns = patterns('radiator.views',
    url(r'^$', 'index', name='index'),
    url(r'^alarm/(?P<id>[^/]+)/$', 'alarm', name='alarm'),
    # url(r'^info$', 'info', name='info'),
    url(r'^light/(?P<val>[^/]+)/$', 'light_action', name='light'),
)
|
#!/usr/bin/env python3
# CBC padding-oracle attack against a remote "raffle" service: forges a
# ciphertext whose decryption is the winning-numbers plaintext, using the
# server's valid/invalid-ticket responses as the oracle.
# Imports
from pwn import *
import base64
from Crypto.Util.number import long_to_bytes

# Connection
host = "chal.b01lers.com"
port = 25003
s = remote(host, port)
context.log_level = 'debug'

# Get losing ticket >n<
s.recvuntil('raffle ticket:\n')
ticket = base64.b64decode(s.recvuntil('\n', drop=True).decode())

# Get winning numbers
s.recvuntil('winning numbers are:\n')
lststr = s.recvuntil('\n', drop=True).decode()
# Strip the surrounding brackets of the printed Python list and parse it.
winlst = [int(i) for i in lststr[1:-1].split(', ')]

# PADDING ORACLE ENCRYPTION ATTACK
# Construct payload: the plaintext we want the forged ticket to decrypt to,
# PKCS#7-padded to a whole number of 16-byte blocks.
payload = b'numbers:' + ','.join([str(i) for i in winlst]).encode()
padpay = payload + (16-len(payload)%16) * long_to_bytes(16-len(payload)%16)
print(padpay)
print()

# Our payload ticket (starts off as a random ciphertext)
tick_pl = b'\xaa'*16*5
print(tick_pl)

# Work backwards block by block: for each ciphertext block, recover the
# block-cipher decryption D(C) byte by byte, then choose the previous block
# so the XOR yields the desired plaintext block.
# For every block in our payload (without iv)
for blc in range(4):
    # Get blocks up to current block
    rtb = tick_pl[:len(tick_pl)-16*blc]
    #print(rtb)
    D = b''
    # For every byte in our block
    for byt in range(1,17):
        # Pad accordingly: force the already-recovered tail bytes to decrypt
        # to the padding value ``byt``.
        pb = bytes([D[i] ^ byt for i in range(len(D))][::-1])
        padbyt = rtb[:(len(rtb)-16)-(byt-1)] + pb + rtb[-16:]
        # Just to be sure :)
        assert len(padbyt) == len(rtb)
        # Iterate over all possible bytes
        b_correct = None
        pos_lst = []
        for b in range(256):
            # Convert to bytearray for item assignment
            trytick = bytearray( padbyt )
            # Set trial byte
            trytick[-16 - byt] = b
            # Again, just to be sure :)
            assert len(trytick) == len(rtb)
            # Time to check the validity of our padding (the attack part)
            # Send ticket
            s.recvuntil('Redeem a ticket:\n')
            s.sendline( base64.b64encode( ticket + trytick ) )
            # Get response
            resp = s.recvuntil('\n', drop=True).decode()
            # NOTE(review): if the server ever replies with anything other
            # than these two exact strings, ``check`` is left unbound here
            # (NameError on first iteration) - confirm the protocol.
            if resp == 'that is an invalid ticket':
                check = False
            elif resp == 'sorry, that ticket did not win anything':
                check = True
            # Keep byte if accepted
            if check:
                pos_lst.append(b)
                break
        # Only keep the correct byte (padding degeneracy)
        # NOTE(review): because of the ``break`` above, pos_lst can hold at
        # most one byte, so the len == 2 branch below is unreachable and an
        # exhausted search (len == 0) falls into the ValueError - confirm
        # whether the break should be removed to handle the degenerate case.
        if len(pos_lst) == 1:
            b_correct = pos_lst[0]
        elif len(pos_lst) == 2:
            b1,b2 = pos_lst
            if b1 == padbyt[-16 - byt]:
                b_correct = b2
            if b2 == padbyt[-16 - byt]:
                b_correct = b1
        else:
            print(pos_lst)
            raise ValueError('Oh oh...')
        # Get decrypted value
        D += long_to_bytes(b_correct ^ byt)
    # Get payload block
    pl_blc = padpay[len(padpay)-16*(blc+1):len(padpay)-16*blc]
    # Create ciphertext block i-1 such that encryption of C_i yields P_i
    C_rep = bytes([pl_blc[i] ^ D[-(i+1)] for i in range(16)])
    # Update our malicious winning ticket >:)
    tick_pl = tick_pl[:len(padpay)-16*(blc+1)] + C_rep + tick_pl[len(padpay)-16*blc:]
    print()
    print(tick_pl)

# Send final winning ticket
s.recvuntil('Redeem a ticket:\n')
s.sendline( base64.b64encode( tick_pl ) )
s.interactive()
import torch
import time
import numpy as np
import atexit
from collections import defaultdict
# Accumulated timings in milliseconds, keyed by timer name.
cuda_timers = defaultdict(list)
timers = defaultdict(list)
class CudaTimer:
    """Context manager timing a GPU section with CUDA events.

    The elapsed milliseconds are appended to ``cuda_timers[timer_name]``
    on exit.
    """
    def __init__(self, timer_name=''):
        self.timer_name = timer_name
        # enable_timing is required for elapsed_time() to be available.
        self.start = torch.cuda.Event(enable_timing=True)
        self.end = torch.cuda.Event(enable_timing=True)

    def __enter__(self):
        self.start.record()
        return self

    def __exit__(self, *args):
        self.end.record()
        # CUDA events are asynchronous: synchronize before reading the time.
        torch.cuda.synchronize()
        cuda_timers[self.timer_name].append(self.start.elapsed_time(self.end))
class Timer:
    """Context manager timing a CPU section.

    The elapsed time in milliseconds is appended to ``timers[timer_name]``
    and kept on ``self.interval``.
    """
    def __init__(self, timer_name=''):
        self.timer_name = timer_name

    def __enter__(self):
        # Bug fix: use perf_counter() instead of time.time() - perf_counter
        # is monotonic and high-resolution, while time.time() can jump (e.g.
        # NTP adjustments) and corrupt interval measurements. Note that
        # self.start/self.end are therefore perf_counter values, not epoch
        # timestamps; only their difference is meaningful.
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start  # measured in seconds
        self.interval *= 1000.0  # convert to milliseconds
        timers[self.timer_name].append(self.interval)
def print_timing_info():
    """Print the mean of every recorded timer, in ms (or s when >= 1 s)."""
    print('== Timing statistics ==')
    all_entries = list(cuda_timers.items()) + list(timers.items())
    for timer_name, timing_values in all_entries:
        mean_ms = np.mean(np.array(timing_values))
        sample_count = len(timing_values)
        if mean_ms < 1000.0:
            print('{}: {:.2f} ms ({} samples)'.format(timer_name, mean_ms, sample_count))
        else:
            print('{}: {:.2f} s ({} samples)'.format(timer_name, mean_ms / 1000.0, sample_count))

# this will print all the timer values upon termination of any program that imported this file
atexit.register(print_timing_info)
|
import consul
import sys
c = consul.Consul()
def register_server(server):
    """Register one service instance with the local Consul agent.

    server -- dict with at least a 'port' key (string); the service is
    registered as 'server-<port>' listening on that port.
    """
    c.agent.service.register(
        'server-'+server['port'],
        service_id='server-'+server['port'],
        tags=["primary", "v1"],
        port=int(server['port'])
    )
if __name__ == "__main__":
    # Number of service instances to register; defaults to 1 and can be
    # overridden by the first CLI argument.
    num_server = 1
    if len(sys.argv) > 1:
        num_server = int(sys.argv[1])
    print(f"num_server={num_server}")
    for each_server in range(num_server):
        # Bug fix: "200{}".format(i) produced "20010" for i >= 10; compute
        # the port arithmetically instead (2000, 2001, ..., identical for
        # the first ten servers).
        server_port = str(2000 + each_server)
        print(f"Starting a server at:{server_port}...")
        current_server = {
            'server': f"tcp://127.0.0.1:{server_port}",
            'port': server_port
        }
        register_server(current_server)
import datetime
from fabric import Connection
from invoke import Collection
from fabric import Config
from fabric import task
from patchwork import files
from fabfile.core import *
"""
Resque
This also installs basic ruby stuff, including rbenv so that we can
isolate the ruby and gem installs to a known version and not need to
use sudo gem install (only gem install)
The ruby install stuff comes from here:
https://linuxize.com/post/how-to-install-ruby-on-debian-9/
<del>https://www.digitalocean.com/community/tutorials/how-to-install-ruby-on-rails-with-rbenv-on-ubuntu-14-04</del>
"""
def install(c):
    """Install rbenv, a pinned Ruby, and the resque/unicorn gem stack.

    Installs build prerequisites via apt, bootstraps rbenv for the project
    user (so gems never need sudo), installs a pinned Ruby version, then the
    gems needed to run resque-web under unicorn.

    c -- fabric Connection (extended by fabfile.core with run_with_cd etc.).
    """
    util.start()
    # New Ruby install using rbenv (this takes a while)
    # https://linuxize.com/post/how-to-install-ruby-on-debian-9/
    c.sudo('apt-get install -yq git curl libssl-dev libreadline-dev zlib1g-dev')
    c.sudo('apt-get install -yq autoconf bison build-essential')
    c.sudo('apt-get install -yq libyaml-dev libreadline-dev libncurses5-dev libffi-dev libgdbm-dev')
    # Put rbenv on PATH for both interactive (.bashrc) and login
    # (.bash_profile) shells.
    c.run_with_cd('~', f'echo \'export PATH="/home/{c.config.project.username}/.rbenv/bin:$PATH"\' >> ~/.bashrc')
    c.run_with_cd('~', 'echo \'eval "$(rbenv init -)"\' >> ~/.bashrc')
    c.run_with_cd('~', 'source ~/.bashrc')
    c.run_with_cd('~', f'echo \'export PATH="/home/{c.config.project.username}/.rbenv/bin:$PATH"\' >> ~/.bash_profile')
    c.run_with_cd('~', 'echo \'eval "$(rbenv init -)"\' >> ~/.bash_profile')
    c.run_with_cd('~', 'source ~/.bash_profile')
    #
    # 01/21/20 CG: There is still something wrong with this part of the install
    # The whole install will fail with the following:
    #
    # Fatal error: run() received nonzero return code 1 while executing!
    # Requested: curl -sL https://github.com/rbenv/rbenv-installer/raw/master/bin/rbenv-installer | bash -
    # Executed: /bin/bash -l -c "cd ~ >/dev/null && curl -sL https://github.com/rbenv/rbenv-installer/raw/master/bin/rbenv-installer | bash -"
    #
    # Aborting.
    #
    # The fix is to just run the "server_setup" again and the 2nd time around, it
    # will install fine (takse a while though
    #
    c.run_with_cd('~', 'curl -sL https://github.com/rbenv/rbenv-installer/raw/master/bin/rbenv-installer | bash -')
    version = '2.6.5' # check ruby-lang.org
    c.run(f'rbenv install {version}')
    c.run(f'rbenv global {version}')
    c.run('ruby -v')
    # Skip gem documentation to speed up the installs below.
    c.run("echo 'gem: --no-document\' > ~/.gemrc")
    c.run('gem install bundler')
    c.run('gem install json')
    c.run('gem install resque')
    c.run('gem install unicorn')
    # Try to install new tabs on the resque UI
    # As of 7/2/19, still cannot get this to work
    # https://github.com/mattgibson/resque-scheduler-web
    # https://github.com/resque/resque-scheduler/issues/301
    c.run('gem install resque-scheduler-web')
    util.done()
def configure(c):
    """Render and enable the resque-web site on the target host.

    Creates the directory layout, uploads unicorn/nginx/config.ru templates,
    substitutes redis connection details, server names and Letsencrypt cert
    paths into them, then restarts unicorn and nginx.
    """
    util.start()
    # Create the resque-web directory structure
    c.sudo('mkdir -p /etc/unicorn')
    c.sudo('mkdir -p /var/www/resque-web')
    c.sudo('mkdir -p /var/www/resque-web/shared')
    c.sudo('mkdir -p /var/www/resque-web/config')
    c.sudo('mkdir -p /var/www/resque-web/log')
    # NOTE(review): 'shared' is created twice (also three lines up) - harmless
    # with -p, but probably one of the two was meant to be another directory.
    c.sudo('mkdir -p /var/www/resque-web/shared')
    c.sudo('chown -R www-data:www-data /var/www/resque-web')
    c.sudo('chmod -R 775 /var/www/resque-web')
    c.put_template('etc-init.d-unicorn', '/etc/init.d/unicorn', sudo=True)
    c.put_template('etc-nginx-resque-web', '/etc/nginx/sites-available/resque-web', sudo=True)
    c.put_template('etc-unicorn-resque-web.conf', '/etc/unicorn/resque-web.conf', sudo=True)
    c.put_template('var-www-config.ru', '/var/www/resque-web/config.ru', sudo=True)
    c.put_template('var-www-unicorn.rb', '/var/www/resque-web/config/unicorn.rb', sudo=True)
    c.put_template('var-www-resque.rb', '/var/www/resque-web/config/resque.rb', sudo=True)
    # Get some env variables
    # NOTE(review): project_name is never used below - confirm whether the
    # later c.config.project.name uses were meant to be this lowercased form.
    project_name = c.config.project.name.lower()
    # Munge the server_names to create a unique list
    # TODO: Move to separate function
    # NOTE(review): ``env`` is not defined in this module; it presumably comes
    # from the ``fabfile.core`` star import - confirm.
    server_names = env.config.get('server_names', "")
    if (server_names != "" and 'resque' in server_names and server_names['resque'] != ""):
        server_names = server_names['resque']
        server_names.append(c.host)
        server_names = set(server_names)
        nginx_server_name = " ".join(server_names)
    else:
        nginx_server_name = c.host
    print(f'Setting nginx server_name: {nginx_server_name}')
    c.sed('/etc/nginx/sites-available/resque-web', '{{localhost}}', nginx_server_name, sudo=True)
    # Get the Letsencrypt cert up to the nginx
    c.sed('/etc/nginx/sites-available/resque-web', '{{project_name}}', c.config.project.name, sudo=True)
    #util.sed(c, '/etc/nginx/sites-available/resque-web', '{{project_name}}', c.config.project.name, sudo=True)
    # Configure resque to the correct Redis server; fall back to a local
    # redis when no explicit host is configured.
    redis_host = 'localhost'
    redis_port = 6379
    redis_password = env.password
    if (c.config.redis.host and c.config.redis.host != ''):
        redis_host = c.config.redis.host
        redis_port = c.config.redis.port
        redis_password = c.config.redis.password
    print(f'Using redis server @ {redis_host}:{redis_port}')
    c.sed('/var/www/resque-web/config.ru', '{{host}}', redis_host, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '{{port}}', redis_port, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '{{password}}', redis_password, sudo=True)
    # Continue configuring resque server
    c.sed('/var/www/resque-web/config/resque.rb', '{{password}}', c.config.project.password, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '#{{namespace}}', f'Resque.redis.namespace = "{c.config.project.name}"', sudo=True)
    if not c.exists('/etc/nginx/sites-enabled/resque-web'):
        c.sudo('ln -s /etc/nginx/sites-available/resque-web /etc/nginx/sites-enabled/resque-web')
    # Do token replacement on the resque-web nginx for SSL cert
    ssl_cert = f'/etc/letsencrypt/live/{c.config.project.name}.com/fullchain.pem'
    ssl_cert_key = f'/etc/letsencrypt/live/{c.config.project.name}.com/privkey.pem'
    # Replace token
    c.sed('/etc/nginx/sites-available/resque-web', '{{ssl_cert}}', ssl_cert, sudo=True)
    c.sed('/etc/nginx/sites-available/resque-web', '{{ssl_cert_key}}', ssl_cert_key, sudo=True)
    c.sudo('chown root:root /etc/init.d/unicorn')
    c.sudo('chmod 775 /etc/init.d/unicorn')
    # Have unicorn (resque-web) start on boot
    c.sudo('update-rc.d unicorn defaults')
    # Restart unicorn and nginx
    c.sudo('/etc/init.d/unicorn restart')
    c.sudo('/etc/init.d/nginx restart')
    util.done()
def namespace(c, namespace):
    """Re-render config.ru with a new Resque redis namespace and restart unicorn.

    c         -- fabric Connection (extended by fabfile.core).
    namespace -- redis namespace to set for Resque.
    """
    util.start()
    # Replace any existing config.ru with a freshly rendered template.
    if (c.exists('/var/www/resque-web/config.ru')):
        c.sudo('rm /var/www/resque-web/config.ru')
    c.put_template('var-www-config.ru', '/var/www/resque-web/config.ru', sudo=True)
    # Configure resque to the correct Redis server; fall back to a local
    # redis when no explicit host is configured.
    redis_host = 'localhost'
    redis_port = 6379
    redis_password = c.config.project.password
    if (c.config.redis.host and c.config.redis.host != ''):
        redis_host = c.config.redis.host
        redis_port = c.config.redis.port
        redis_password =c.config.redis.password
    print(f'Using redis server @ {redis_host}:{redis_port}')
    c.sed('/var/www/resque-web/config.ru', '{{host}}', redis_host, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '{{port}}', redis_port, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '{{password}}', redis_password, sudo=True)
    #sed('/var/www/resque-web/config.ru', '{{password}}', redis_password, sudo=True)
    c.sed('/var/www/resque-web/config.ru', '#{{namespace}}', f'Resque.redis.namespace = "{namespace}"', sudo=True)
    c.sudo('/etc/init.d/unicorn restart')
    util.done()
|
# You are given an array of k linked-lists lists, each linked-list is sorted in ascending order.
# Merge all the linked-lists into one sorted linked-list and return it.
# Example 1:
# Input: lists = [[1,4,5],[1,3,4],[2,6]]
# Output: [1,1,2,3,4,4,5,6]
# Explanation: The linked-lists are:
# [
# 1->4->5,
# 1->3->4,
# 2->6
# ]
# merging them into one sorted list:
# 1->1->2->3->4->4->5->6
# Example 2:
# Input: lists = []
# Output: []
# Example 3:
# Input: lists = [[]]
# Output: []
|
# k-means color quantization of an image: cluster the pixels into k colors
# and repaint every pixel with its cluster's mean color.
from PIL import Image, ImageDraw
import random
import numpy as np

INF = 1e9
mode = 'RGB'

im = Image.open('dataset/Lenna.png')
nimg = Image.new(mode, im.size)
draw = ImageDraw.Draw(nimg)
pix = im.getdata()

k = 256
claster = [0] * len(pix)  # cluster index per pixel
# Start from k random RGB centers.
centers = [[random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for i in range(0, k)]

# Fixed number of Lloyd iterations (no convergence test), as before.
for i in range(0, 3):
    # Assignment step: attach every pixel to its nearest center.
    for j in range(0, len(pix)):
        p = pix[j]
        min_dist = INF
        best_cent = -1
        for l, cent in enumerate(centers):
            dist = np.linalg.norm(np.array(cent) - np.array(p))
            if dist < min_dist:
                best_cent = l
                min_dist = dist
        claster[j] = best_cent
    # Update step: recompute each center as the mean of its members.
    # Perf fix: accumulate the per-cluster sums in ONE pass over the pixels
    # instead of re-scanning all pixels once per cluster (was O(k*n), now O(n)).
    sums = [[0, 0, 0] for _ in range(k)]
    counts = [0] * k
    for l in range(len(pix)):
        cl = claster[l]
        sums[cl][0] += pix[l][0]
        sums[cl][1] += pix[l][1]
        sums[cl][2] += pix[l][2]
        counts[cl] += 1
    for j in range(0, k):
        if counts[j] != 0:
            centers[j] = [sums[j][0] // counts[j], sums[j][1] // counts[j], sums[j][2] // counts[j]]
        else:
            # Re-seed empty clusters randomly so k stays constant.
            centers[j] = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]

# Paint the quantized image.
# Bug fix: getdata() is row-major (y outer, x inner); the original iterated
# x outer, which transposed the output image. Iterate in row-major order.
curr = 0
for y in range(0, nimg.size[1]):
    for x in range(0, nimg.size[0]):
        color = centers[claster[curr]]
        draw.point((x, y), (color[0], color[1], color[2]))
        curr += 1
nimg.show()
|
# Generated by Django 3.1.7 on 2021-03-07 12:43
from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds a soft-activation flag to users.Group; existing rows default to active.

    dependencies = [
        ('users', '0005_auto_20210305_1728'),
    ]

    operations = [
        migrations.AddField(
            model_name='group',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
|
#!/usr/bin/env python
'''
simple plotting tool
Author: T.M.Perry UW
'''
import ROOT
from ROOT import TH1F,TFile,TCanvas,TLegend,TLine,TLatex
from ROOT import gStyle
import cmsPrelim
#import cmsPrelim as cpr
# Hide the default ROOT statistics box on all histograms.
gStyle.SetOptStat('')
tex = ROOT.TLatex()
tex.SetTextAlign(13)
tex.SetNDC(True)
# for "legend" (NDC coordinates and text sizes of the hand-drawn labels)
xpos = 0.58
ypos = 0.64
big = 0.04
sml = 0.03
rebin_EE = 2
extraName = "_v3"
# Section toggles: which plot families to produce.
# NOTE(review): draw_res is set False and then immediately True below - the
# first assignment is dead code kept as a quick on/off toggle.
draw_res = False
draw_res_v_nvtx = False
draw_res_v_rho = False
draw_res = True
#draw_res_v_nvtx = True
#draw_res_v_rho = True
res_line0 = True
# One input file suffix per trigger/reconstruction scenario.
varnames = [
    "HLT_50ns",
    "HLT_25ns",
    #"HLT_old",
    "RECO_50ns",
    "RECO_25ns",
    #"RECO_old",
]
# Line colors: HLT-trained, RECO-trained, unscaled.
c_hlt = ROOT.EColor.kRed
c_rec = ROOT.EColor.kCyan+2
c_pre = ROOT.EColor.kBlack
canx = 800
cany = 900
c1 = TCanvas('c1','c1',canx,cany)
c2 = TCanvas('c2','c2',canx,cany)
c3 = TCanvas('c3','c3',canx,cany)
# Log z-scale for the 2D resolution-vs-pileup maps.
c2.SetLogz()
c3.SetLogz()
# Main loop: one pass per input scenario; each section draws and saves PNGs.
for varname in varnames:
    theFile = TFile("./roots/BDTout_%s.root"%(varname))
    ### ###
    # resolution #
    ### ###
    ######################################################################
    if draw_res:
        c1.cd()
        # res_preEB = theFile.Get("res_preEB")
        # res_preEB.SetName("res_preEB")
        # res_preEB.SetLineColor(1)
        # res_preEB.SetLineWidth(3)
        # res_EBmax = res_preEB.GetMaximum()
        # res_EBmean = res_preEB.GetMean()
        # res_EBrms = res_preEB.GetRMS()
        # res_preEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        #
        # res_recoEB = theFile.Get("res_recoEB")
        # res_recoEB.SetName("res_recoEB")
        # res_recoEB.SetLineColor(c_rec)
        # res_recoEB.SetLineWidth(3)
        # res_recoEBmax = res_recoEB.GetMaximum()
        # res_recoEBmean = res_recoEB.GetMean()
        # res_recoEBrms = res_recoEB.GetRMS()
        #
        # res_hltEB = theFile.Get("res_hltEB")
        # res_hltEB.SetName("res_hltEB")
        # res_hltEB.SetLineColor(c_hlt)
        # res_hltEB.SetLineWidth(2)
        # res_hltEBmax = res_hltEB.GetMaximum()
        # res_hltEBmean = res_hltEB.GetMean()
        # res_hltEBrms = res_hltEB.GetRMS()
        #
        # #res_legEB=TLegend(0.11,0.6,0.4,0.89)
        # #res_legEB.SetFillColor(0)
        # #res_legEB.SetBorderSize(0)
        # #res_legEB.AddEntry(res_preEB,"unscaled")
        # #res_legEB.AddEntry(res_recoEB,"RECO BDT applied")
        # #res_legEB.AddEntry(res_hltEB,"HLT BDT applied")
        #
        # #res_statEB=TLegend(0.6,0.6,0.89,0.89)
        # #res_statEB.SetFillColor(0)
        # #res_statEB.SetBorderSize(0)
        # #res_statEB.AddEntry(res_preEB,"Mean: %0.2f RMS: %0.2f"%(res_EBmean,res_EBrms))
        # #res_statEB.AddEntry(res_recoEB,"Mean: %0.2f RMS: %0.2f"%(res_recoEBmean,res_recoEBrms))
        # #res_statEB.AddEntry(res_hltEB,"Mean: %0.2f RMS: %0.2f"%(res_hltEBmean,res_hltEBrms))
        #
        # EBMax = 400
        # #EBMax = 1350
        # #EBMax = max(res_EBmax,res_recoEBmax,res_hltEBmax)
        #
        # res_lineEB0 = TLine(0.,0.,0.,1.05*EBMax)
        # res_lineEB0.SetLineWidth(1)
        # res_lineEB0.SetLineStyle(2)
        #
        # res_preEB.SetMaximum(1.1*EBMax)
        # res_preEB.SetTitle("Barrel")
        # res_preEB.Draw("hist")
        # res_recoEB.Draw("hist,sames")
        # res_hltEB.Draw("hist,sames")
        # #res_legEB.Draw("sames")
        # #res_statEB.Draw("sames")
        #
        # tex.SetTextSize(big)
        # tex.DrawLatex(xpos,ypos+0.25,"#color[%s]{Unscaled}"%(c_pre))
        # tex.DrawLatex(xpos,ypos+0.15,"#color[%s]{HLT Trained}"%(c_hlt))
        # tex.DrawLatex(xpos,ypos+0.05,"#color[%s]{RECO Trained}"%(c_rec))
        # tex.SetTextSize(sml)
        # tex.DrawLatex(xpos,ypos+0.2,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_pre,res_EBmean,res_EBrms))
        # tex.DrawLatex(xpos,ypos+0.1,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_hlt,res_recoEBmean,res_recoEBrms))
        # tex.DrawLatex(xpos,ypos,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_rec,res_hltEBmean,res_hltEBrms))
        # cmsPrelim.prelim_noLumi()
        # if res_line0: res_lineEB0.Draw()
        #
        # c1.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_res_EB%s.png"%(varname,extraName))
        # Endcap (EE) resolution overlay: unscaled vs RECO-trained vs HLT-trained.
        res_preEE = theFile.Get("res_preEE")
        res_preEE.SetName("res_preEE")
        res_preEE.SetLineColor(1)
        res_preEE.SetLineWidth(3)
        res_EEmax = res_preEE.GetMaximum()
        res_EEmean = res_preEE.GetMean()
        res_EErms = res_preEE.GetRMS()
        res_preEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_recoEE = theFile.Get("res_recoEE")
        res_recoEE.SetName("res_recoEE")
        res_recoEE.SetLineColor(c_rec)
        res_recoEE.SetLineWidth(3)
        res_recoEEmean = res_recoEE.GetMean()
        res_recoEErms = res_recoEE.GetRMS()
        res_recoEEmax = res_recoEE.GetMaximum()
        res_hltEE = theFile.Get("res_hltEE")
        res_hltEE.SetName("res_hltEE")
        res_hltEE.SetLineColor(c_hlt)
        res_hltEE.SetLineWidth(2)
        res_hltEEmean = res_hltEE.GetMean()
        res_hltEErms = res_hltEE.GetRMS()
        res_hltEEmax = res_hltEE.GetMaximum()
        #res_legEE=TLegend(0.11,0.6,0.4,0.89)
        #res_legEE.SetFillColor(0)
        #res_legEE.SetBorderSize(0)
        #res_legEE.AddEntry(res_preEE,"Unscaled")
        #res_legEE.AddEntry(res_recoEE,"RECO BDT applied")
        #res_legEE.AddEntry(res_hltEE,"HLT BDT applied")
        #res_statEE=TLegend(0.6,0.6,0.89,0.89)
        #res_statEE.SetFillColor(0)
        #res_statEE.SetBorderSize(0)
        #res_statEE.AddEntry(res_preEE,"Mean: %0.2f RMS: %0.3f"%(res_EEmean,res_EErms))
        #res_statEE.AddEntry(res_recoEE,"Mean: %0.2f RMS: %0.3f"%(res_recoEEmean,res_recoEErms))
        #res_statEE.AddEntry(res_hltEE,"Mean: %0.2f RMS: %0.3f"%(res_hltEEmean,res_hltEErms))
        # Hard-coded y-axis ceiling for the overlay.
        EEMax = 170
        #EEMax = 750
        #EEMax = max(res_EEmax,res_recoEEmax,res_hltEEmax)
        res_lineEE0 = TLine(0.,0.,0.,1.05*EEMax)
        res_lineEE0.SetLineWidth(1)
        res_lineEE0.SetLineStyle(2)
        res_preEE.SetMaximum(1.1*EEMax)
        res_preEE.SetTitle("Endcap")
        res_preEE.Rebin(rebin_EE)
        res_recoEE.Rebin(rebin_EE)
        res_hltEE.Rebin(rebin_EE)
        res_preEE.Draw("hist")
        res_recoEE.Draw("hist,sames")
        res_hltEE.Draw("hist,sames")
        #res_legEE.Draw("sames")
        #res_statEE.Draw("sames")
        tex.SetTextSize(big)
        tex.DrawLatex(xpos,ypos+0.25,"#color[%s]{Unscaled}"%(c_pre))
        tex.DrawLatex(xpos,ypos+0.15,"#color[%s]{HLT Trained}"%(c_hlt))
        tex.DrawLatex(xpos,ypos+0.05,"#color[%s]{RECO Trained}"%(c_rec))
        tex.SetTextSize(sml)
        # NOTE(review): the next two DrawLatex calls print the RECO stats in
        # the HLT color and the HLT stats in the RECO color - confirm whether
        # the mean/RMS pairs are swapped relative to the labels above.
        tex.DrawLatex(xpos,ypos+0.2,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_pre,res_EEmean,res_EErms))
        tex.DrawLatex(xpos,ypos+0.1,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_hlt,res_recoEEmean,res_recoEErms))
        tex.DrawLatex(xpos,ypos,"#color[%s]{Mean: %0.2f RMS: %0.3f}"%(c_rec,res_hltEEmean,res_hltEErms))
        cmsPrelim.prelim_noLumi()
        if res_line0: res_lineEE0.Draw()
        c1.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_res_EE%s.png"%(varname,extraName))
    ### ###
    # res v nvtx #
    ### ###
    ######################################################################
    if draw_res_v_nvtx:
        c2.cd()
        # Common z-range for all resolution-vs-nvtx 2D maps.
        rvmin = 1
        rvmax = 2e3
        res_v_nvtx_line = TLine(0.,1.,0.,4.)
        res_v_nvtx_line.SetLineWidth(1)
        res_v_nvtx_line.SetLineStyle(2)
        res_v_nvtx_preEB = theFile.Get("res_v_nvtx_preEB")
        res_v_nvtx_preEB.SetName("res_v_nvtx_preEB")
        res_v_nvtx_preEB.SetTitle("Barrel: Unscaled")
        res_v_nvtx_preEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_preEB.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_preEB.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_preEB.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_preEB%s.png"%(varname,extraName))
        res_v_nvtx_recoEB = theFile.Get("res_v_nvtx_recoEB")
        res_v_nvtx_recoEB.SetName("res_v_nvtx_recoEB")
        res_v_nvtx_recoEB.SetTitle("Barrel: RECO Trained")
        res_v_nvtx_recoEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_recoEB.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_recoEB.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_recoEB.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_recoEB%s.png"%(varname,extraName))
        res_v_nvtx_hltEB = theFile.Get("res_v_nvtx_hltEB")
        res_v_nvtx_hltEB.SetName("res_v_nvtx_hltEB")
        res_v_nvtx_hltEB.SetTitle("Barrel: HLT Trained")
        res_v_nvtx_hltEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_hltEB.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_hltEB.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_hltEB.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_hltEB%s.png"%(varname,extraName))
        # NOTE(review): the three EE (endcap) histograms below are titled
        # "Barrel: ..." - copy/paste slip, should presumably say "Endcap".
        res_v_nvtx_preEE = theFile.Get("res_v_nvtx_preEE")
        res_v_nvtx_preEE.SetName("res_v_nvtx_preEE")
        res_v_nvtx_preEE.SetTitle("Barrel: Unscaled")
        res_v_nvtx_preEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_preEE.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_preEE.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_preEE.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_preEE%s.png"%(varname,extraName))
        res_v_nvtx_recoEE = theFile.Get("res_v_nvtx_recoEE")
        res_v_nvtx_recoEE.SetName("res_v_nvtx_recoEE")
        res_v_nvtx_recoEE.SetTitle("Barrel: RECO Trained")
        res_v_nvtx_recoEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_recoEE.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_recoEE.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_recoEE.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_recoEE%s.png"%(varname,extraName))
        res_v_nvtx_hltEE = theFile.Get("res_v_nvtx_hltEE")
        res_v_nvtx_hltEE.SetName("res_v_nvtx_hltEE")
        res_v_nvtx_hltEE.SetTitle("Barrel: HLT Trained")
        res_v_nvtx_hltEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_nvtx_hltEE.GetYaxis().SetTitle("Nr. Primary Vertices")
        res_v_nvtx_hltEE.GetZaxis().SetRangeUser(rvmin,rvmax)
        res_v_nvtx_hltEE.Draw("colz")
        if res_line0: res_v_nvtx_line.Draw()
        c2.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVv_hltEE%s.png"%(varname,extraName))
    ### ###
    # res v rho #
    ### ###
    ######################################################################
    if draw_res_v_rho:
        c3.cd()
        # Common z-range for all resolution-vs-rho 2D maps.
        rrmin = 1
        rrmax = 2e3
        res_v_rho_line = TLine(0.,0.,0.,2.)
        res_v_rho_line.SetLineWidth(1)
        res_v_rho_line.SetLineStyle(2)
        res_v_rho_preEB = theFile.Get("res_v_rho_preEB")
        res_v_rho_preEB.SetName("res_v_rho_preEB")
        res_v_rho_preEB.SetTitle("Barrel: Unscaled")
        res_v_rho_preEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_preEB.GetYaxis().SetTitle("Rho")
        res_v_rho_preEB.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_preEB.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_preEB%s.png"%(varname,extraName))
        res_v_rho_recoEB = theFile.Get("res_v_rho_recoEB")
        res_v_rho_recoEB.SetName("res_v_rho_recoEB")
        res_v_rho_recoEB.SetTitle("Barrel: RECO Trained")
        res_v_rho_recoEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_recoEB.GetYaxis().SetTitle("Rho")
        res_v_rho_recoEB.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_recoEB.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_recoEB%s.png"%(varname,extraName))
        res_v_rho_hltEB = theFile.Get("res_v_rho_hltEB")
        res_v_rho_hltEB.SetName("res_v_rho_hltEB")
        res_v_rho_hltEB.SetTitle("Barrel: HLT Trained")
        res_v_rho_hltEB.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_hltEB.GetYaxis().SetTitle("Rho")
        res_v_rho_hltEB.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_hltEB.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_hltEB%s.png"%(varname,extraName))
        # NOTE(review): the three EE (endcap) histograms below are titled
        # "Barrel: ..." - copy/paste slip, should presumably say "Endcap".
        res_v_rho_preEE = theFile.Get("res_v_rho_preEE")
        res_v_rho_preEE.SetName("res_v_rho_preEE")
        res_v_rho_preEE.SetTitle("Barrel: Unscaled")
        res_v_rho_preEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_preEE.GetYaxis().SetTitle("Rho")
        res_v_rho_preEE.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_preEE.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_preEE%s.png"%(varname,extraName))
        res_v_rho_recoEE = theFile.Get("res_v_rho_recoEE")
        res_v_rho_recoEE.SetName("res_v_rho_recoEE")
        res_v_rho_recoEE.SetTitle("Barrel: RECO Trained")
        res_v_rho_recoEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_recoEE.GetYaxis().SetTitle("Rho")
        res_v_rho_recoEE.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_recoEE.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_recoEE%s.png"%(varname,extraName))
        res_v_rho_hltEE = theFile.Get("res_v_rho_hltEE")
        res_v_rho_hltEE.SetName("res_v_rho_hltEE")
        res_v_rho_hltEE.SetTitle("Barrel: HLT Trained")
        res_v_rho_hltEE.GetXaxis().SetTitle("(genEnergy-(scRawEnergy+scPreshowerEnergy))/genEnergy")
        res_v_rho_hltEE.GetYaxis().SetTitle("Rho")
        res_v_rho_hltEE.GetZaxis().SetRangeUser(rrmin,rrmax)
        res_v_rho_hltEE.Draw("colz")
        if res_line0: res_v_rho_line.Draw()
        c3.Print("/afs/hep.wisc.edu/home/tperry/www/HLT/on_off_comp/7_1_2/BDT/%s_rVr_hltEE%s.png"%(varname,extraName))
|
import logging
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from settings import ARCH_IMP_ROOT, IMP_REFS, DELIM_CSV
import csv
import os
from aluminio.models import Referencia,Acabado,Aleacion,Temple,SapAluminio
# Column indices of the import CSV (file named by IMP_REFS['arch_alums']).
COL_REFS = {
    'referencia':0,
    'acabado':1,
    'aleacion':2,
    'temple':3,
    'largo_mm':4
}
class Command(NoArgsCommand):
    # Import SapAluminio records from a CSV file configured in settings.
    # NOTE(review): legacy Python 2 / old Django code — `print` statements
    # and NoArgsCommand (removed in Django 1.10) pin this to old versions.
    help = "Importar Referencias"
    def get_acabado(self,nombre):
        # Finish lookup by name; raises Acabado.DoesNotExist if missing.
        return Acabado.objects.get(nombre=nombre)
    def get_aleacion(self,ref,codigo):
        # Alloy lookup by code, falling back to the reference's default.
        try:
            return Aleacion.objects.get(codigo=codigo)
        except:
            # NOTE(review): bare except also swallows MultipleObjectsReturned.
            return ref.aleacion_std
    def get_temple(self,ref,codigo):
        # Temper lookup by code, falling back to the reference's default.
        try:
            return Temple.objects.get(codigo=codigo)
        except:
            return ref.temple_std
    def get_ref(self,nombre):
        return Referencia.objects.get(nombre=nombre)
    def sap_alum(self,referencia,acabado,aleacion,temple,largo_mm):
        # Get-or-create the FERT SAP record and, when the reference is
        # flagged `unbw`, a parallel UNBW record. Returns (fert, unbw_or_None).
        sapalum, sapalum2 = None, None
        ref = self.get_ref(referencia)
        acab = self.get_acabado(acabado)
        ale = self.get_aleacion(ref, aleacion)
        tem = self.get_temple(ref, temple)
        unbw = ref.unbw
        try:
            sapalum = SapAluminio.objects.get(referencia=ref,acabado=acab,temple=tem,aleacion=ale,largo_mm=largo_mm,tipo_mat='FERT')
        except ObjectDoesNotExist:
            # NOTE(review): created without tipo_mat='FERT' — relies on the
            # model field default; confirm it actually defaults to 'FERT'.
            sapalum = SapAluminio.objects.create(referencia=ref,acabado=acab,temple=tem,aleacion=ale,largo_mm=largo_mm)
        if unbw:
            try:
                sapalum2 = SapAluminio.objects.get(referencia=ref,acabado=acab,temple=tem,aleacion=ale,largo_mm=largo_mm,tipo_mat='UNBW')
            except:
                sapalum2 = SapAluminio.objects.create(referencia=ref,acabado=acab,temple=tem,aleacion=ale,largo_mm=largo_mm,tipo_mat='UNBW')
        return sapalum, sapalum2
    def handle_noargs(self, **options):
        # Entry point: read the configured CSV and import it row by row.
        arch = os.path.join(ARCH_IMP_ROOT + os.sep + IMP_REFS['arch_alums'])
        reader = csv.reader(open(arch,'rU'),delimiter=DELIM_CSV)
        i = 0
        for row in reader:
            i = i + 1
            try:
                al, al2 = self.sap_alum(
                    referencia = row[COL_REFS['referencia']],
                    acabado = row[COL_REFS['acabado']],
                    aleacion = row[COL_REFS['aleacion']],
                    temple = row[COL_REFS['temple']],
                    largo_mm = row[COL_REFS['largo_mm']]
                )
                #print al
                #if al2: print al2
            except:
                # Best-effort import: report the failing row and continue.
                print 'Error al importar fila #%i %s' % (i,str(row))
import json
import boto3
# DynamoDB table holding one item per registered user profile (key: user_id).
user_table = 'user-profile'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(user_table)
# Cognito user-pool client used to resolve access tokens to user attributes.
cognito = boto3.client('cognito-idp')
def _response(status_code, body):
    """Build an API Gateway proxy response with JSON + permissive CORS headers."""
    return {
        'statusCode': status_code,
        'body': json.dumps(body),
        "headers": {
            'Content-Type': 'application/json',
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    }


def lambda_handler(event, context):
    """Return JSON true/false: does the caller already have a profile row?

    The caller is identified by the Cognito access token passed in the
    ``access_token`` request header; their email attribute is used as the
    profile table's partition key.
    """
    access_token = event['headers']['access_token']
    try:
        resp = cognito.get_user(
            AccessToken=access_token,
        )
    except Exception:
        # Narrowed from a bare `except:`; an invalid/expired token (or any
        # Cognito failure) is reported as a login error, as before.
        return _response(500, 'Error in your login')
    user = {attr['Name']: attr['Value'] for attr in resp['UserAttributes']}
    user_id = user['email']
    resp = table.get_item(Key={'user_id': user_id})
    # get_item returns no 'Item' key when the row does not exist.
    return _response(200, bool(resp.get('Item')))
|
# Celery settings: broker and result backend both use Redis DB 3 on localhost.
_REDIS_URL = "redis://localhost:6379/3"

celery_config = {
    "broker_url": _REDIS_URL,
    "result_backend": _REDIS_URL,
}
|
from flask import Flask, render_template, request
from faker import Faker
import random
import requests
from bs4 import BeautifulSoup
fake = Faker('ko_KR')  # Korean-locale fake-data generator (used by /result)
app = Flask(__name__)
@app.route('/')
def home():
    """Landing page."""
    page = render_template('home.html')
    return page
@app.route('/naver/')
def naver():
    """Naver-styled demo page."""
    page = render_template('naver.html')
    return page
@app.route('/daum/')
def daum():
    """Daum-styled demo page."""
    page = render_template('daum.html')
    return page
@app.route('/pastlife/')
def pastlife():
    """Form page for the past-life (fake job) lookup."""
    page = render_template('pastlife.html')
    return page
# Tell the user their name and a fake job.
# Cache: each submitted name maps to one fake job, so repeat visits are stable.
storage = {}


@app.route('/result')
def result():
    """Show the (cached) fake job for the ?name= query argument.

    flask's request.args (not the `requests` library) carries the query
    string; the first visit for a name draws a random job and remembers it.
    """
    name = request.args.get('name')
    if name not in storage:
        # Unknown name: generate a fake job and store it for next time.
        storage[name] = fake.job()
    return render_template('result.html', job=storage[name], name=name)
@app.route('/goonghap')
def goonghap():
    """Form page for the couple-compatibility lookup."""
    page = render_template('goonghap.html')
    return page
# NOTE(review): `babo` is never read by the active code — it was only used
# by the commented-out /destiny draft below; consider deleting both together.
babo = []
# Draft that cached scores under the concatenated pair of names (var + var as dict key):
# storage_cp = {}
# @app.route('/destiny')
# def destiny():
#     person1 = request.args.get('person1')
#     person2 = request.args.get('person2')
#     couple = person1 + person2
#     if couple in storage_cp:
#         love_per = storage_cp[couple]
#     else :
#         love_per = random.choice(range(51,101))
#         storage_cp[couple] = love_per
#     global babo
#     babo.append(person1 + '&' + person2)
#     return render_template('destiny.html', person1 = person1, person2 = person2, love_per = love_per)
# Current implementation below uses a dict-in-dict instead:
# Nested mapping: person1 -> {person2 -> compatibility percentage}.
storage_des = {}


@app.route('/destiny')
def destiny():
    """Return a sticky random compatibility score (51..101) for a name pair."""
    person1 = request.args.get('person1')
    person2 = request.args.get('person2')
    pair_scores = storage_des.setdefault(person1, {})
    if person2 not in pair_scores:
        # First time this pair is asked for: draw and remember a score.
        pair_scores[person2] = random.randint(51, 101)
    return render_template('destiny.html', person1=person1, person2=person2,
                           love_per=pair_scores[person2])
babos = ''  # retained for backwards compatibility; no longer mutated below


@app.route('/admin')
def baba():
    """List every recorded couple.

    Bug fix: the old version appended to the module-level ``babos`` string on
    every request, so refreshing /admin duplicated the whole list each time
    (and it grew forever). The listing is now rebuilt from ``storage_des``
    on each call.
    """
    lines = ''
    for name1, partners in storage_des.items():
        for name2 in partners:
            lines = lines + f'{name1} ♥ {name2}\n'
    return '바보들 \n' + lines
    # return render_template('babo.html', babos = babos)
@app.route('/opgg')
def opgg():
    """Form page for the op.gg summoner lookup."""
    page = render_template('opgg.html')
    return page
@app.route('/search')
def search():
    """Scrape op.gg for a summoner's win/loss record and show a verdict.

    Bug fix: the old ``int(win[0:3])`` parsing assumed exactly three digits,
    so records like "45W" (slice "45W") or "1,234W" crashed with ValueError.
    Counts are now parsed by keeping digits only, and a record with zero
    games no longer raises ZeroDivisionError.
    """
    username = request.args.get('id')
    url = "https://www.op.gg/summoner/userName=" + username
    res = requests.get(url).text
    doc = BeautifulSoup(res, 'html.parser')
    lose = doc.select_one("#SummonerLayoutContent > div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div > div.TierRankInfo > div.TierInfo > span.WinLose > span.losses").text
    win = doc.select_one("#SummonerLayoutContent > div.tabItem.Content.SummonerLayoutContent.summonerLayout-summary > div.SideContent > div.TierBox.Box > div > div.TierRankInfo > div.TierInfo > span.WinLose > span.wins").text
    # The scraped text looks like "123W" / "45L"; keep digits only.
    wins = int(''.join(ch for ch in win if ch.isdigit()))
    losses = int(''.join(ch for ch in lose if ch.isdigit()))
    total = wins + losses
    winr = wins / total if total else 0.0  # fresh accounts have no games
    if winr < 0.4:
        msg = "롤을 접으시는 것을 추천합니다."
    elif winr < 0.55:
        msg = "롤하면서 스트레스 받으시죠?"
    elif winr < 0.7:
        msg = "롤좀 하시네요?"
    else:
        msg = "대리하세요?"
    winrate = str(winr * 100) + '%'
    return render_template('search.html', win=win.replace('W', '승'),
                           lose=lose.replace('L', '패'), username=username,
                           winrate=winrate, msg=msg)
if __name__ == '__main__':
    # Development server only; disable debug mode before deploying.
    app.run(debug = True)
def Madlib():
    """Prompt for six words and print them inside a zoo-trip story."""
    prompts = ('Adjective: ', 'Noun: ', 'Past tense verb: ',
               'Adverb: ', 'Adjective: ', 'Noun: ')
    # Prompts are issued in the same order as the original a..f inputs.
    a, b, c, d, e, f = (input(p) for p in prompts)
    print(f'Today I went to the zoo. I saw a(n) {a} {b} jumping up and down in its tree. He {c} {d} through the large tunnel that led to its {e} {f}. ')


Madlib()
import smbus
import numpy
# For gyro
class L3GD20:
    """Minimal smbus driver for the ST L3GD20 three-axis gyroscope.

    ``readGyros()`` returns angular rates in degrees per second, scaled by
    the sensitivity (``gGain``) matching the configured full-scale range.
    """

    # Register addresses (L3GD20 datasheet register map).
    WHO_AM_I = 0X0F
    CTRL_REG1 = 0X20
    CTRL_REG2 = 0X21
    CTRL_REG3 = 0X22
    CTRL_REG4 = 0X23
    CTRL_REG5 = 0X24
    REFERENCE = 0X25
    OUT_TEMP = 0X26
    STATUS_REG = 0X27
    OUT_X_L = 0X28
    OUT_X_H = 0X29
    OUT_Y_L = 0X2A
    OUT_Y_H = 0X2B
    OUT_Z_L = 0X2C
    OUT_Z_H = 0X2D
    FIFO_CTRL_REG = 0X2E
    FIFO_SRC_REG = 0X2F
    INT1_CFG = 0X30
    INT1_SRC = 0X31
    INT1_TSH_XH = 0X32
    INT1_TSH_XL = 0X33
    INT1_TSH_YH = 0X34
    INT1_TSH_YL = 0X35
    INT1_TSH_ZH = 0X36
    INT1_TSH_ZL = 0X37
    INT1_DURATION = 0X38

    # CTRL_REG4 full-scale selection bits.
    GYRO_GAIN_250DPS = 0X00
    GYRO_GAIN_500DPS = 0X10
    GYRO_GAIN_2000DPS = 0X20
    # CTRL_REG4 block-data-update selection bits.
    GYRO_UPDATE_CONTINOUS = 0X00
    GYRO_UPDATE_READ = 0X80

    def __init__(self, ad=0x6A, ch=1):
        """ad: I2C slave address; ch: I2C bus number for smbus.SMBus."""
        self.address = ad
        # Bug fix: was hard-coded to 1, silently ignoring the `ch` argument.
        self.channel = ch
        self.bus = smbus.SMBus(ch)
        # Default sensitivity: 8.75 mdps/digit (250 dps full scale).
        self.gGain = 8.75/1000

    def init(self, g=GYRO_GAIN_250DPS, u=GYRO_UPDATE_CONTINOUS):
        """Probe the chip, power it up with all axes enabled, and set range/update mode."""
        result = self.bus.read_byte_data(self.address, self.WHO_AM_I)
        if result == 0XD4:  # expected WHO_AM_I value for the L3GD20
            print("Found l3gd20 on channel 1")
        else:
            print("Unknown device")
            return
        # Power-up with X/Y/Z axes enabled (PD|Zen|Yen|Xen).
        self.bus.write_byte_data(self.address, self.CTRL_REG1, 0X0F)
        r = self.bus.read_byte_data(self.address, self.CTRL_REG4)
        r = r | g | u
        self.bus.write_byte_data(self.address, self.CTRL_REG4, r)
        # Keep the software gain (dps/digit) in sync with the chosen range.
        if g == self.GYRO_GAIN_250DPS:
            self.gGain = 8.75/1000
        elif g == self.GYRO_GAIN_500DPS:
            self.gGain = 17.50/1000
        elif g == self.GYRO_GAIN_2000DPS:
            self.gGain = 70/1000

    def readGyros(self, debug=0):
        """Read the six output registers and return [x, y, z] rates in dps."""
        xl = self.bus.read_byte_data(self.address, self.OUT_X_L)
        xh = self.bus.read_byte_data(self.address, self.OUT_X_H)
        yl = self.bus.read_byte_data(self.address, self.OUT_Y_L)
        yh = self.bus.read_byte_data(self.address, self.OUT_Y_H)
        zl = self.bus.read_byte_data(self.address, self.OUT_Z_L)
        zh = self.bus.read_byte_data(self.address, self.OUT_Z_H)
        gx = (xh << 8) | xl
        gy = (yh << 8) | yl
        gz = (zh << 8) | zl
        # Two's-complement conversion of the 16-bit raw values.
        # Bug fix: the correct offset is 0x10000 (65536); the old code
        # subtracted 0xFFFE and was off by 2 on every negative reading.
        gx = gx-0X10000 if gx > 0X7FFF else gx
        gy = gy-0X10000 if gy > 0X7FFF else gy
        gz = gz-0X10000 if gz > 0X7FFF else gz
        return [gx*self.gGain, gy*self.gGain, gz*self.gGain]

    def en_FIFO(self):
        """Enable the FIFO (CTRL_REG5 FIFO_EN; 0x5E = stream mode, watermark 30 — confirm vs datasheet)."""
        rd = self.bus.read_byte_data(self.address, self.CTRL_REG5)
        self.bus.write_byte_data(self.address, self.CTRL_REG5, rd | 0X40)
        self.bus.write_byte_data(self.address, self.FIFO_CTRL_REG, 0X5E)
        return

    def de_FIFO(self):
        """Disable the FIFO by clearing the FIFO_EN bit in CTRL_REG5."""
        rd = self.bus.read_byte_data(self.address, self.CTRL_REG5)
        self.bus.write_byte_data(self.address, self.CTRL_REG5, rd & 0XBF)
        return

    def read_FIFO(self, debug=0):
        """Drain all unread FIFO entries; return an (n, 3) numpy array of dps readings."""
        rd = self.bus.read_byte_data(self.address, self.FIFO_SRC_REG)
        if debug == 1:
            print("B: ", hex(rd))
        data = numpy.empty((0, 3), float)
        i = rd & 0X1F  # FSS bits: number of unread FIFO entries
        j = 0
        while j < i:
            k = self.readGyros()
            data = numpy.append(data, numpy.array([k]), axis=0)
            j = j + 1
        if debug == 1:
            rd = self.bus.read_byte_data(self.address, self.FIFO_SRC_REG)
            print("A:", hex(rd))
            print("E")
        return data

    def read_status(self):
        """Return the raw [STATUS_REG, FIFO_SRC_REG] byte values."""
        return [self.bus.read_byte_data(self.address, self.STATUS_REG), self.bus.read_byte_data(self.address, self.FIFO_SRC_REG)]
|
# gen.py - use whenever sequencing is needd
# top-level syntax, function -> underscore method
# x() __call__
def add1(x, y):
    """Plain function form: return the sum of x and y."""
    total = x + y
    return total
class Adder:
    """Callable-object form: adds its two arguments plus a per-instance call count.

    Each call bumps ``z`` first, so the n-th call returns x + y + n.
    """

    def __init__(self):
        self.z = 0

    def __call__(self, x, y):
        self.z += 1
        return x + y + self.z


add2 = Adder()
from time import sleep
# This example has storage... and has eager return of the result sets
def compute():
    """Eagerly build and return the full result list (blocks ~5 s total)."""
    def _slow(n):
        sleep(.5)  # models an expensive per-item computation
        return n
    return [_slow(n) for n in range(10)]
'''
Wasteful because we have to wait for the entire action to complete
and be read into memory, when we really just care about each
number (one by one)
'''
class Compute:
    """Iterator-protocol version of compute().

    ``__call__`` is the eager variant (deliberately absurd: 100000 items at
    5 s each); ``__iter__``/``__next__`` produce values lazily instead.
    """
    def __call__(self):
        rv = []
        for i in range(100000):
            sleep(5)
            rv.append(i)
        return rv
    def __iter__(self):
        # (Re)start iteration state; returning self makes the object its own iterator.
        self.last = 0
        return self
    def __next__(self):
        # NOTE(review): `rv` is unused — the method returns self.last AFTER
        # incrementing, so iteration yields 1..10 rather than 0..9.
        rv = self.last
        self.last += 1
        if self.last > 10:
            raise StopIteration()
        sleep(.5)
        return self.last
#This is too ugly to read ^^^
# This is a generator... don't eagerly compute. Return to user as they ask for it...
def compute():
    """Lazily yield 0..9, pausing between items (generator version of the above)."""
    for i in range(10):
        # performs some computation
        sleep(.5) # sleep models the complex computation
        # give the value back to the user to do something
        yield i # -> used for generators/sequencing
'''
Core concept and mental model of a generator
Instead of eagerly computing values you give
it to the user as they ask for it
Let a little library code run, then
let a little user code run
Let a little library code run, then
let a little user code run
Interleave them
Core conceptualization of generators
'''
# Drive the generator: each loop iteration resumes compute() for one value.
for val in compute():
    # user do what ever they want to do with value
    print(val)
# for x in xs:
# pass
# xi = iter(xs) -> __iter__
# while True:
# x = next(xi) -> __next__
class Api:
    """Sequencing expressed as three separate methods.

    Nothing stops a caller invoking these out of order — contrast with the
    generator version below, which enforces the ordering.
    NOTE(review): first()/second()/last() are assumed to be defined
    elsewhere; they are not present in this chunk of the file.
    """
    def run_this_first(self):
        first()
    def run_this_second(self):
        second()
    def run_this_last(self):
        last()
# can ensure that the first func will always
# run before the second and third
def api():
    """Generator that enforces first -> second -> last ordering: each step
    runs only when the caller advances the generator past the next yield."""
    first()
    yield
    second()
    yield
    last()
|
'''
Online Python Compiler.
Code, Compile, Run and Debug python program online.
Write your code in this editor and press "Run" button to execute it.
'''
def circle(mat):
    """Compare adjacency between the three rows of a 3-row 0/1 matrix.

    NOTE(review): the intent of this function is unclear and the conditions
    look buggy — see the inline notes before changing anything.
    """
    mat1 = mat[0]
    mat2 = mat[1]
    mat3 = mat[2]
    a = []
    b = []
    if len(mat1)==len(mat2)==len(mat3):
        for i in range(len(mat1)):
            # NOTE(review): `mat1[i] and mat2[i]==1` tests truthiness of
            # mat1[i] but equality for mat2[i]; likely intended
            # `mat1[i]==1 and mat2[i]==1` — confirm before fixing.
            if(mat1[i] and mat2[i]==1):
                a.append([mat1[i],mat2[i]])
            if(mat2[i] and mat3[i]==1):
                b.append([mat2[i],mat3[i]])
    # Equal pair counts print "1"; otherwise print the larger count.
    if len(a)==len(b):
        print("1")
    elif len(a)>len(b):
        print(len(a))
    else:
        print(len(b))
# Read three whitespace-separated integer rows from stdin, echo, evaluate.
arr = [list(map(int, input().strip().split())) for _ in range(3)]
print(arr)
circle(arr)
|
import unittest
import BinaryTree
class MyTest(unittest.TestCase):
    """Basic behavioural checks for the BinaryTree module."""

    def testIsEmpty(self):
        # A freshly constructed tree reports itself as empty.
        tree = BinaryTree.Tree()
        self.assertEqual(tree.IsEmpty(), True)

    def testFind(self):
        # After adding 7, find(7) returns a node holding the same value.
        tree = BinaryTree.Tree()
        tree.add(7)
        expected = BinaryTree.Node(7)
        self.assertEqual(tree.find(7).Value(), expected.Value())


if __name__ == '__main__':
    unittest.main()
|
"""
Read through a text file on disk. Use a dict to track how many words of each length are in
the file — that is, how many 3-letter words, 4-letter words, 5-letter words, and so forth.
Display your results
"""
import sys, operator
from collections import Counter


def count_word_lengths(path):
    """Return a Counter mapping word length -> number of occurrences in *path*.

    Words are whitespace-separated tokens; punctuation is counted as part
    of the word, as in the original script.
    """
    counts = Counter()
    with open(path) as f:  # original left the file handle open; `with` closes it
        for line in f:
            counts.update(len(word) for word in line.split())
    return counts


if __name__ == "__main__":
    # Uncomment the line below to read the script file itself as the input
    # text file, or point inputFilePath at any valid file.
    # inputFilePath = sys.argv[0]
    inputFilePath = "04-extra-02-word-length-tracker-text-file.txt"
    # Counter keys are unique, so sorting the items sorts by word length.
    for length, count in sorted(count_word_lengths(inputFilePath).items()):
        print(f"Number of {length} letter words: {count}")
|
"""Common settings and globals."""
import os
import environ
from os.path import basename
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path("clowning_around")
env = environ.Env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Site name:
SITE_NAME = basename(ROOT_DIR)
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment (e.g. env("DJANGO_SECRET_KEY")) before deploying.
SECRET_KEY = "cwyhspevbgpuhn(#iybxrc_r)l=gbks1)qxjri!rxi@b3kn)&8"
# NOTE(review): DEBUG=True and ALLOWED_HOSTS=["*"] are development-only
# settings; both must be tightened for production.
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
]
THIRD_PARTY_APPS = [
    "emoji_picker",
    "crispy_forms",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "rest_framework",
]
LOCAL_APPS = ["users.apps.UsersConfig", "appointments.apps.AppointmentsConfig"]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
SITE_ID = 1
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
STATIC_ROOT = str(ROOT_DIR("staticfiles"))
STATIC_URL = "/static/"
STATICFILES_DIRS = [str(APPS_DIR.path("static"))]
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
MEDIA_ROOT = str(APPS_DIR("media"))
MEDIA_URL = "/media/"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [str(APPS_DIR.path("templates"))],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
CRISPY_TEMPLATE_PACK = "bootstrap4"
# SECURITY
# SESSION_COOKIE_HTTPONLY = True
# CSRF_COOKIE_HTTPONLY = True
# SECURE_BROWSER_XSS_FILTER = True
# X_FRAME_OPTIONS = "ALLOW"
# # EMAIL
# EMAIL_BACKEND = env(
#     "DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
# )
# EMAIL_TIMEOUT = 5
ROOT_URLCONF = "ClowningAround.urls"
WSGI_APPLICATION = "ClowningAround.wsgi.application"
# Database
# Bug fix: "OPTIONS" previously sat at the TOP level of DATABASES, where
# Django interprets every key as a connection alias; an alias without an
# ENGINE raises ImproperlyConfigured when accessed. The charset option is
# MySQL-only (sqlite3.connect rejects it), so it is kept as a comment for
# when the engine changes rather than moved into the sqlite connection.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
        # "OPTIONS": {"charset": "utf8mb4"},  # re-enable when switching to MySQL
    },
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Django Admin URL
ADMIN_URL = "admin/"
ADMINS = [("""Ntuli Siya""", "ntulisj1@gmail.com")]
MANAGERS = ADMINS
# AUTHENTICATION
# ModelBackend first so username/password logins work alongside allauth.
AUTHENTICATION_BACKENDS = [
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
]
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
LOGIN_URL = "account_login"
# Internationalization
LANGUAGE_CODE = "en-ZA"
TIME_ZONE = "Africa/Johannesburg"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# allauth customization hooks (see users/adapters.py).
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
|
from PyQt4.QtGui import QMainWindow, QAction, QIcon, qApp, QFileDialog, QMessageBox, QPrintDialog, QDialog, QPrinter, \
QPainter
import pickle
from ReactionFile import ReactionFile
from gui.ReactionProfile import ReactionProfile
from gui.ReactionWidget import ReactionWidget
class MainWindow(QMainWindow):
    """Top-level window of the equilibrium simulator.

    Hosts an editable ReactionWidget on the left and a read-only
    ReactionProfile on the right for comparison, plus File/Options/About
    menus wired to open/save/print actions.
    """
    def __init__(self):
        super(QMainWindow, self).__init__()
        self._TITLE = 'Equilibrium Simulator 2015'
        self.setGeometry (0, 22, 1200, 400)
        self.setWindowTitle(self._TITLE)
        self._showorhide = "Show "
        self._printbox = None
        self._printer = QPrinter()
        self._Squares = []
        # Menu bar
        self.MenuBar = self.menuBar()
        self.filemenu = self.MenuBar.addMenu('File')
        self.optionsmenu = self.MenuBar.addMenu('Options')
        self.aboutmenu = self.MenuBar.addMenu('About')
        # File - New
        newfile = QAction(QIcon('exit.png'), 'New', self)
        newfile.setStatusTip('Create a new file')
        self.filemenu.addAction(newfile)
        newfile.triggered.connect(self.shownew)
        # File - Open
        openfile = QAction(QIcon('open.png'), 'Open', self)
        openfile.setStatusTip('Open a file')
        self.filemenu.addAction(openfile)
        openfile.triggered.connect(self.showopen)
        # File - Save
        savefile = QAction(QIcon('save.png'), 'Save', self)
        savefile.setStatusTip('Save the current file')
        self.filemenu.addAction(savefile)
        savefile.triggered.connect(self.save)
        # File - Save as
        saveasfile = QAction(QIcon('save.png'), 'Save as', self)
        saveasfile.setStatusTip('Save the current file as a different file')
        self.filemenu.addAction(saveasfile)
        saveasfile.triggered.connect(self.showsaveas)
        # File - Print
        printfile = QAction(QIcon('save.png'), 'Print', self)
        printfile.setStatusTip('Print the displayed reactions')
        self.filemenu.addAction(printfile)
        printfile.triggered.connect(self.showprint)
        # File - Exit
        exitaction = QAction(QIcon('exit.png'), '&Exit', self)
        exitaction.setStatusTip('Exit the program')
        exitaction.triggered.connect(qApp.quit)
        self.filemenu.addAction(exitaction)
        # Options - Edit Conditions
        editconds = QAction(QIcon('exit.png'), 'Edit Conditions', self)
        editconds.setStatusTip('Edit the conditions of the current reaction')
        editconds.triggered.connect(self.editcd)
        self.optionsmenu.addAction(editconds)
        # About - Version
        version = QAction(QIcon('exit.png'), 'Version 1.1', self)
        version.setStatusTip('The version of this program you are using')
        # Widget of editable reactions
        self._ReactionsWindow = ReactionWidget()
        self._ReactionsWindow.setGeometry(0, 20, 600, 380)
        self._ReactionsWindow.setParent(self)
        # Widget of non-editable reaction, for comparison
        self._ComparingProfile = ReactionProfile(self._ReactionsWindow.GetCurrentReaction(), True)
        self._ComparingProfile.setGeometry(600, 40, 600, 380)
        self._ComparingProfile.setParent(self)
        self.aboutmenu.addAction(version)
        self.show()
    # You should call the shownew(), showopen() etc. methods,
    # not the new(), open() etc. methods, as a) they do not provide user options;
    # b) it may lead to errors; c) the showing methods call them anyway
    def editcd(self):
        # Delegates condition editing to the reactions widget.
        self._ReactionsWindow.EditReaction()
    def showopen(self):
        # Ask the user for a .rctn file and open it if a path was chosen.
        fileaddress = QFileDialog.getOpenFileName(self, "Open...", "", "Simulator Files (*.rctn)")
        if fileaddress != "":
            self.open(fileaddress)
            self.setWindowTitle(fileaddress+" - "+self._TITLE)
    def open(self, fileaddress):
        # Unpickle the reaction file and remember it as the last-opened path.
        # NOTE(review): files are opened without `with`; last.ptr tracks the
        # most recent file between sessions.
        openfrom = open(fileaddress, "rb")
        loadedfile = pickle.load(openfrom)
        openfrom.close()
        file = open("last.ptr", "w")
        file.write(fileaddress)
        file.close()
        self._ReactionsWindow.SetOpenReactionFile(loadedfile)
        self.setWindowTitle(fileaddress+" - "+self._TITLE)
    def save(self):
        self._ReactionsWindow.save()
    def showsaveas(self):
        self._ReactionsWindow.showsaveas()
    def shownew(self):
        # Confirm saving before replacing the current file with a fresh one.
        # NOTE(review): clicking "No" currently does nothing at all — the new
        # file is only created on "Yes"; confirm whether "No" should still
        # open a new file without saving.
        self._newfilebox = QMessageBox()
        self._newfilebox.setWindowTitle("Save?")
        self._newfilebox.setText("Would you like to save this file before opening a new one?")
        self._newfilebox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        self._newfilebox.setDefaultButton(QMessageBox.Yes)
        if self._newfilebox.exec_() == QMessageBox.Yes:
            file = open("last.ptr", "w")
            file.write("")
            file.close()
            self.save()
            self.setWindowTitle(self._TITLE)
            self._ReactionsWindow.SetOpenReactionFile(ReactionFile())
    def showprint(self):
        self._printbox = QPrintDialog(self)
        if self._printbox.exec_() == QDialog.Accepted:
            self.printout()
    def printout(self):
        # Render both reaction views plus their graphs onto one printed page,
        # using manual translate/scale steps to lay them out.
        # Page width, page height, widget width
        pw = self._printer.pageRect().width()
        ph = self._printer.pageRect().height()
        ww = self._ComparingProfile.width()
        painter = QPainter(self._printer)
        scale = (ww / pw) * 1.5
        painter.scale(scale, scale)
        self._ReactionsWindow.currentWidget().render(painter)
        painter.translate(0, ph/2)
        self._ComparingProfile.render(painter)
        painter.translate(pw * 0.6, -ph/2)
        painter.scale(0.8, 0.8)
        self._ReactionsWindow.currentWidget().PrintGraph(painter, "Concentration")
        painter.translate(0, ph*0.25)
        self._ReactionsWindow.currentWidget().PrintGraph(painter, "Rate")
        painter.translate(0, ph * 0.4)
        self._ComparingProfile.PrintGraph(painter, "Concentration")
        painter.translate(0, ph * 0.25)
        self._ComparingProfile.PrintGraph(painter, "Rate")
        painter.end()
    # GETTERS AND SETTERS
    def GetWindowTitle(self):
        return self._TITLE
    def SetComparingReaction(self, reaction):
        self._ComparingProfile.SetReaction(reaction)
        self._ComparingProfile.update()
    def GetComparingReaction(self):
        return self._ComparingProfile.GetReaction()
"""Read nifti (nii.gz) files."""
import os
import numpy as np
from .dependencies import is_nibabel_installed
from .path import path_to_visbrain_data
from ..utils.transform import array_to_stt
from vispy.visuals.transforms import (MatrixTransform, ChainTransform,
STTransform)
def read_nifti(path, hdr_as_array=False):
    """Read data from a NIFTI file using Nibabel.
    Parameters
    ----------
    path : string
        Path to the nifti file.
    hdr_as_array : bool | False
        If True, return the raw (4, 4) affine array instead of converting
        it to a VisPy transform.
    Returns
    -------
    vol : array_like
        The 3-D volume data.
    header : Nifti1Header
        Nifti header.
    transform : VisPy.transform | array_like
        The transformation (a (4, 4) array when hdr_as_array is True).
    """
    is_nibabel_installed(raise_error=True)
    import nibabel as nib
    # Load the file :
    img = nib.load(path)
    # Get the data and affine transformation ::
    # NOTE(review): img.get_data() is deprecated in modern nibabel;
    # get_fdata() is the replacement but changes the returned dtype —
    # confirm downstream expectations before switching.
    vol = img.get_data()
    affine = img.affine
    # Replace NaNs with 0. :
    vol[np.isnan(vol)] = 0.
    # Define the transformation :
    if hdr_as_array:
        transform = affine
    else:
        transform = array_to_stt(affine)
    return vol, img.header, transform
def read_mist(name):
    """Load MIST parcellation.
    See : MIST: A multi-resolution parcellation of functional networks
    Authors : Sebastian Urchs, Jonathan Armoza, Yassine Benhajali,
    Jolène St-Aubin, Pierre Orban, Pierre Bellec
    Parameters
    ----------
    name : string
        Name of the level. Use MIST_x with x 7, 12, 20, 36, 64, 122 or ROI.
    Returns
    -------
    vol : array_like | None
        ROI volume.
    labels : array_like | None
        Array of labels.
    index : array_like | None
        Array of index that make the correspondance between the volume values
        and labels.
    hdr : array_like | None
        Array of transform source's coordinates into the volume space.
    """
    name = name.upper()
    assert ('MIST' in name) and ('_' in name)
    level = name.split('_')[-1]
    assert level in ['7', '12', '20', '36', '64', '122', 'ROI']
    # Define path :
    parc, parc_info = '%s.nii.gz', '%s.csv'
    folder, folder_info = 'Parcellations', 'Parcel_Information'
    mist_path = path_to_visbrain_data('mist', 'roi')
    parc_path = os.path.join(*(mist_path, folder, parc % name))
    parc_info_path = os.path.join(*(mist_path, folder_info, parc_info % name))
    # Load info : columns 0-2 of the CSV are (index, label, name).
    m = np.genfromtxt(parc_info_path, delimiter=';', dtype=str, skip_header=1,
                      usecols=[0, 1, 2])
    n_roi = m.shape[0]
    index = m[:, 0].astype(int)
    # Structured array keyed per-level so multiple levels can coexist.
    lab_, name_ = 'label_%s' % level, 'name_%s' % level
    labels = np.zeros(n_roi, dtype=[(lab_, object), (name_, object)])
    labels[lab_] = m[:, 1]
    labels[name_] = np.char.replace(np.char.capitalize(m[:, 2]), '_', ' ')
    # Load parc :
    vol, _, hdr = read_nifti(parc_path, hdr_as_array=True)
    return vol, labels, index, hdr
def _niimg_var(vol, hdr):
"""Get transformation variables.
Parameters
----------
vol : array_like
The 3D array of the volume.
hdr : array_like
The (4, 4) transformation array.
Returns
-------
sh : array_like
Shape of the volume
diag : array_like
Diagonale of the transformation
tr : array_like
Translation of the transformation
"""
assert vol.ndim == 3, "Volume should be an (n_x, n_y, n_z) array"
n_x, n_y, n_z = vol.shape
assert isinstance(hdr, (MatrixTransform, np.ndarray))
if isinstance(hdr, MatrixTransform):
affine = np.array(hdr.matrix).copy()
# Get diagonal and translation
d_x, d_y, d_z = np.diag(affine)[0:-1]
t_x, t_y, t_z = affine[-1, 0:-1]
return np.array(vol.shape), np.diag(affine)[0:-1], affine[-1, 0:-1]
def _niimg_norm(sh, diag, translate):
    """Normalize the volume between (0., 1.).

    Returns one STTransform per slice view (sagittal, coronal, axial).
    """
    # Normalization ratio: voxel size magnitude times extent per axis.
    ratio = np.abs(diag) * sh
    sgn = np.sign(diag)
    scale = 1. / ratio
    shift = -np.asarray(translate, dtype=float) / ratio
    # Per-slice transforms pick the two axes shown in that view.
    sg_norm = STTransform(scale=(scale[1], scale[2], 1.),
                          translate=(shift[1], shift[2], 1.))
    cr_norm = STTransform(scale=(scale[0], scale[2], 1.),
                          translate=(sgn[0] * shift[0], shift[2], 1.))
    ax_norm = STTransform(scale=(scale[1], scale[0], 1.),
                          translate=(shift[1], sgn[0] * shift[0], 1.))
    return sg_norm, cr_norm, ax_norm
def _niimg_rot():
    """Get rotation transformations of each slice (sagittal, coronal, axial)."""
    def _rotated(*steps):
        # Apply each (angle, axis) rotation in order to a fresh transform.
        tf = MatrixTransform()
        for angle, axis in steps:
            tf.rotate(angle, axis)
        return tf

    sg_rot = _rotated((90., (0, 0, 1)), (180., (0, 1, 0)))
    cr_rot = _rotated((90., (0, 0, 1)), (180., (0, 1, 0)))
    ax_rot = _rotated((180., (1, 0, 0)))
    return sg_rot, cr_rot, ax_rot
def _niimg_mat(hdr, idx):
    """Get the transformation of a single slice.
    Parameters
    ----------
    hdr : array_like
        The (4, 4) transformation array.
    idx : tuple
        Slices indicies.
    Returns
    -------
    tf : MatrixTransform
        Image transformation.
    """
    # NOTE(review): hdr.matrix is transposed here because VisPy stores the
    # matrix row-vector style; the result is transposed back at the end.
    hdr_mat = np.array(hdr.matrix).copy().T
    mat = np.identity(4, dtype=np.float32)
    # Copy the 2x2 rotation/scale sub-block for the two displayed axes...
    to_idx = [[idx[0]], [idx[1]]], [idx[0], idx[1]]
    mat[[[0], [1]], [0, 1]] = hdr_mat[to_idx]
    # ...and the matching translation components.
    mat[[0, 1], -1] = hdr_mat[[idx[0], idx[1]], -1]
    return MatrixTransform(mat.T)
def _niimg_mni(hdr):
    """Transformation for MNI conversions of each slice.

    Axis pairs: sagittal (2, 1), coronal (2, 0), axial (1, 0).
    """
    axis_pairs = ((2, 1), (2, 0), (1, 0))
    sg_mni, cr_mni, ax_mni = (_niimg_mat(hdr, pair) for pair in axis_pairs)
    return sg_mni, cr_mni, ax_mni
def niimg_to_transform(vol, hdr, as_bgd=True, vol_bgd=None, hdr_bgd=None):
    """Get transformations of nii.gz files for cross-sections.
    Parameters
    ----------
    vol : array_like
        3D volume data.
    hdr : array_like
        Array of transformation of shape (4, 4).
    as_bgd : bool | True
        Specify if the volume is a background image or have to be considered as
        an activation image.
    vol_bgd : array_like | None
        Volume data if `as_bgd` is True.
    hdr_bgd : array_like | None
        Transformation array if `as_bgd` is True.
    Returns
    -------
    sg_tf : ChainTransform
        Transformation of sagittal view
    cr_tf : ChainTransform
        Transformation of coronal view
    ax_tf : ChainTransform
        Transformation of axial view
    """
    # Get transformation variables
    sh_img, diag_img, tr_img = _niimg_var(vol, hdr)
    # Get the normalization transformation depending if the volume is an image
    # background or an activation image. Activation images are normalized
    # against the BACKGROUND volume so the two overlay consistently.
    # (sh - 1: normalize by the last voxel index, not the voxel count.)
    if as_bgd:  # Background image
        sg_norm, cr_norm, ax_norm = _niimg_norm(sh_img - 1, diag_img, tr_img)
    else:  # Activation image
        sh_bgd, diag_bgd, tr_bgd = _niimg_var(vol_bgd, hdr_bgd)
        sg_norm, cr_norm, ax_norm = _niimg_norm(sh_bgd - 1, diag_bgd, tr_bgd)
    # Get MNI and rotation transformations
    sg_mni, cr_mni, ax_mni = _niimg_mni(hdr)
    sg_rot, cr_rot, ax_rot = _niimg_rot()
    # Build the chain of transformation (applied normalize -> rotate -> MNI).
    sg_tf = ChainTransform([sg_norm, sg_rot, sg_mni])
    cr_tf = ChainTransform([cr_norm, cr_rot, cr_mni])
    ax_tf = ChainTransform([ax_norm, ax_rot, ax_mni])
    return sg_tf, cr_tf, ax_tf
|
import downloader
from utils import Soup, urljoin, Downloader, LazyUrl, get_imgs_already, clean_title, get_ext, get_print, errors, check_alive
from constants import try_n
import ree as re, os
from timee import sleep
import page_selector
from translator import tr_
import json
class Page:
    """A single webtoon episode: its URL, display title, and list-page index."""

    def __init__(self, url, title, p):
        self.url, self.title, self.p = url, title, p
class Image:
    """A downloadable image belonging to a Page, numbered within the episode."""

    def __init__(self, url, page, p):
        # Zero-padded per-episode numbering keeps the files sorted on disk.
        extension = get_ext(url)
        self.filename = '{}/{:04}{}'.format(clean_title(page.title), p, extension)
        self.url = LazyUrl(page.url, lambda _: url, self)
class Info:
    """Comic-level metadata: numeric id, series title, and artist name."""

    def __init__(self, id, title, artist):
        self.id, self.title, self.artist = id, title, artist
class Downloader_navertoon(Downloader):
    """Downloader plugin for Naver Webtoon (comic.naver.com)."""
    type = 'navertoon'
    URLS = ['comic.naver.com']
    MAX_CORE = 8
    MAX_SPEED = 4.0
    display_name = 'Naver Webtoon'
    def init(self):
        # Fetch comic metadata once up front; episode pages are read later.
        self.__info, _ = get_pages(self.url, self.cw)
    @classmethod
    def fix_url(cls, url):
        # Normalize: strip no=/page= params, force the desktop host, and
        # rewrite episode-detail URLs to the episode-list URL.
        url = re.sub(r'[?&]page=[0-9]+', '', re.sub(r'[?&]no=[0-9]+', '', url)).replace('m.comic.naver.', 'comic.naver.')
        url = url.replace('detail.nhn', 'list.nhn').replace('/detail?', '/list?')
        return url.rstrip('#')
    @property
    def name(self):
        # Formatted directory name built from the comic metadata.
        id = self.__info.id
        title = self.__info.title
        artist = self.__info.artist
        title = self.format_title('N/A', id, title, artist, 'N/A', 'N/A', 'Korean', prefix='navertoon_')
        return clean_title(title)
    def read(self):
        self.title = tr_('읽는 중... {}').format(self.name)
        imgs = get_imgs_all(self.url, self.name, cw=self.cw)
        for img in imgs:
            if isinstance(img, Image):
                self.urls.append(img.url)
            else:
                self.urls.append(img)
        self.title = self.name
def set_no(url, p):
    """Return *url* with its '&no=' query value forced to *p* (appended if absent).

    NOTE(review): only '&no=' is recognised; a URL whose no= parameter is
    first in the query string ('?no=') would get a second no= appended —
    confirm callers never pass such URLs.
    """
    if '&no=' in url:
        return re.sub('&no=[0-9]+', f'&no={p}', url)
    return url + f'&no={p}'
def get_id(url):
    """Extract the integer titleId query-string value from *url* (case-insensitive)."""
    after_key = url.lower().split('titleid=')[1]
    return int(after_key.split('&')[0])
def set_page(url, p):
    """Return *url* with its '&page=' query value set to *p* (appended if missing)."""
    if '&page=' not in url:
        return url + f'&page={p}'
    return re.sub('&page=[0-9]+', f'&page={p}', url)
@try_n(4)
def get_pages(url, cw=None):
    """Scrape the (mobile) episode list and return (Info, [Page, ...]).

    Walks the paginated list until the "next" button disappears,
    de-duplicating episodes by their no= value.
    """
    print_ = get_print(cw)
    # The mobile site is scraped even though fix_url normalizes to desktop.
    url = Downloader_navertoon.fix_url(url).replace('comic.naver.', 'm.comic.naver.')
    id = get_id(url)
    print('id:', id)
    print(url)
    html = downloader.read_html(url)
    soup = Soup(html)
    # Age-verification gate requires a logged-in session.
    if soup.find('button', class_='btn_check'):
        raise errors.LoginRequired()
    try:
        info = soup.find('div', class_='area_info')
        artist = info.find('span', class_='author').text.strip()
    except Exception as e:
        print(e)
        # NOTE(review): on failure this raises Exception with the comic
        # title (or 'artist not found') as the message — intentional?
        try:
            title = ('\n').join(soup.find('div', class_='title').text.strip().split('\n')[:-1]).strip()
        except:
            title = 'artist not found'
        raise Exception(title)
    print_('artist: {}'.format(artist))
    title = soup.find('meta', {'property': 'og:title'}).attrs['content']
    pages = []
    nos = set()
    # NOTE(review): hard cap of 99 list pages.
    for p in range(1, 100):
        if p == 1:
            url_page = url
        else:
            url_page = set_page(url, p)
        html = downloader.read_html(url_page)
        print('read page:', url_page)
        soup = Soup(html)
        view = soup.findAll('ul', class_='section_episode_list')[(-1)]
        for lst in view.findAll('li'):
            url_page = urljoin(url, lst.find('a').attrs['href'])
            if 'detail.nhn' not in url_page.lower() and 'detail?' not in url_page.lower(): #3540
                continue
            print_('url_page: {}'.format(url_page))
            text = lst.find('strong', class_='title').find('span', class_='name').text.strip()
            no = int(re.findall('[?&]no=([0-9]+)', url_page)[0])
            if no in nos:
                print('duplicate no: {}'.format(no))
                continue
            nos.add(no)
            text = '{:04} - {}'.format(no, text)
            page = Page(url_page, text, p)
            pages.append(page)
        btn_next = soup.find('a', class_='btn_next')
        if btn_next is None or btn_next.attrs['href'] == '#':
            print('end of page')
            break
    info = Info(id, title, artist)
    return (
        info, pages)
@page_selector.register('navertoon')
@try_n(4)
def f(url):
    """Page-selector hook: return every episode page for a navertoon URL."""
    fixed = Downloader_navertoon.fix_url(url)
    _info, pages = get_pages(fixed)
    return pages
@try_n(6)
def get_imgs(page, cw=None):
    """Collect the ``Image`` objects of a single episode *page*.

    Dispatches on the page's ``webtoonType`` script variable:
    DEFAULT (scroll viewer), CUTTOON (swiper slides), EFFECTTOON (JSON
    asset document), with a regex fallback on ``sImageUrl`` entries.
    Retries up to 6 times via ``@try_n``.
    """
    print_ = get_print(cw)
    html = downloader.read_html(page.url)
    soup = Soup(html)
    # NOTE(review): `re.find` is not stdlib `re`; presumably a project
    # wrapper returning the first group or None -- confirm which module
    # `re` resolves to in this file.
    type_ = re.find('''webtoonType *: *['"](.+?)['"]''', html)
    print_('type: {}'.format(type_))
    imgs = []
    if type_ == 'DEFAULT': # https://m.comic.naver.com/webtoon/detail.nhn?titleId=715772
        view = soup.find('div', class_='toon_view_lst')
        for img in view.findAll('img'):
            img = img.attrs.get('data-src')
            if not img:
                # placeholder <img> without a lazy-load source
                continue
            img = urljoin(page.url, img)
            img = Image(img, page, len(imgs))
            imgs.append(img)
    elif type_ == 'CUTTOON': # https://m.comic.naver.com/webtoon/detail.nhn?titleId=752803
        view = soup.find('div', class_='swiper-wrapper')
        for div in view.findAll('div', class_='swiper-slide'):
            if div.parent != view:
                # only direct children; nested swipers are not cuts
                continue
            if div.find('div', class_='cut_viewer_last'):
                # trailing "last" slide carries no cut image
                print('cut_viewer_last')
                continue
            if div.find('div', class_='cut_viewer_recomm'):
                # recommendation slide, not part of the episode
                print('cut_viewer_recomm')
                continue
            img = div.find('img')
            img = img.attrs['data-src']
            img = urljoin(page.url, img)
            img = Image(img, page, len(imgs))
            imgs.append(img)
    elif type_ == 'EFFECTTOON': #2313; https://m.comic.naver.com/webtoon/detail.nhn?titleId=670144
        # Stillcut file names are served as JSON from `documentUrl`,
        # relative to `imageUrl`.
        img_base = re.find('''imageUrl *: *['"](.+?)['"]''', html) + '/'
        print('img_base:', img_base)
        url_api = re.find('''documentUrl *: *['"](.+?)['"]''', html)
        data_raw = downloader.read_html(url_api, page.url)
        data = json.loads(data_raw)
        for img in data['assets']['stillcut'].values(): # ordered in python3.7+
            img = urljoin(img_base, img)
            img = Image(img, page, len(imgs))
            imgs.append(img)
    else:
        # Fallback: scrape `sImageUrl` entries straight out of the page scripts.
        _imgs = re.findall('sImageUrl *: *[\'"](.+?)[\'"]', html)
        if not _imgs:
            raise Exception('no imgs')
        for img in _imgs:
            img = urljoin(page.url, img)
            img = Image(img, page, len(imgs))
            imgs.append(img)
    return imgs
def get_imgs_all(url, title, cw=None):
    """Gather images for every (selected) episode of the webtoon at *url*.

    Episodes already downloaded are reused via ``get_imgs_already``;
    otherwise ``get_imgs`` fetches them.  Updates the UI title through
    *cw* as it progresses and honors cancellation via ``check_alive``.
    """
    print_ = get_print(cw)
    info, pages = get_pages(url, cw)
    # Respect the user's episode selection, if any.
    pages = page_selector.filter(pages, cw)
    imgs = []
    for p, page in enumerate(pages):
        check_alive(cw)
        # Reuse previously downloaded episodes instead of re-fetching.
        imgs_already = get_imgs_already('navertoon', title, page, cw)
        if imgs_already:
            imgs += imgs_already
            continue
        imgs_new = get_imgs(page, cw)
        print_('{}: {}'.format(page.title, len(imgs_new)))
        imgs += imgs_new
        if cw is not None:
            # Progress text ("reading...") shown in the UI.
            cw.setTitle(tr_('읽는 중... {} / {} ({}/{})').format(title, page.title, p + 1, len(pages)))
    return imgs
|
from typing import List, Union
from rply.token import BaseBox
class Node(BaseBox):
    """A named graph node with an optional attribute mapping.

    Equality and hashing are based on *name* only, so two nodes with the
    same name but different attributes compare equal.
    """

    def __init__(self, name: str, attributes=None):
        self.name = name
        # Default to a fresh dict per instance: the previous mutable
        # `attributes={}` default was shared across every Node created
        # without an explicit mapping.
        self.attributes = {} if attributes is None else attributes

    def __eq__(self, right):
        return self.name == right.name

    def __ne__(self, right):
        return self.name != right.name

    def __hash__(self):
        return self.name.__hash__()

    def __repr__(self):
        return f"Node(name={self.name} attributes={self.attributes})"
class Edge(BaseBox):
    """A directed edge between two endpoint groups.

    *source* and *target* are lists of ``Node``; equality compares both
    endpoint lists and ignores *attributes*.
    """

    def __init__(self, source: List[Node], target: List[Node], attributes=None):
        self.source = source
        self.target = target
        # Fresh dict per instance -- avoids the shared mutable-default pitfall.
        self.attributes = {} if attributes is None else attributes

    def __eq__(self, right):
        return self.source == right.source and self.target == right.target

    def __ne__(self, right):
        return not self.__eq__(right)

    def __repr__(self):
        return f"Edge({self.source} -> {self.target} attributes={self.attributes})"
class LongEdge(BaseBox):
    """An edge passing through an ordered sequence of endpoint groups.

    ``Graph`` expands a LongEdge into consecutive pairwise ``Edge``s.
    """

    def __init__(self, points: List[List[Node]], attributes=None):
        self.points = points
        # Fresh dict per instance -- avoids the shared mutable-default pitfall.
        self.attributes = {} if attributes is None else attributes

    def __repr__(self):
        # Fixed: the original f-string was missing the closing parenthesis.
        return f"LongEdge({self.points} attributes={self.attributes})"
class Graph(BaseBox):
    """A named graph assembled from Node/Edge/LongEdge elements.

    Nodes and edges are collected into ``self.nodes`` / ``self.edges``;
    each ``LongEdge`` is expanded into consecutive pairwise ``Edge``s that
    share the LongEdge's attribute mapping.
    """

    def __init__(self, name: str, elements: List[Union[Node, Edge, LongEdge]]):
        self.name = name
        self.nodes = []
        self.edges = []
        for e in elements:
            # isinstance is preferred over `type(e) == ...`; the three
            # element classes are siblings, so dispatch order is safe.
            if isinstance(e, Node):
                self.nodes.append(e)
            elif isinstance(e, Edge):
                self.edges.append(e)
            elif isinstance(e, LongEdge):
                # Expand into edges between consecutive point groups.
                for src, dst in zip(e.points, e.points[1:]):
                    self.edges.append(Edge(src, dst, e.attributes))

    def __repr__(self):
        return f"Graph(name={self.name} nodes={self.nodes} edges={self.edges})"
|
import sys
dictionary_file = sys.argv[1]
rules_file = sys.argv[2]
test_file = sys.argv[3]
global count
count = 0
global found
found = False
#----------------------------------------Dictionary--------------------------------------------------------
def create_dict():
    """Read the dictionary file into parallel key/value lists.

    Each line is lowercased and split on whitespace; the first token is
    the headword and the remaining tokens its attribute values.  Returns
    ``(keys, values)`` as two parallel lists (duplicates preserved, in
    file order).  Reads the module-level ``dictionary_file`` path.
    """
    list_k = []
    list_v = []
    # Context manager closes the handle deterministically (the original
    # leaked an open file object), and iterating the handle avoids
    # loading the whole file via readlines().
    with open(dictionary_file, 'r') as fh:
        for line in fh:
            parts = line.lower().split()
            if not parts:
                # Skip blank lines instead of raising IndexError on parts[0].
                continue
            list_k.append(parts[0])
            list_v.append(parts[1:])
    return list_k, list_v
list_key,list_val = create_dict()
global dict_y
dict_y=dict(zip(list_key,list_val))
l=0
k=l
for i in list_key:
k=k+1
for j in list_key[k:]:
if i==j:
z=list_key.index(i)
dict_y[i].extend(list_val[z])
#-------------------------------------------Search after applying rules------------------------------
def morp_dict_search(wrd,list_r,list_f,list_3_prev,list_5_prev,):
    """Print the analysis of *wrd* if it is in the dictionary, else recurse
    into the rule engine (``morph_analy``).  Python 2 print statements.

    list_3_prev / list_5_prev carry the category and feature from the
    previously applied rule ("" on the first call).
    """
    if wrd in dict_y:
        # NOTE(review): this assigns a *local* `found` (no `global found`
        # declaration), so the module-level flag checked after
        # morph_analy() likely never sees True -- confirm intent.
        found=True
        val = dict_y[wrd]  # NOTE(review): unused local
        if list_3_prev == "":
            # NOTE(review): membership tests lowercase "root" but the
            # lookup uses .index("ROOT"); dictionary lines are lowercased
            # on load, so .index("ROOT") would raise ValueError -- verify
            # against real data.
            if "root" in dict_y[wrd]:
                print test_word, list_f, "ROOT=",dict_y[wrd][dict_y[wrd].index("ROOT")+1], "SOURCE= MORPHOLOGY\n"
            else:
                print test_word, list_f, "ROOT=",wrd, "SOURCE= MORPHOLOGY\n"
        elif (list_3_prev != ""):
            # Only report when the rule's category matches the previous one.
            if(list_3_prev == list_f):
                if "root" in dict_y[wrd]:
                    print test_word, list_5_prev, "ROOT=",dict_y[wrd][dict_y[wrd].index("ROOT")+1], "SOURCE= MORPHOLOGY\n"
                else:
                    print test_word, list_5_prev, "ROOT=",wrd, "SOURCE= MORPHOLOGY\n"
        #return word_found_in_dict
    else:
        # Not in the dictionary: try stripping/replacing affixes via rules.
        found = False
        morph_analy(wrd,list_r,list_f)
#------------------------------------------------Applying rules-Analyser----------------------------------------
def morph_analy(x,list_3,list_5):
#word_found_in_dict=False
global count
# if_found = False
file_rule = open(rules_file,'r').readlines()
for i in file_rule:
i=i.lower()
list_rule = i.split()
if list_rule[0] == "suffix" and str(x).endswith(list_rule[1]):
if list_rule[2].isalpha():
suff_len=len(list_rule[1])
wrd_srch = x[:-suff_len]
rep_len = len(list_rule[2])
wrd_srch = wrd_srch + list_rule[2]
count +=1
if(list_3 != "") and (list_3 != list_rule[5]):
break
else:
morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
#morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
else:
suff_len=len(list_rule[1])
wrd_srch = x[:-suff_len]
count +=1
if(list_3 != "") and (list_3 != list_rule[5]):
break
else:
morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
#morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
elif list_rule[0] == "prefix" and str(x).startswith(list_rule[1]):
if list_rule[2].isalpha() == True:
pref_len=len(list_rule[1])
wrd_srch = x[pref_len:]
rep_len = len(list_rule[2])
wrd_srch =list_rule[2] + wrd_srch
count +=1
if(list_3 != "") and (list_3 != list_rule[5]):
break
morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
else:
pref_len = len(list_rule[1])
wrd_srch = x[pref_len:]
count +=1
if(list_3 != "") and (list_3 != list_rule[5]):
break
morp_dict_search(wrd_srch,list_rule[3],list_rule[5],list_3,list_5)
if x == test_word and count == 0 :
print test_word,"noun", "ROOT=",test_word, "SOURCE= Default\n"
#------------------------------------------dictionary search-----------------------------------------------
list_t =open(test_file,'r').readlines()
for i in list_t:
check_y=i.strip("\n")
check_i=check_y.lower()
if check_i in dict_y:
if "ROOT" in dict_y[check_i]:
print check_i, dict_y[check_i][0], "ROOT=",dict_y[check_i][dict_y[check_i].index("ROOT")+1], "SOURCE= Dictionary\n"
else:
print check_i, dict_y[check_i][0], "ROOT=",check_i, "SOURCE= Dictionary\n"
else:
list_3 = ""
list_5 = ""
global test_word
test_word = check_i
found =False
morph_analy(check_i,list_3,list_5)
if found == False and check_i != "":
print check_i,"noun", "ROOT=",test_word, "SOURCE= Default"
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 4 07:20:37 2022
@author: A.Kuzmin
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.interpolate import make_lsq_spline
from scipy.interpolate import InterpolatedUnivariateSpline
#, BSpline
#from scipy.interpolate import make_interp_spline
##########################################################
#
# Zero line for atomic absorption by LSQ spline
#
##########################################################
#
# Absorption coefficient mu(E) from E0 to Emax
#
def LSQ_ZeroLine(x, y, k):
    """Fit a least-squares B-spline "zero line" to absorption data mu(E).

    Parameters
    ----------
    x : 1-D array of energies E (increasing, E0..Emax).
    y : absorption values mu(E) at *x*.
    k : B-spline degree (3 = cubic).

    The fit is performed in k-space (k**2 = 2*E/7.62, i.e. the
    photoelectron wavenumber mapping used throughout this module) with
    the signal weighted by k**3, using interior knots spaced evenly in E
    about every 500 eV (at least 3 intervals).  Returns the smoothed zero
    line with the leading points below the second interior knot dropped
    (the fit is unreliable there).

    Improvements over the original: dead preallocation arrays removed,
    the knot and tail loops are vectorized, and the debug prints are gone.
    The returned values are numerically identical.
    """
    # Number of knot intervals: one per ~500 eV of data, minimum 3.
    nk = round(x[-1] / 500.0) + 1
    if nk <= 3:
        nk = 3
    # Map energies to wavenumbers.
    xk = (2.0 / 7.62 * x) ** 0.5
    # Interior knots: evenly spaced in E, mapped to k-space.
    tk = np.sqrt(2.0 / 7.62 * np.arange(1, nk) * x[-1] / nk)
    # Full knot vector with (k+1)-fold end knots, as make_lsq_spline requires.
    tt = np.r_[(xk[0],) * (k + 1), tk, (xk[-1],) * (k + 1)]
    # LSQ fit of y*k^3 (k^3 weighting), then undo the weighting.
    splk3 = make_lsq_spline(xk, y * xk * xk * xk, tt, k)
    ysk = splk3(xk) / xk / xk / xk
    # First index where xk >= tk[1] (same as the original while-loop scan).
    n = int(np.searchsorted(xk, tk[1], side='left'))
    return ysk[n:].copy()
def extend_zeroline(x, y, x_new, order=3):
    """Extrapolate the zero line onto new abscissae.

    Fits an interpolating univariate spline through ``(x, y)`` and
    evaluates it at *x_new*; points outside ``[x[0], x[-1]]`` are
    extrapolated (used to extend the zero line from E0 down to Emin).

    Parameters
    ----------
    x, y : known zero-line samples; *x* must be strictly increasing.
    x_new : points to evaluate/extrapolate at.
    order : spline order (1 linear, 2 quadratic, 3 cubic, ...).  Defaults
        to 3, matching the previously hard-coded constant; now exposed as
        a backward-compatible parameter.
    """
    s = InterpolatedUnivariateSpline(x, y, k=order)
    return s(x_new)
##########################################################
# if __name__ == '__main__':
# # Main code
# nf = 999
# x = np.empty(shape=(nf))
# x.fill(0)
# xk = np.empty(shape=(nf))
# xk.fill(0)
# y = np.empty(shape=(nf))
# y.fill(0)
# #ysk2 = np.empty(shape=(nf))
# #ysk2.fill(0)
# #yy = np.empty(shape=(nf))
# #yy.fill(0)
# #
# # Create "Experimental data"
# #
# for i in range (0,nf):
# xk[i] = 30.0*(i+1)/(nf+1) # k space
# x[i] = xk[i]*xk[i]*7.62/2.0 # E space
# y[i] = 6.0*np.sin(2.0*xk[i]*2.0)*np.exp(-2.0*0.01*0.01*xk[i]*xk[i])/(xk[i]*2.0*2.0)
# # y[i] = y[i] + 2*np.sin(0.15*xk[i])
# # y[i] = y[i] + 2*np.sin(0.1*xk[i])
# y[i] = y[i] + 2*np.sin(0.01*xk[i])
# #
# # Calculate zero line for absorption coefficient mu(E)=y(x) from E0 to Emax
# #
# yy = LSQ_ZeroLine(x, y, 3)
# plt.plot(x, y, 'r-', lw=1, ms=5)
# #plt.plot(x, ysk2,'b-', lw=3, label='LSQ spline k^2')
# plt.plot(x, yy,'k-', lw=3, label='LSQ spline k^2')
# plt.legend(loc='best')
# plt.show()
|
from django.utils import simplejson
from dajaxice.decorators import dajaxice_register
from dajax.core import Dajax
@dajaxice_register
def init_form(request):
    """Dajaxice endpoint: write 'Hello' into every element matching '.form'."""
    dajax = Dajax()
    # Fix: the Dajax API method is `assign`; the original `assin` raised
    # AttributeError at runtime.
    dajax.assign('.form', 'innerHTML', 'Hello')
    return dajax.json()
# coding=utf-8
""" display.py
Read file like italics.txt and display a neighborhood of lines from
mwadj.txt for each instance.
"""
import sys, re,codecs
import collections # requires Python 2.7+
class Italics(object):
    """One parsed record of an italics.txt line.

    A valid line has five '@'-separated fields:
    ``lnum @ key1 @ linenum @ itype @ txt``.  ``ok`` flags whether the
    line parsed; the field attributes are set only when it did.
    ``line`` always holds the newline-stripped raw text.
    """

    def __init__(self, line):
        stripped = line.rstrip('\r\n')
        self.line = stripped
        fields = stripped.split('@')
        self.ok = (len(fields) == 5)
        if self.ok:
            self.lnum, self.key1, self.linenum, self.itype, self.txt = fields
def init_italics(filein):
with codecs.open(filein,"r","utf-8") as f:
#recs = [Italics(line) for line in f]
#recs = [Italics(line) for n,line in f.iteritems() if n < 50]
recs=[]
n = 0
for line in f:
rec = Italics(line)
if rec.ok:
recs.append(rec)
n=n+1
#if n == 50: #dbg
# break
print len(recs),"records from",filein
return recs
def tokenize(s0):
    """Split *s0* into tokens of letters, digits and '-'.

    Every other character is treated as a separator; empty tokens are
    dropped.  Returns the list of tokens in order.
    """
    # Replace everything outside [a-zA-Z0-9-] with a space, then split on
    # runs of spaces.  (An `if False:` Python-2 debug-print block was dead
    # code and has been removed.)
    s = re.sub(r'[^a-zA-Z0-9-]', ' ', s0)
    return [t for t in re.split(r' +', s) if t != '']
def init_tokendict(recs):
    """Group Italics records by their normalized (tokenized) text.

    The key is the record's ``txt`` field tokenized and re-joined with
    single spaces; the value is the list of records sharing that key, in
    input order.
    """
    grouped = {}
    for rec in recs:
        key = ' '.join(tokenize(rec.txt))
        grouped.setdefault(key, []).append(rec)
    return grouped
def display(filein,inlines,fileout):
italics = init_italics(filein)
tokendict = init_tokendict(italics)
#keys = sorted(tokendict.keys(),key = lambda x: x.lower())
keys0 = [(x,len(tokendict[x])) for x in tokendict.keys()]
keys = sorted(keys0,key=lambda x:(-x[1],x[0]))
fout = codecs.open(fileout,'w','utf-8')
nout = 0
n=0
icase = 0
for (key,l) in keys:
recs = tokendict[key]
recs = sorted(recs,key=lambda x: int(x.linenum))
icase = icase + 1
outarr=[]
if icase != 1:
outarr.append('')
outarr.append('-'*60)
out = 'Case %03d. key=%s, %s subcases' %(icase,key,len(recs))
outarr.append(out)
isubcase = 0
for rec in recs:
isubcase = isubcase + 1
out ='Subcase %03d.%02d: hw=%s, L=%s, lnum=%s: txt=%s' %(icase,isubcase,rec.key1,rec.lnum,rec.linenum,rec.txt)
outarr.append('')
outarr.append(out)
ilinenum = int(rec.linenum)
idx0 = ilinenum - 1
idx1 = idx0 - 1
idx2 = idx0 + 2
# Add a line for editing
# We want to autoadjust for the word 'tum', since this is actually
# almost always the Sanskrit Infinitive ending.
# We check several conditions indicating this 'infinitive' sense of 'tum',
# and mark it as such if these conditions are met
line1 = inlines[idx1].rstrip() # previous line
line0 = inlines[idx0] # current line
if line1.startswith('<P>') and line1.endswith('-%%}') and\
line0.startswith('<>{%%tum'):
# it surely is infinitive
pfx = '#INF '
else:
pfx = '# '
# Change the 'type' to empty string
line = rec.line.replace('@Eng1@','@@')
if False and (icase == 1) and (isubcase == 2): # debug
print line1
print line0
print pfx
print line1.startswith('<P>')
print line1.endswith('-%%}')
print line0.startswith('<>{%%tum')
exit(1)
outarr.append(pfx + line)
for idx in xrange(idx1,idx2):
line = inlines[idx]
line = line.rstrip('\r\n')
out = '%06d %s' %(idx+1,line)
outarr.append(out)
#outarr.append('')
for out in outarr:
fout.write(out + '\n')
nout = nout + 1
fout.close()
print nout,"records written to",fileout
if __name__=="__main__":
filein = sys.argv[1] # file like italics.txt
filein1 = sys.argv[2] # mw72adj
fileout = sys.argv[3] #
# slurp digitization txt file into list of lines
with codecs.open(filein1,encoding='utf-8',mode='r') as f:
inlines = f.readlines()
display(filein,inlines,fileout)
|
"""Code for running ingestion stage"""
import json
import pathlib
import subprocess
import tempfile
from subprocess import CalledProcessError
from typing import Collection, Optional
import orjson
import pydantic
from sentry_sdk import set_tag
from vaccine_feed_ingest_schema import location
from vaccine_feed_ingest.utils.log import getLogger
from ..utils.validation import VACCINATE_THE_STATES_BOUNDARY
from . import caching, enrichment, outputs, site
from .common import STAGE_OUTPUT_SUFFIX, PipelineStage
logger = getLogger(__file__)
MAX_NORMALIZED_RECORD_SIZE = 15_000 # Maximum record size of 15KB for normalized reords
def run_fetch(
site_dir: pathlib.Path,
output_dir: pathlib.Path,
timestamp: str,
dry_run: bool = False,
fail_on_runner_error: bool = True,
) -> bool:
set_tag("vts.runner", f"{site_dir.parent.name}/{site_dir.name}")
set_tag("vts.stage", "fetch")
fetch_path, yml_path = site.resolve_executable(site_dir, PipelineStage.FETCH)
if not fetch_path:
log_msg = (
"Missing shared executable to run for yml in %s."
if yml_path
else "No fetch cmd or .yml config for %s to run."
)
logger.info(log_msg, site_dir.name)
return False
with tempfile.TemporaryDirectory(
f"_fetch_{site_dir.parent.name}_{site_dir.name}"
) as tmp_str:
tmp_dir = pathlib.Path(tmp_str)
fetch_output_dir = tmp_dir / "output"
fetch_output_dir.mkdir(parents=True, exist_ok=True)
logger.info(
"Fetching %s/%s and saving fetched output to %s",
site_dir.parent.name,
site_dir.name,
fetch_output_dir,
)
try:
subprocess.run(
[str(fetch_path), str(fetch_output_dir), str(yml_path)], check=True
)
except CalledProcessError as e:
if fail_on_runner_error:
raise e
logger.error(
"Subprocess %s/%s errored on fetch, stage will be skipped",
site_dir.parent.name,
site_dir.name,
)
return False
if not outputs.data_exists(fetch_output_dir):
msg = f"{fetch_path.name} for {site_dir.name} returned no data files."
if fail_on_runner_error:
raise NotImplementedError(msg)
logger.warning(msg)
return False
if not dry_run:
fetch_run_dir = outputs.generate_run_dir(
output_dir,
site_dir.parent.name,
site_dir.name,
PipelineStage.FETCH,
timestamp,
)
logger.info("Copying files from %s to %s", fetch_output_dir, fetch_run_dir)
outputs.copy_files(fetch_output_dir, fetch_run_dir)
return True
def run_parse(
site_dir: pathlib.Path,
output_dir: pathlib.Path,
timestamp: str,
validate: bool = True,
dry_run: bool = False,
fail_on_runner_error: bool = True,
) -> bool:
set_tag("vts.runner", f"{site_dir.parent.name}/{site_dir.name}")
set_tag("vts.stage", "parse")
parse_path, yml_path = site.resolve_executable(site_dir, PipelineStage.PARSE)
if not parse_path:
log_msg = (
"Missing shared executable to run for yml in %s."
if yml_path
else "No parse cmd or .yml config for %s to run."
)
logger.info(log_msg, site_dir.name)
return False
fetch_run_dir = outputs.find_latest_run_dir(
output_dir, site_dir.parent.name, site_dir.name, PipelineStage.FETCH
)
if not fetch_run_dir:
logger.warning(
"Skipping parse stage for %s because there is no data from fetch stage",
site_dir.name,
)
return False
if not outputs.data_exists(fetch_run_dir):
logger.warning("No fetch data available to parse for %s.", site_dir.name)
return False
with tempfile.TemporaryDirectory(
f"_parse_{site_dir.parent.name}_{site_dir.name}"
) as tmp_str:
tmp_dir = pathlib.Path(tmp_str)
parse_output_dir = tmp_dir / "output"
parse_input_dir = tmp_dir / "input"
parse_output_dir.mkdir(parents=True, exist_ok=True)
parse_input_dir.mkdir(parents=True, exist_ok=True)
outputs.copy_files(fetch_run_dir, parse_input_dir)
logger.info(
"Parsing %s/%s and saving parsed output to %s",
site_dir.parent.name,
site_dir.name,
parse_output_dir,
)
try:
subprocess.run(
[
str(parse_path),
str(parse_output_dir),
str(parse_input_dir),
str(yml_path),
],
check=True,
)
except CalledProcessError as e:
if fail_on_runner_error:
raise e
logger.error(
"Subprocess %s/%s errored on parse, stage will be skipped",
site_dir.parent.name,
site_dir.name,
)
return False
if not outputs.data_exists(
parse_output_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.PARSE]
):
msg = f"{parse_path.name} for {site_dir.name} returned no data files with expected extension {STAGE_OUTPUT_SUFFIX[PipelineStage.PARSE]}."
if fail_on_runner_error:
raise NotImplementedError(msg)
logger.warning(msg)
return False
if validate:
if not _validate_parsed(parse_output_dir):
msg = f"{parse_path.name} for {site_dir.name} returned invalid ndjson files."
if fail_on_runner_error:
raise TypeError(msg)
logger.warning(msg)
return False
if not dry_run:
parse_run_dir = outputs.generate_run_dir(
output_dir,
site_dir.parent.name,
site_dir.name,
PipelineStage.PARSE,
timestamp,
)
logger.info("Copying files from %s to %s", parse_output_dir, parse_run_dir)
outputs.copy_files(parse_output_dir, parse_run_dir)
return True
def run_normalize(
site_dir: pathlib.Path,
output_dir: pathlib.Path,
timestamp: str,
validate: bool = True,
dry_run: bool = False,
fail_on_runner_error: bool = True,
) -> bool:
set_tag("vts.runner", f"{site_dir.parent.name}/{site_dir.name}")
set_tag("vts.stage", "normalize")
normalize_path, yml_path = site.resolve_executable(
site_dir, PipelineStage.NORMALIZE
)
if not normalize_path:
logger.info("No normalize cmd for %s to run.", site_dir.name)
return False
parse_run_dir = outputs.find_latest_run_dir(
output_dir, site_dir.parent.name, site_dir.name, PipelineStage.PARSE
)
if not parse_run_dir:
logger.warning(
"Skipping normalize stage for %s because there is no data from parse stage",
site_dir.name,
)
return False
if not outputs.data_exists(
parse_run_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.PARSE]
):
logger.warning(
"No parse data available to normalize for %s with extension %s.",
site_dir.name,
STAGE_OUTPUT_SUFFIX[PipelineStage.PARSE],
)
return False
with tempfile.TemporaryDirectory(
f"_normalize_{site_dir.parent.name}_{site_dir.name}"
) as tmp_str:
tmp_dir = pathlib.Path(tmp_str)
normalize_output_dir = tmp_dir / "output"
normalize_input_dir = tmp_dir / "input"
normalize_output_dir.mkdir(parents=True, exist_ok=True)
normalize_input_dir.mkdir(parents=True, exist_ok=True)
outputs.copy_files(parse_run_dir, normalize_input_dir)
logger.info(
"Normalizing %s/%s and saving normalized output to %s",
site_dir.parent.name,
site_dir.name,
normalize_output_dir,
)
try:
if yml_path:
subprocess.run(
[
str(normalize_path),
normalize_output_dir,
normalize_input_dir,
str(yml_path),
],
check=True,
)
else:
subprocess.run(
[str(normalize_path), normalize_output_dir, normalize_input_dir],
check=True,
)
except CalledProcessError as e:
if fail_on_runner_error:
raise e
logger.error(
"Subprocess %s/%s errored on normalize, stage will be skipped",
site_dir.parent.name,
site_dir.name,
)
return False
if not outputs.data_exists(
normalize_output_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.NORMALIZE]
):
msg = f"{normalize_path.name} for {site_dir.name} returned no data files with expected extension {STAGE_OUTPUT_SUFFIX[PipelineStage.NORMALIZE]}."
if fail_on_runner_error:
raise NotImplementedError(msg)
logger.warning(msg)
return False
if validate:
if not _validate_normalized(normalize_output_dir):
msg = f"{normalize_path.name} for {site_dir.name} returned invalid source location ndjson files."
if fail_on_runner_error:
raise TypeError(msg)
logger.warning(msg)
return False
if not dry_run:
normalize_run_dir = outputs.generate_run_dir(
output_dir,
site_dir.parent.name,
site_dir.name,
PipelineStage.NORMALIZE,
timestamp,
)
logger.info(
"Copying files from %s to %s", normalize_output_dir, normalize_run_dir
)
outputs.copy_files(normalize_output_dir, normalize_run_dir)
return True
def run_enrich(
site_dir: pathlib.Path,
output_dir: pathlib.Path,
timestamp: str,
enable_apicache: bool = True,
enrich_apis: Optional[Collection[str]] = None,
geocodio_apikey: Optional[str] = None,
placekey_apikey: Optional[str] = None,
dry_run: bool = False,
) -> bool:
set_tag("vts.runner", f"{site_dir.parent.name}/{site_dir.name}")
set_tag("vts.stage", "enrich")
normalize_run_dir = outputs.find_latest_run_dir(
output_dir, site_dir.parent.name, site_dir.name, PipelineStage.NORMALIZE
)
if not normalize_run_dir:
logger.warning(
"Skipping enrich for %s because there is no data from normalize stage",
site_dir.name,
)
return False
if not outputs.data_exists(
normalize_run_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.NORMALIZE]
):
logger.warning(
"No normalize data available to enrich for %s.",
f"{site_dir.parent.name}/{site_dir.name}",
)
return False
with tempfile.TemporaryDirectory(
f"_enrich_{site_dir.parent.name}_{site_dir.name}"
) as tmp_str:
tmp_dir = pathlib.Path(tmp_str)
enrich_output_dir = tmp_dir / "output"
enrich_input_dir = tmp_dir / "input"
enrich_output_dir.mkdir(parents=True, exist_ok=True)
enrich_input_dir.mkdir(parents=True, exist_ok=True)
outputs.copy_files(normalize_run_dir, enrich_input_dir)
logger.info(
"Enriching %s/%s and saving enriched output to %s",
site_dir.parent.name,
site_dir.name,
enrich_output_dir,
)
success = None
if enable_apicache and enrich_apis:
with caching.api_cache_for_stage(
output_dir, site_dir, PipelineStage.ENRICH
) as api_cache:
success = enrichment.enrich_locations(
enrich_input_dir,
enrich_output_dir,
api_cache=api_cache,
enrich_apis=enrich_apis,
geocodio_apikey=geocodio_apikey,
placekey_apikey=placekey_apikey,
)
else:
success = enrichment.enrich_locations(enrich_input_dir, enrich_output_dir)
if not success:
logger.error(
"Enrichment failed for %s.", f"{site_dir.parent.name}/{site_dir.name}"
)
return False
if not dry_run:
enrich_run_dir = outputs.generate_run_dir(
output_dir,
site_dir.parent.name,
site_dir.name,
PipelineStage.ENRICH,
timestamp,
)
logger.info(
"Copying files from %s to %s", enrich_output_dir, enrich_run_dir
)
outputs.copy_files(enrich_output_dir, enrich_run_dir)
return True
def _validate_parsed(output_dir: pathlib.Path) -> bool:
    """Validate output files are valid ndjson records.

    Scans every parse-stage data file under *output_dir* and attempts to
    decode each line as JSON.  Returns False (after logging the offending
    file/line) on the first invalid record, True otherwise.
    """
    for filepath in outputs.iter_data_paths(
        output_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.PARSE]
    ):
        with filepath.open(mode="rb") as ndjson_file:
            for line_no, content in enumerate(ndjson_file, start=1):
                try:
                    orjson.loads(content)
                except json.JSONDecodeError:
                    # orjson.JSONDecodeError subclasses json.JSONDecodeError,
                    # so this catches orjson parse failures.
                    logger.warning(
                        "Invalid json record in %s at line %d: %s",
                        filepath,
                        line_no,
                        content,
                    )
                    return False
    return True
def _validate_normalized(output_dir: pathlib.Path) -> bool:
"""Validate output files are valid normalized locations."""
for filepath in outputs.iter_data_paths(
output_dir, suffix=STAGE_OUTPUT_SUFFIX[PipelineStage.NORMALIZE]
):
with filepath.open(mode="rb") as ndjson_file:
for line_no, content in enumerate(ndjson_file, start=1):
if len(content) > MAX_NORMALIZED_RECORD_SIZE:
logger.warning(
"Source location too large to process in %s at line %d: %s",
filepath,
line_no,
content[:100],
)
return False
try:
content_dict = orjson.loads(content)
except json.JSONDecodeError:
logger.warning(
"Invalid json record in %s at line %d: %s",
filepath,
line_no,
content,
)
return False
try:
normalized_location = location.NormalizedLocation.parse_obj(
content_dict
)
except pydantic.ValidationError as e:
logger.warning(
"Invalid source location in %s at line %d: %s\n%s",
filepath,
line_no,
content[:100],
str(e),
)
return False
if normalized_location.location:
if not VACCINATE_THE_STATES_BOUNDARY.contains(
normalized_location.location
):
logger.warning(
"Invalid latitude or longitude in %s at line %d: %s is outside approved bounds (%s)",
filepath,
line_no,
normalized_location.location,
VACCINATE_THE_STATES_BOUNDARY,
)
return False
return True
|
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Replace All Digits with Characters.py
@time: 2021/05/02
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
class Solution:
    """LeetCode 1844 -- Replace All Digits with Characters.

    Characters at even indices are letters; each odd-index digit d is
    replaced with the letter d positions after the preceding letter.
    """

    def replaceDigits(self, s: str) -> str:
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        index_of = {ch: i for i, ch in enumerate(alphabet)}
        out = []
        prev_letter = ''
        for idx, ch in enumerate(s):
            if idx % 2 == 1:
                # Shift the previous letter forward by the digit's value.
                out.append(alphabet[index_of[prev_letter] + int(ch)])
            else:
                prev_letter = ch
                out.append(ch)
        return ''.join(out)
|
from django.db import models
class URL_Shortener(models.Model):
    """Maps an original URL to its shortened form."""

    # The original (long) URL as submitted.
    url_name = models.CharField(max_length=800)
    # The shortened form that resolves back to `url_name`.
    shortened_url = models.CharField(max_length=800)

    def __str__(self):
        # Show the original URL in admin listings and shell reprs.
        return self.url_name
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by Roberto Preste
import pytest
import numpy as np
import prestools.bioinf as pb
# pb.hamming_distance
def test_hamming_distance_zero():
expect = 0
result = pb.hamming_distance("CAGATA", "CAGATA")
assert result == expect
def test_hamming_distance_one():
expect = 1
result = pb.hamming_distance("CAGATA", "CACATA")
assert result == expect
def test_hamming_distance_many():
expect = 6
result = pb.hamming_distance("CAGATA", "GTCTAT")
assert result == expect
def test_hamming_distance_error():
with pytest.raises(ValueError):
pb.hamming_distance("CAGATA", "CACACACA")
def test_hamming_distance_different_case():
expect = 6
result = pb.hamming_distance("CAGATA", "cagata", ignore_case=False)
assert result == expect
def test_hamming_distance_ignore_case():
expect = 0
result = pb.hamming_distance("CAGATA", "cagata", ignore_case=True)
assert result == expect
# pb.aa_one_to_three()
def test_aa_one_to_three():
expect = "CysAlaAsnAsnGlu"
result = pb.aa_one_to_three("CANNE")
assert result == expect
def test_aa_one_to_three_lowercase():
expect = "CysAlaAsnAsnGlu"
result = pb.aa_one_to_three("canne")
assert result == expect
# pb.aa_three_to_one()
def test_aa_three_to_one():
expect = "CANNE"
result = pb.aa_three_to_one("CysAlaAsnAsnGlu")
assert result == expect
def test_aa_three_to_one_lowercase():
expect = "CANNE"
result = pb.aa_three_to_one("cysalaasnasnglu")
assert result == expect
# pb.reverse_complement()
def test_reverse_complement():
expect = "CAGATA"
result = pb.reverse_complement("TATCTG", conversion="reverse_complement")
assert result == expect
def test_reverse_complement_short():
expect = "CAGATA"
result = pb.reverse_complement("TATCTG", conversion="rc")
assert result == expect
def test_reverse_complement_reverse():
expect = "CAGATA"
result = pb.reverse_complement("ATAGAC", conversion="reverse")
assert result == expect
def test_reverse_complement_reverse_short():
expect = "CAGATA"
result = pb.reverse_complement("ATAGAC", conversion="r")
assert result == expect
def test_reverse_complement_complement():
expect = "CAGATA"
result = pb.reverse_complement("GTCTAT", conversion="complement")
assert result == expect
def test_reverse_complement_complement_short():
expect = "CAGATA"
result = pb.reverse_complement("GTCTAT", conversion="c")
assert result == expect
def test_reverse_complement_error():
with pytest.raises(ValueError):
pb.reverse_complement("CAGATA", conversion="invalid")
# pb.shuffle_sequence()
def test_shuffle_sequence_nt(sample_nt_sequence):
expect = {nt: sample_nt_sequence.count(nt) for nt in pb._NT_LIST}
res = pb.shuffle_sequence(sample_nt_sequence)
result = {nt: res.count(nt) for nt in pb._NT_LIST}
assert len(res) == len(sample_nt_sequence)
assert result == expect
def test_shuffle_sequence_aa(sample_aa_sequence):
expect = {aa: sample_aa_sequence.count(aa) for aa in pb._AA_LIST}
res = pb.shuffle_sequence(sample_aa_sequence)
result = {aa: res.count(aa) for aa in pb._AA_LIST}
assert len(res) == len(sample_aa_sequence)
assert result == expect
# pb.random_sequence()
def test_random_sequence_nt():
result = pb.random_sequence(200, alphabet="nt")
assert len(result) == 200
assert set(result) == set(pb._NT_LIST)
def test_random_sequence_aa():
result = pb.random_sequence(200, alphabet="aa")
assert len(result) == 200
assert set(result) == set(pb._AA_LIST)
def test_random_sequence_error():
with pytest.raises(ValueError):
pb.random_sequence(200, alphabet="invalid")
# pb.mutate_sequence()
def test_mutate_sequence_nt_one(sample_nt_sequence):
result = pb.mutate_sequence(sample_nt_sequence)
assert len(result) == len(sample_nt_sequence)
assert set(result) == set(pb._NT_LIST)
assert pb.hamming_distance(sample_nt_sequence, result) == 1
def test_mutate_sequence_aa_one(sample_aa_sequence):
result = pb.mutate_sequence(sample_aa_sequence, alphabet="aa")
assert len(result) == len(sample_aa_sequence)
assert set(result) == set(pb._AA_LIST)
assert pb.hamming_distance(sample_aa_sequence, result) == 1
def test_mutate_sequence_nt_many(sample_nt_sequence):
result = pb.mutate_sequence(sample_nt_sequence, mutations=10)
assert len(result) == len(sample_nt_sequence)
assert set(result) == set(pb._NT_LIST)
assert pb.hamming_distance(sample_nt_sequence, result) <= 10
def test_mutate_sequence_aa_many(sample_aa_sequence):
result = pb.mutate_sequence(sample_aa_sequence, mutations=10,
alphabet="aa")
assert len(result) == len(sample_aa_sequence)
assert set(result) == set(pb._AA_LIST)
assert pb.hamming_distance(sample_aa_sequence, result) <= 10
def test_mutate_sequence_error():
with pytest.raises(ValueError):
pb.mutate_sequence("CAGATA", alphabet="invalid")
# pb.p_distance
def test_p_distance(sample_nt_long_1, sample_nt_long_2):
expect = 0.7266
result = pb.p_distance(sample_nt_long_1, sample_nt_long_2)
assert result == expect
# pb.jukes_cantor_distance
def test_jukes_cantor_distance(sample_nt_long_1, sample_nt_long_2):
expect = 2.600502888125025
result = pb.jukes_cantor_distance(sample_nt_long_1, sample_nt_long_2)
assert result == expect
# pb.tajima_nei_distance
def test_tajima_nei_distance(sample_nt_long_1, sample_nt_long_2):
expect = 2.612489480361321
result = pb.tajima_nei_distance(sample_nt_long_1, sample_nt_long_2)
assert result == expect
# pb.kimura_distance
def test_kimura_distance(sample_nt_long_1, sample_nt_long_2):
expect = 2.6031087353225875
result = pb.kimura_distance(sample_nt_long_1, sample_nt_long_2)
assert result == expect
# pb.tamura_distance
def test_tamura_distance(sample_nt_long_1, sample_nt_long_2):
expect = 2.603755899559136
result = pb.tamura_distance(sample_nt_long_1, sample_nt_long_2)
assert result == expect
# pb.rpkm
def test_rpkm(sample_gene_counts, sample_gene_lengths):
    """RPKM normalization matches the reference matrix."""
    expected = np.array([
        [225335.50543533, 114525.36331398, 149823.72302588, 179779.64409024],
        [0., 0., 0., 733.40990133],
        [0., 0., 0., 0.],
        [40426.94801193, 194619.75108416, 145501.92735762, 103192.72022265]
    ])
    observed = pb.rpkm(sample_gene_counts, sample_gene_lengths)
    np.testing.assert_array_almost_equal(observed, expected)
# pb.quantile_norm
def test_quantile_norm_raw(sample_gene_counts):
    """Quantile normalization of raw counts matches the reference."""
    expected = np.array([
        [6.280e+02, 2.455e+02, 6.280e+02, 6.280e+02],
        [0.000e+00, 0.000e+00, 0.000e+00, 2.500e-01],
        [0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00],
        [2.455e+02, 6.280e+02, 2.455e+02, 2.455e+02]
    ])
    observed = pb.quantile_norm(sample_gene_counts)
    np.testing.assert_array_almost_equal(observed, expected)
def test_quantile_norm_log(sample_gene_counts):
    """Quantile normalization of log-transformed counts matches the reference."""
    expected = np.array([
        [6.28121943, 5.41052327, 6.28121943, 6.28121943],
        [0., 0., 0., 0.1732868],
        [0., 0., 0., 0.],
        [5.41052327, 6.28121943, 5.41052327, 5.41052327]
    ])
    observed = pb.quantile_norm(sample_gene_counts, to_log=True)
    np.testing.assert_array_almost_equal(observed, expected)
|
import cffi
class Utils:
    """Miscellaneous helper routines."""

    def __init__(self):
        pass

    @staticmethod
    def print_binary(msg):
        """Return a hex-dump style string of *msg*, 16 bytes per row.

        The first row lists the column offsets 00..0f; each data row is
        prefixed with its 4-digit hex row number.
        """
        parts = ['\n ']
        for col in range(16):
            parts.append(format(col, 'x').zfill(2) + ' ')
        for idx in range(len(msg)):
            if idx % 16 == 0:
                parts.append('\n' + format(idx // 16, 'x').zfill(4) + ' ')
            parts.append(format(msg[idx], 'x').zfill(2) + ' ')
        return ''.join(parts)
|
import xml.etree.ElementTree as ET
import re
from regex import regex
class Reader:
    """Chunked file-reading helpers."""
    g_time = 0

    def __init__(self, chunk_size):
        # Number of units (characters in text mode) per read() call.
        self.chunk_size = chunk_size

    def read_in_chunks(self, file_object):
        """Yield successive chunks of up to chunk_size until EOF."""
        while True:
            data = file_object.read(self.chunk_size)
            if not data:
                break
            yield data

    def read_till_max(self, file_object, max_bytes):
        """Yield chunks until EOF or until the cumulative size exceeds max_bytes.

        Fix: count the actual amount read (len(data)) instead of chunk_size,
        which over-counted the final short chunk and kept counting on EOF.
        """
        read_bytes = 0
        while True:
            data = file_object.read(self.chunk_size)
            if not data:
                break
            read_bytes += len(data)
            if read_bytes > max_bytes:
                break
            yield data
class DateInText:
    """A date expression found in text, with its start/end offsets."""

    def __init__(self, date, start, end):
        self.date, self.start, self.end = date, start, end
class Index:
    """One output record: the page token, a date and its context text."""

    def __init__(self, token, date, info):
        self.token, self.date, self.info = token, date, info
class Page:
    """One wiki page (title, optional infobox markup, article text).

    Produces Index entries for sentences that contain a date expression,
    pairing the page title with the date and a short context window.
    """
    def __init__(self, title, infobox, text):
        self.title = title
        self.infobox = infobox
        self.text = text
        # NOTE(review): `time` is only imported inside the __main__ block, and
        # time.clock() was removed in Python 3.8 -- this only works when the
        # module is run as a script on an older interpreter. Confirm intent.
        self.time = time.clock()
    @staticmethod
    def find_date(text):
        """Return the first date-like match in *text* as a DateInText, or None."""
        date = regex.search(r'(( [a-zA-Z]{3,8}|\d{1,2})[ ]\d{1,2}([ ]|(\,? ))\d{1,4})|(([a-zA-Z]{3,8}|in) \d{4})(?<=[^\}])', text)
        if date:
            start = date.start()
            end = date.end()
            date = date.group(0)
            return DateInText(date, start, end)
        return None
    # def find_token(self, text, date_start, date_end):
    #     token_reg = regex.finditer(r'[A-Z][a-zA-Z]{3,}(?: [A-Z][a-zA-Z]*){0,}', text)
    #
    #     closest_token = None
    #     closest_distance = 100
    #     for token_it in token_reg:
    #         token = token_it.group(0)
    #
    #         if token_it.start() == date_start:
    #             continue
    #
    #         if token_it.end() < date_start and date_start - token_it.end() < closest_distance:
    #             closest_distance = date_start - token_it.end()
    #             closest_token = token
    #         elif token_it.start() - date_end < closest_distance:
    #             closest_distance = token_it.start() - date_end
    #             closest_token = token
    #
    #     return closest_token
    def paragraph_splitter(self, sep):
        """Truncate self.text at the first occurrence of *sep*.

        Returns True when the separator was found (text truncated in place),
        otherwise None.
        """
        text_split = self.text.split(sep=sep)
        if len(text_split) >= 2:
            self.text = text_split[0]
            return True
        else:
            return None
    def parse_text(self):
        """Scan the article body and return Index entries for dated sentences."""
        results = []
        # Strip <ref> tags, wikitables and citation templates before parsing.
        self.text = regex.sub(r'<ref.*\n?.*</ref>', repl="", string=self.text)
        self.text = regex.sub(r'{\| class=\"wikitable.*\|}', repl="", string=self.text, flags=regex.DOTALL)
        self.text = regex.sub(r'{{[cC]ite.*}}', repl="", string=self.text, flags=regex.DOTALL)
        # Drop everything from the first trailing section onward (only one
        # separator is applied -- the first that matches).
        if self.paragraph_splitter(sep='== See also =='):
            pass
        elif self.paragraph_splitter(sep='==Notes=='):
            pass
        elif self.paragraph_splitter(sep='==References=='):
            pass
        elif self.paragraph_splitter(sep='== Bibliography =='):
            pass
        elif self.paragraph_splitter(sep='== External links =='):
            pass
        elif self.paragraph_splitter(sep='=== Sources ==='):
            pass
        sentences_reg = regex.finditer(r'(^| )[A-Z][^\.!?]{5,}[\.!?]', self.text) # possibly [A-Z][^\.!?]{5,}[\.!?] for performance
        for sentence_it in sentences_reg:
            sentence = sentence_it.group(0)
            date_in_text = self.find_date(sentence)
            if date_in_text:
                # Context window: up to 60 chars before and 30 after the date.
                look_before = 60
                look_after = 30
                start = date_in_text.start - look_before if date_in_text.start >= look_before else 0
                end = date_in_text.end + look_after if date_in_text.end + look_after < len(sentence) else len(sentence)
                # if date_in_text.end + look_after > len(sentence):
                #     token = self.find_token(sentence[start:], date_in_text.start, date_in_text.end)
                # else:
                #     token = self.find_token(sentence[start:date_in_text.end + look_after], date_in_text.start, date_in_text.end)
                token_context = sentence[start:end]
                # token with full word at beginning: extend left (max 8 chars)
                # to the previous non-alphanumeric boundary
                i = start
                counter = 0
                while True:
                    i -= 1
                    counter += 1
                    if i < 0 or counter > 8:
                        break
                    if not(sentence[i].isalpha() or sentence[i].isdigit()):
                        token_context = sentence[i+1:start] + token_context
                        break
                # token with full word at end: extend right symmetrically
                i = end
                counter = 0
                while True:
                    i += 1
                    counter += 1
                    if i > len(sentence)-1 or counter > 8:
                        break
                    if not(sentence[i].isalpha() or sentence[i].isdigit()):
                        token_context += sentence[end:end+counter]
                        break
                token_context = token_context.replace('\n', ' ')
                token_context = regex.sub(r'[^a-zA-Z0-9.!?:%$;, ]', '', token_context)
                token_context = token_context.strip()
                results.append(Index(token=self.title, date=date_in_text.date,
                                     info=token_context))
                # NOTE: POS-tagging based token extraction (below) was tried,
                # but its results were often not meaningful, so it is disabled.
                # tokenized = nltk.pos_tag(nltk.word_tokenize(sentence))
                #
                # proper_nouns = []
                # nouns = []
                # for (word, pos) in tokenized:
                #     if pos == 'NNP':
                #         proper_nouns.append(word)
                #     elif pos == 'NN':
                #         nouns.append(word)
                #
                # results.append(Index(token=proper_nouns[0] if proper_nouns else "title", date=date_in_text.date, info=proper_nouns[1] if
                # len(proper_nouns) > 1 else nouns[0] if nouns else ""))
        return results
    def _parse_infobox(self, text: str, title):
        """Return Index entries for infobox "key = value" lines containing a date."""
        result = []
        text = regex.sub(r'\n ?\|', '\n|', text)
        lines = text.split('\n|')
        for line in lines:
            date_in_text = self.find_date(line)
            if date_in_text:
                info = [x.strip() for x in line.split('=')]
                result.append(Index(token=title, date=date_in_text.date, info=info[0].replace('\n', '')))
        return result
    def get_parsed_date_tokens(self):
        """Return all Index entries extracted from this page."""
        results = []
        # if self.infobox:
        #     infobox_results = self._parse_infobox(self.infobox, self.title)
        #     results.extend(infobox_results)
        if self.text:
            text_results = self.parse_text()
            results.extend(text_results)
        return results
class Parser:
    """Turns one raw <page>...</page> XML string into index entries."""
    def __init__(self):
        pass
    def parse_page(self, page):
        """Parse a single wiki page string.

        Extracts title and revision text with ElementTree, then finds a
        balanced {Infobox ...} block via a recursive pattern ((?1) requires
        the third-party `regex` module) and delegates extraction to Page.
        """
        tree = ET.fromstring(page)
        title = tree.find('title').text
        text = tree.find('revision').find('text').text
        infobox = None
        infobox_regex = regex.search(r'(?=\{Infobox)(\{([^{}]|(?1))*\})', page)
        text_start_index = 0
        if infobox_regex:
            text_start_index = infobox_regex.end()
            infobox = infobox_regex.groups(0)[0]
        page = Page(title, infobox, text[text_start_index:])
        return page.get_parsed_date_tokens()
def create_testing_file(name, size, source_path=r'E:\VINF_data\enwiki-20200401-pages-articles.xml'):
    """Copy roughly the first *size* bytes of *source_path* into *name*.

    Generalized: the source dump path, previously hard-coded, is now a
    parameter with the old value as default, so smaller test fixtures can
    be produced from any file.
    """
    one_mb = 1024 * 1024
    reader = Reader(chunk_size=one_mb)
    with open(source_path, encoding='UTF-8') as src, \
            open(name, 'w', encoding='UTF-8') as dst:
        for data in reader.read_till_max(src, max_bytes=size):
            dst.write(data)
if __name__ == '__main__':
    #with open('E:\VINF_data\enwiki-20200401-pages-articles.xml', encoding='UTF-8') as f:
    import cProfile, pstats, io
    import time
    import os
    import random
    # create_testing_file('mild_testing.xml', 1024*1000*100)
    # NOTE(review): this exit() makes everything below unreachable; remove it
    # to actually run start_parse().
    exit(0)
    def start_parse(number_of_errors: int):
        """Stream the XML dump, parse each <page> fragment and append the
        extracted (token, date, context) rows to output.txt."""
        # NOTE(review): time.clock() was removed in Python 3.8 --
        # time.perf_counter() is the modern replacement.
        start_time = time.clock()
        path = "E:\VINF_data\enwiki-20200401-pages-articles.xml"
        reader = Reader(chunk_size=1024 * 1024)
        parser = Parser()
        final_results = []
        with open(path, encoding='UTF-8') as read_file:
            page_tag_length = len('<page>')
            page_end_tag_length = len('</page>')
            data_for_next_chunk = ""
            with open('output.txt', 'w', encoding='UTF-8') as write_file:
                for data in reader.read_in_chunks(read_file):
                    # Prepend the unfinished tail carried over from the
                    # previous chunk so pages spanning chunks are kept whole.
                    data = f'{data_for_next_chunk}{data}'
                    start_pages_positions = [m.start() for m in re.finditer('<page>', data)]
                    end_pages_positions = [m.start() for m in re.finditer('</page>', data)]
                    if not start_pages_positions and not end_pages_positions:
                        continue
                    for i, end in enumerate(end_pages_positions):
                        start = start_pages_positions[i]
                        try:
                            results = parser.parse_page(data[start:end+page_end_tag_length])
                            for result in results:
                                write_file.write('%s\t%s' % (result.token, ";" + result.date + ";" + result.info) + "\n")
                        except Exception as e:
                            print(e)
                            number_of_errors += 1
                    if not end_pages_positions:
                        data_for_next_chunk = data
                    elif len(start_pages_positions) != len(end_pages_positions):
                        # NOTE(review): the inner `if not end_pages_positions`
                        # branch can never be taken -- that case was already
                        # handled by the `if` above.
                        if not end_pages_positions:
                            data_for_next_chunk = data[start_pages_positions[0]:]
                        else:
                            data_for_next_chunk = data[end_pages_positions[-1] + page_end_tag_length:]
        # NOTE(review): final_results is never appended to, so this always
        # prints 0.
        print("len", len(final_results))
        ttime_sec = time.clock() - start_time
        print("total time: ", ttime_sec)
        f_size = os.path.getsize(path)
        print(f"Speed: {(f_size/1000000)/ttime_sec}MB/sec")
        print("error number:" + str(number_of_errors))
    number_of_errors = 0
    start_parse(number_of_errors)
    # For debugging purposes
    # cProfile.run('start_parse()', 'restats')
    # print('Time: ', Reader.g_time)
    # import pstats
    # p = pstats.Stats('restats')
    # p.strip_dirs().sort_stats(-1)
    # p.sort_stats('time').print_stats()
|
"""
python vaccine_availability.py
"""
# standard imports
import requests
import datetime
import json
# import pandas as pd
import smtplib
def logger(line):
    """Append *line* plus a newline to log.txt in the working directory.

    Fixes: open in plain append mode ('a' -- the '+' read capability was
    never used) and with an explicit UTF-8 encoding instead of relying on
    the platform default.
    """
    with open('log.txt', 'a', encoding='utf-8') as f:
        f.write(line + "\n")
"""
To get the state code
for state_code in range(1,40):
# print("State code: ", state_code)
logger("State code: "+ str(state_code))
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{}".format(state_code))
json_data = json.loads(response.text)
for i in json_data["districts"]:
# print(i["district_id"],'\t', i["district_name"])
logger(str(i["district_id"])+'\t'+str(i["district_name"]))
# print("\n")
logger("\n")
"""
# Query defaults: CoWIN district id, look-ahead window (days), seeker age.
DIST_ID = 446
numdays = 20
age = 19
# Print available centre description (y/n)?
print_flag = 'y'
# Pre-computed list of dd-mm-YYYY date strings starting today.
base = datetime.datetime.today()
date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]
date_str = [x.strftime("%d-%m-%Y") for x in date_list]
def getSlots(DIST_ID=446, numdays=20, age=19):
    """Scan the CoWIN public calendar for open vaccination slots.

    Parameters:
        DIST_ID: CoWIN district id to query.
        numdays: number of days, starting today, to check.
        age: seeker's age; only sessions with min_age_limit <= age match.

    Returns:
        (flag_available, Available_Slots): whether any slot was found and
        the list of matching slot description dicts.

    Fix: the numdays argument was previously ignored -- the loop iterated
    the module-level date_str built from the module defaults. Dates are now
    derived from the argument. The unreachable text after the return was
    removed.
    """
    flag_available = False
    Available_Slots = []
    today = datetime.datetime.today()
    dates = [(today + datetime.timedelta(days=x)).strftime("%d-%m-%Y")
             for x in range(numdays)]
    for INP_DATE in dates:
        URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}&date={}".format(
            DIST_ID, INP_DATE)
        response = requests.get(URL)
        if response.ok:
            resp_json = response.json()
            if resp_json["centers"]:
                logger("Checking on: {}".format(INP_DATE))
                # print_flag is a module-level switch enabling detailed logging.
                if(print_flag == 'y' or print_flag == 'Y'):
                    for center in resp_json["centers"]:
                        for session in center["sessions"]:
                            if not int(session["available_capacity"]) == 0:
                                if session["min_age_limit"] <= age:
                                    flag_available = True
                                    dict_to_add = {"Date": INP_DATE,
                                                   "Name": center["name"],
                                                   "Block Name": center["block_name"],
                                                   "Fee Type": center["fee_type"],
                                                   "Available Capacity": session["available_capacity"],
                                                   "Vaccine": session["vaccine"]}
                                    Available_Slots.append(dict_to_add)
                                    logger("\t" + str(center["name"]))
                                    logger("\t" + str(center["block_name"]))
                                    logger("\t Price: " +
                                           str(center["fee_type"]))
                                    logger("\t Available Capacity: " +
                                           str(session["available_capacity"]))
                                    if(session["vaccine"] != ''):
                                        logger("\t Vaccine: " +
                                               str(session["vaccine"]))
                            logger("\n\n")
    return flag_available, Available_Slots
def send_mail(body, receiver_email='swaymsdennings@gmail.com', subject='VACCINE AVAILABILITY NOTIFICATION'):
    """Send *body* to *receiver_email* via Gmail SMTP (STARTTLS on port 587).

    SECURITY NOTE(review): the sender address and password are hard-coded
    below in plain text. They should be moved to environment variables or a
    secrets store, and the exposed credential rotated.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    sender_email = 'pythonhai.000@gmail.com'
    server.login(sender_email, 'machinelearning@#$000')
    msg = f"Subject: {subject}\n\n{body}"
    server.sendmail(
        sender_email,
        receiver_email,
        msg
    )
    # print('Email has been sent !')
    server.quit()
def convert_to_str(Available_Slots):
    """Render a list of slot dicts as "key : value" lines, one blank-line
    separated paragraph per slot."""
    chunks = []
    for slot in Available_Slots:
        body = "".join(f"{key} : {value}\n" for key, value in slot.items())
        chunks.append(body + "\n\n")
    return "".join(chunks)
if __name__ == '__main__':
    # Query district 255 for a 21-year-old over the next 20 days.
    flag_available, Available_Slots = getSlots(DIST_ID=255, numdays=20, age=21)
    msg = "No available slots found"
    body = convert_to_str(Available_Slots) if len(Available_Slots) > 0 else msg
    # NOTE(review): recipients are hard-coded; consider reading from config.
    MAILS = ['swaymsdennings@gmail.com', 'njrfarhandasilva10@gmail.com']
    for mail in MAILS:
        send_mail(body, receiver_email=mail,
                  subject='VACCINE AVAILABILITY NOTIFICATION')
|
from runners.python import Submission
class DavidSubmission(Submission):
    def run(self, s):
        """Apply one round of the knot-hash twist algorithm to a 256-element
        ring and return the product of its first two entries.

        :param s: comma-separated twist lengths as a string
        :return: solution flag (ring[0] * ring[1])
        """
        ring_size = 256
        ring = list(range(ring_size))
        lengths = [int(part) for part in s.split(',')]
        cursor = 0       # current position index
        skip = 0         # skip size, grows by one per length
        for length in lengths:
            # Reverse `length` elements starting at cursor, with wraparound.
            left = cursor
            right = cursor + length - 1
            for step in range(length // 2):
                a = (left + step) % ring_size
                b = (right - step) % ring_size
                ring[a], ring[b] = ring[b], ring[a]
            cursor = (cursor + length + skip) % ring_size
            skip += 1
        return ring[0] * ring[1]
|
# -*- coding: utf-8 -*-
"""
This program fits curves to the PSTH.
INPUT: PSTH data, dose
OUTPUT: parameters
Hint:
The INPUT is matrix (PSTH , duration and dose), so this program execute 3d fitting.
In order to realize 3d fitting, the INPUT is formed as below:
input = [[spike, spike, spike, ...], [dose, dose, dose, ...], [duration, duration, duration, ...]]
Caution!
BIN must be the same value through the input data.
USAGE
$ python fit_PSTH.py [data_dir]
"""
import os
import sys
import math
import copy
import glob
import numpy as np
import matplotlib.pylab as plt
from scipy.optimize import curve_fit
def read_data(path):
    """Parse one PSTH data file.

    Header lines start with "$" and declare BIN, NUM_DATA, START_INDEX,
    STIMULI_DURATION and DOSE; all other lines are "time PSTH" pairs.

    Returns (bin, num, start_index, duration, dose, PSTH, time), where PSTH
    and time are numpy arrays of length num.
    """
    with open(path, "r") as file:
        lines = file.readlines()
        for i, line in enumerate(lines):
            var = line.split()
            if var[0] == "$":
                if var[1] == "BIN":
                    # NOTE: `bin` shadows the builtin of the same name.
                    bin = float(var[2])
                elif var[1] == "NUM_DATA":
                    num = int(var[2])
                    PSTH = np.empty([num])
                    time = np.empty([num])
                elif var[1] == "START_INDEX":
                    start_index = int(var[2])
                elif var[1] == "STIMULI_DURATION":
                    duration = float(var[2])
                elif var[1] == "DOSE":
                    dose = float(var[2])
            else:
                # Assumes exactly 5 header lines precede the data rows (hence
                # the i-5 offset) -- TODO confirm this holds for all inputs.
                time[i-5], PSTH[i-5] = map(float, var)
    return bin, num, start_index, duration, dose, PSTH, time
def save_parameters():
    """Write the fitted parameters to parameters_<duration>ms.txt.

    Reads the module-level globals duration, f_sp, tau_rise, alpha, K,
    tau_fall and mu (set by optimize_parameters / the __main__ block).
    """
    output_file = "parameters_{0}ms.txt".format(int(duration*1000))
    with open(output_file, "w") as f:
        f.write("[{0}ms]\n".format(int(duration*1000)))
        f.write("f_sp = {0}\ntau_rise = {1}\nalpha = {2}\nK = {3}\ntau_fall = {4}\nmu = {5}".format(f_sp, tau_rise, alpha, K, tau_fall, mu))
def draw_fitted_curve(dose, c):
    """Plot the fitted PSTH curve for one *dose* in colour *c*.

    Mutates the dose row of the module-level time_rising_200 and
    time_falling_200 matrices, then evaluates the three fitted segments
    (spontaneous / rising / falling) and plots their concatenation against
    the module-level `time` axis.
    """
    time_rising_200[1,:] = dose * np.ones(int(duration/bin)+1)
    time_falling_200[1,:] = dose * np.ones(right - (int(duration/bin)+1))
    f_before = f_sp * np.ones(len(time_before_200[0]))
    f_rise = rising_spike(time_rising_200, tau_rise, alpha, K, mu)
    f_fall = falling_spike(time_falling_200, tau_fall)
    f_connected = np.hstack((f_before, f_rise, f_fall))
    plt.plot(time, f_connected, "-", label=str(dose)+"ng_fitted_curve", color=c)
def spontaneous(t, f_sp):
    """Constant model: the spontaneous firing rate, independent of time *t*."""
    return f_sp
def Michaelis_Menten(c, alpha, K):
    """Michaelis-Menten saturation curve: alpha / (1 + K/c)."""
    saturation = 1 + K/c
    return alpha / saturation
def rising_spike(data, tau_rise, alpha, K, mu):
    """Rising-phase model: spontaneous rate plus a dose-dependent response.

    data rows: [0] = time, [1] = dose. Uses the module-level global f_sp.
    """
    f_pe = Michaelis_Menten(data[1], alpha, K)
    decay = (1-mu)*np.exp(-(data[0])/tau_rise) + mu
    return f_sp + f_pe * decay
def falling_spike(data, tau_fall):
    """Falling-phase model: exponential decay after stimulus offset.

    data rows: [0] = time, [1] = dose, [2] = stimulus duration. The peak
    rate is the rising-curve value at stimulus offset; relies on the fitted
    module-level globals f_sp, tau_rise, alpha, K and mu.
    """
    # Deep copy so overwriting the time row does not mutate the caller's data.
    joint = copy.deepcopy(data)
    joint[0,:] = joint[2,:]
    fmax = rising_spike(joint, tau_rise, alpha, K, mu)
    # return f_sp + f_max * np.exp(-(data[0]-data[2]-delay)/tau_fall)
    return f_sp + fmax * np.exp(-(data[0]-data[2])/tau_fall)
    # return f_sp + f_max * np.exp(-(t-(start+duration))/tau_rise)
    # return f_sp + f_max * (data[0] - data[2]- tau_fall)**2
def get_index(x):
    """Convert a time value *x* to a bin index using the global bin width."""
    ratio = x / bin
    return int(round(ratio))
def optimize_parameters():
    """Fit the three curve segments in sequence and publish the results.

    Fits spontaneous, then rising, then falling models against the global
    concatenated data arrays; intermediate results are published as module
    globals because the later fits depend on them.

    Returns (f_sp, tau_rise, alpha, K, tau_fall, mu).
    """
    global f_sp, tau_rise, alpha, K, mu
    parameter_optimal ,covariance = curve_fit(spontaneous, time_spontaneous, PSTH_spontaneous)
    f_sp = parameter_optimal[0]
    parameter_optimal ,covariance = curve_fit(rising_spike, time_rising, PSTH_rising)
    tau_rise, alpha, K, mu = parameter_optimal
    # parameter_initial = np.array([50, tau_rise]) #f_max, tau_fall
    parameter_optimal ,covariance = curve_fit(falling_spike, time_falling, PSTH_falling)
    tau_fall = parameter_optimal[0]
    return f_sp, tau_rise, alpha, K, tau_fall, mu
if __name__ == "__main__":
    # NOTE(review): this file is Python 2 (print statements).
    flag_first_data = True
    input_dir = sys.argv[1]
    files = glob.glob("{0}/*.txt".format(input_dir))
    print "{0} files was imported.".format(len(files))
    for file in files:
        print file
        bin, num, start_index, duration, dose, PSTH, time_vec = read_data(file)
        stop_index = start_index + int(duration/bin) + 1
        dose_vec = np.ones(num) * dose
        duration_vec = np.ones(num) * duration
        matrix = np.vstack((time_vec, dose_vec, duration_vec))
        # Concatenate each file's spontaneous / rising / falling segments
        # into the global fitting inputs used by optimize_parameters().
        if flag_first_data:
            time_spontaneous = matrix[:,:start_index]
            time_rising = matrix[:,start_index:stop_index]
            time_falling = matrix[:,stop_index-1:]
            PSTH_spontaneous = PSTH[:start_index]
            PSTH_rising = PSTH[start_index:stop_index]
            PSTH_falling = PSTH[stop_index-1:]
            flag_first_data = False
        else:
            time_spontaneous = np.c_[time_spontaneous, matrix[:,:start_index]]
            time_rising = np.c_[time_rising, matrix[:,start_index:stop_index]]
            # print file
            # print matrix[:,start_index:stop_index]
            time_falling = np.c_[time_falling, matrix[:,stop_index-1:]]
            PSTH_spontaneous = np.hstack((PSTH_spontaneous, PSTH[:start_index]))
            PSTH_rising = np.hstack((PSTH_rising, PSTH[start_index:stop_index]))
            # print PSTH[start_index:stop_index]
            PSTH_falling = np.hstack((PSTH_falling, PSTH[stop_index-1:]))
        # Keep three specific doses aside for plotting the raw data points.
        if file == "parsed_data/Park_1000ms/10000ng_10000.txt":
            time_10000 = matrix[0]
            PSTH_10000 = PSTH
        elif file == "parsed_data/Park_1000ms/3000ng_3000.txt":
            time_3000 = matrix[0]
            PSTH_3000 = PSTH
        elif file == "parsed_data/Park_1000ms/1000ng_1000.txt":
            time_1000 = matrix[0]
            PSTH_1000 = PSTH
    print "==================================="
    f_sp, tau_rise, alpha, K, tau_fall, mu = optimize_parameters()
    print "f_sp = {0}\ntau_rise = {1}\nalpha = {2}\nK = {3}\ntau_fall = {4}\nmu = {5}\n".format(f_sp, tau_rise, alpha, K, tau_fall, mu)
    save_parameters()
    # Axis layout for the fitted-curve plots (bins before/after stimulus).
    left = 50
    duration = 1
    right = 150
    # NOTE(review): np.ndarray(n) allocates uninitialized values; the dose
    # row is overwritten later by draw_fitted_curve().
    time_before_200 = np.vstack((bin*np.arange(-left,0), np.ndarray(left), duration * np.ones(left)))
    time_rising_200 = np.vstack((bin*np.arange(0,int(duration/bin)+1), np.ndarray(int(duration/bin)+1), duration * np.ones(int(duration/bin)+1)))
    time_falling_200 = np.vstack((bin*np.arange(int(duration/bin)+1,right), np.ndarray(right-(int(duration/bin)+1)), duration * np.ones(right-(int(duration/bin)+1))))
    """ dots """
    # time_connected = np.hstack((time_spontaneous, time_rising, time_falling))[0]
    # PSTH = np.hstack((PSTH_spontaneous, PSTH_rising, PSTH_falling))
    plt.plot(time_1000, PSTH_1000, "v", color="blue", label="1000ng_PSTH")
    plt.plot(time_3000, PSTH_3000, "o", color="red", label="3000ng_PSTH")
    plt.plot(time_10000, PSTH_10000, "x", color="green", label="10000ng_PSTH")
    """ x axis for fitted curves """
    time = bin * np.arange(-left, right)
    draw_fitted_curve(1000, "blue")
    draw_fitted_curve(5000, "red")
    draw_fitted_curve(10000, "green")
    plt.rcParams["font.size"] = 15
    plt.title("{0} ms".format(duration * 1000))
    plt.xlabel("time")
    plt.xlim(-5,10)
    plt.ylim(0,160)
    plt.ylabel("PSTH")
    plt.legend()
    plt.show()
|
import pytest
import drjit as dr
import mitsuba as mi
def test01_discr_empty(variants_all_backends_once):
    """Operations on an empty discrete distribution must raise."""
    dist = mi.DiscreteDistribution()
    assert dist.empty()
    with pytest.raises(RuntimeError) as err:
        dist.update()
    assert 'empty distribution' in str(err.value)
def test02_discr_zero_prob(variants_all_backends_once):
    """Constructing a discrete distribution with zero total mass must raise."""
    with pytest.raises(RuntimeError) as err:
        mi.DiscreteDistribution([0, 0, 0])
    assert "no probability mass found" in str(err.value)
def test03_discr_neg_prob(variants_all_backends_once):
    """Negative probability mass must raise on construction."""
    with pytest.raises(RuntimeError) as err:
        mi.DiscreteDistribution([1, -1, 1])
    assert "entries must be non-negative" in str(err.value)
def test04_discr_basic(variants_vec_backends_once):
    """Discrete distribution cdf/pmf match a hand-computed reference."""
    dist = mi.DiscreteDistribution([1, 3, 2])
    assert len(dist) == 3
    assert dist.sum() == 6
    assert dr.allclose(dist.normalization(), 1.0 / 6.0)
    assert dist.pmf() == [1, 3, 2]
    assert dist.cdf() == [1, 4, 6]
    assert dist.eval_pmf([1, 2, 0]) == [3, 2, 1]
    assert dr.allclose(dist.eval_pmf_normalized([1, 2, 0]),
                       mi.Float([3, 2, 1]) / 6.0)
    assert dr.allclose(dist.eval_cdf_normalized([1, 2, 0]),
                       mi.Float([4, 6, 1]) / 6.0)
    assert repr(dist) == 'DiscreteDistribution[\n size = 3,' \
        '\n sum = [6],\n pmf = [1, 3, 2]\n]'
    # Updating the pmf in place must refresh the cdf and normalization.
    dist.pmf()[:] = [1, 1, 1]
    dist.update()
    assert dist.cdf() == [1, 2, 3]
    assert dist.sum() == 3
    assert dr.allclose(dist.normalization(), 1.0 / 3.0)
def test05_discr_sample(variants_vec_backends_once):
    """Discrete sampling matches a hand-computed reference."""
    eps = 1e-7
    dist = mi.DiscreteDistribution([1, 3, 2])
    assert dist.sample([-1, 0, 1, 2]) == [0, 0, 2, 2]
    assert dist.sample([1 / 6.0 - eps, 1 / 6.0 + eps]) == [0, 1]
    assert dist.sample([4 / 6.0 - eps, 4 / 6.0 + eps]) == [1, 2]
    assert dr.allclose(
        dist.sample_pmf([-1, 0, 1, 2]),
        ([0, 0, 2, 2], mi.Float([1, 1, 2, 2]) / 6)
    )
    assert dr.allclose(
        dist.sample_pmf([1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 1], mi.Float([1, 3]) / 6)
    )
    assert dr.allclose(
        dist.sample_pmf([4 / 6.0 - eps, 4 / 6.0 + eps]),
        ([1, 2], mi.Float([3, 2]) / 6)
    )
    assert dr.allclose(
        dist.sample_reuse([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 0, 0, 1], mi.Float([0, .5, 1, 0])),
        atol=3 * eps
    )
    assert dr.allclose(
        dist.sample_reuse_pmf([0, 1 / 12.0, 1 / 6.0 - eps, 1 / 6.0 + eps]),
        ([0, 0, 0, 1], mi.Float([0, .5, 1, 0]), mi.Float([1, 1, 1, 3]) / 6),
        atol=3 * eps
    )
def test06_discr_bruteforce(variants_vec_backends_once):
    """Brute-force validation of discrete sampling with PCG32 densities."""
    rng = mi.PCG32(initseq=dr.arange(mi.UInt64, 50))
    for size in range(2, 20, 5):
        for i in range(2, 50, 5):
            density = mi.Float(rng.next_uint32_bounded(i)[0:size])
            if dr.sum(density)[0] == 0:
                continue
            dist = mi.DiscreteDistribution(density)
            u = dr.linspace(mi.Float, 0, 1, 20)
            idx = dist.sample(u)
            prev_cdf = dr.gather(mi.Float, dist.cdf(), idx - 1, idx > 0)
            u *= dist.sum()
            # Did we sample the right interval?
            assert dr.all((u > prev_cdf) | (dr.eq(u, 0) & (u >= prev_cdf)))
def test07_discr_leading_trailing_zeros(variants_vec_backends_once):
    """Sampling must skip zero-probability buckets at either end."""
    dist = mi.DiscreteDistribution([0, 0, 1, 0, 1, 0, 0, 0])
    idx, pmf = dist.sample_pmf([-100, 0, 0.5, 0.5 + 1e-6, 1, 100])
    assert idx == [2, 2, 2, 4, 4, 4]
    assert pmf == [.5] * 6
def test08_cont_empty(variants_all_backends_once):
    """Operations on an empty continuous distribution must raise."""
    dist = mi.ContinuousDistribution()
    assert dist.empty()
    dist.range()[:] = [1, 2]
    with pytest.raises(RuntimeError) as err:
        dist.update()
    assert 'needs at least two entries' in str(err.value)
    with pytest.raises(RuntimeError) as err:
        mi.ContinuousDistribution([1, 2], [1])
    assert 'needs at least two entries' in str(err.value)
def test09_cont_empty_invalid_range(variants_all_backends_once):
    """Invalid range specifications must raise."""
    with pytest.raises(RuntimeError) as err:
        mi.ContinuousDistribution([1, 1], [1, 1])
    assert 'invalid range' in str(err.value)
    with pytest.raises(RuntimeError) as err:
        mi.ContinuousDistribution([2, 1], [1, 1])
    assert 'invalid range' in str(err.value)
def test10_cont_zero_prob(variants_all_backends_once):
    """Zero total probability mass must raise on construction."""
    with pytest.raises(RuntimeError) as err:
        mi.ContinuousDistribution([1, 2], [0, 0, 0])
    assert "no probability mass found" in str(err.value)
def test11_cont_neg_prob(variants_all_backends_once):
    """Negative probability mass must raise on construction."""
    with pytest.raises(RuntimeError) as err:
        mi.ContinuousDistribution([1, 2], [1, -1, 1])
    assert "entries must be non-negative" in str(err.value)
def test12_cont_eval(variants_vec_backends_once):
    """Continuous 1D pdf/cdf/sampling match a hand-computed reference."""
    dist = mi.ContinuousDistribution([2, 3], [1, 2])
    eps = 1e-6
    assert dr.allclose(dist.max(), 2.0)
    assert dr.allclose(dist.integral(), 3.0 / 2.0)
    assert dr.allclose(dist.normalization(), 2.0 / 3.0)
    assert dr.allclose(
        dist.eval_pdf_normalized([1, 2 - eps, 2, 2.5, 3, 3 + eps, 4]),
        [0, 0, 2.0 / 3.0, 1.0, 4.0 / 3.0, 0, 0]
    )
    assert dr.allclose(
        dist.eval_cdf_normalized([1, 2, 2.5, 3, 4]),
        [0, 0, 5.0 / 12.0, 1, 1]
    )
    assert dist.sample([0, 1]) == [2, 3]
    pos, pdf = dist.sample_pdf([0, 0.5, 1])
    dx = (dr.sqrt(10) - 2) / 2
    assert pos == [2, 2 + dx, 3]
    assert dr.allclose(
        pdf,
        [2.0 / 3.0, (4 * dx + 2 * (1 - dx)) / 3.0, 4.0 / 3.0]
    )
def test13_cont_func(variants_vec_backends_once):
    """Gaussian-integrand distribution matches the analytic erf result."""
    grid = dr.linspace(mi.Float, -2, 2, 513)
    values = dr.exp(-dr.sqr(grid))
    dist = mi.ContinuousDistribution([-2, 2], values)
    assert dr.allclose(dist.max(), 1.0)
    assert dr.allclose(dist.integral(), dr.sqrt(dr.pi) * dr.erf(2.0))
    assert dr.allclose(dist.eval_pdf([1]), [dr.exp(-1)])
    assert dr.allclose(dist.sample([0, 0.5, 1]), [-2, 0, 2])
def test14_irrcont_empty(variants_all_backends_once):
    """Degenerate irregular distributions must raise on construction."""
    dist = mi.IrregularContinuousDistribution()
    assert dist.empty()
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([1], [1])
    assert 'needs at least two entries' in str(err.value)
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([1, 2], [1])
    assert 'size mismatch' in str(err.value)
def test15_irrcont_empty_invalid_range(variants_all_backends_once):
    """Non-increasing node arrays must raise."""
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([2, 1], [1, 1])
    assert 'strictly increasing' in str(err.value)
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([1, 1], [1, 1])
    assert 'strictly increasing' in str(err.value)
def test16_irrcont_zero_prob(variants_all_backends_once):
    """Zero total probability mass must raise on construction."""
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([1, 2, 3], [0, 0, 0])
    assert "no probability mass found" in str(err.value)
def test17_irrcont_neg_prob(variants_all_backends_once):
    """Negative probability mass must raise on construction."""
    with pytest.raises(RuntimeError) as err:
        mi.IrregularContinuousDistribution([1, 2, 3], [1, -1, 1])
    assert "entries must be non-negative" in str(err.value)
def test18_irrcont_simple_function(variants_vec_backends_once):
    """Irregular distribution matches reference values from Mathematica."""
    dist = mi.IrregularContinuousDistribution([1, 1.5, 1.8, 5], [1, 3, 0, 1])
    assert dr.allclose(dist.max(), 3.0)
    assert dr.allclose(dist.integral(), 3.05)
    assert dr.allclose(
        dist.eval_pdf([0, 1, 2, 3, 4, 5, 6]),
        [0, 1, 0.0625, 0.375, 0.6875, 1, 0]
    )
    assert dr.allclose(
        dist.eval_cdf([0, 1, 2, 3, 4, 5, 6]),
        [0, 0, 1.45625, 1.675, 2.20625, 3.05, 3.05]
    )
    assert dr.allclose(
        dist.sample(dr.linspace(mi.Float, 0, 1, 11)),
        [1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
         3.35949, 3.8938, 4.31714, 4.67889, 5.]
    )
    assert dr.allclose(
        dist.sample_pdf(dr.linspace(mi.Float, 0, 1, 11)),
        ([1., 1.21368, 1.35622, 1.47111, 1.58552, 2.49282,
          3.35949, 3.8938, 4.31714, 4.67889, 5.],
         mi.Float([1., 1.85472, 2.42487, 2.88444, 2.14476, 0.216506,
                   0.48734, 0.654313, 0.786607, 0.899653, 1.])
         * dist.normalization())
    )
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Frames Explorer Plugin."""
# Local imports
from spyder.config.base import _
from spyder.api.plugins import Plugins, SpyderDockablePlugin
from spyder.api.plugin_registration.decorators import (
on_plugin_available, on_plugin_teardown)
from spyder.plugins.framesexplorer.confpage import FramesExplorerConfigPage
from spyder.plugins.framesexplorer.widgets.main_widget import (
FramesExplorerWidget)
from spyder.api.shellconnect.mixins import ShellConnectMixin
class FramesExplorer(SpyderDockablePlugin, ShellConnectMixin):
    """Frames Explorer plugin."""
    NAME = 'frames_explorer'
    REQUIRES = [Plugins.IPythonConsole, Plugins.Preferences]
    OPTIONAL = [Plugins.Editor]
    TABIFY = [Plugins.VariableExplorer, Plugins.Help]
    WIDGET_CLASS = FramesExplorerWidget
    CONF_SECTION = NAME
    CONF_FILE = False
    CONF_WIDGET_CLASS = FramesExplorerConfigPage
    DISABLE_ACTIONS_WHEN_HIDDEN = False
    # ---- SpyderDockablePlugin API
    # ------------------------------------------------------------------------
    @staticmethod
    def get_name():
        """Return the localized plugin name."""
        return _('Frames explorer')
    def get_description(self):
        """Return the localized plugin description."""
        return _('Display and explore frames while debugging.')
    def get_icon(self):
        """Return the plugin icon."""
        return self.create_icon('dictedit')
    def on_initialize(self):
        """Nothing to set up beyond what the base classes provide."""
        pass
    @on_plugin_available(plugin=Plugins.Preferences)
    def on_preferences_available(self):
        """Register this plugin's preference page."""
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.register_plugin_preferences(self)
    @on_plugin_teardown(plugin=Plugins.Preferences)
    def on_preferences_teardown(self):
        """Unregister this plugin's preference page."""
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.deregister_plugin_preferences(self)
    @on_plugin_available(plugin=Plugins.Editor)
    def on_editor_available(self):
        """Route edit_goto requests to the editor once it is available."""
        editor = self.get_plugin(Plugins.Editor)
        self.get_widget().edit_goto.connect(editor.load)
    @on_plugin_teardown(plugin=Plugins.Editor)
    def on_editor_teardown(self):
        """Disconnect from the editor when it is torn down."""
        editor = self.get_plugin(Plugins.Editor)
        self.get_widget().edit_goto.disconnect(editor.load)
|
import requests
import json
def get_canvas_data(domain, path, access_key):
    """Fetch every page of a Canvas REST collection and return the merged list.

    Fixes:
    * per_page/access_token are now sent as URL query parameters via
      ``params=``. The original passed them through ``data=``, which places
      them in the request body -- GET request bodies are ignored by the API.
    * Pagination follows the ``next`` relation while it is present. The
      original compared ``r.links['current']`` with ``['last']``, which
      raises KeyError on responses without those Link relations.
    """
    payload = {
        'per_page': 100,
        'access_token': access_key
    }
    r = requests.get(domain + path, params=payload)
    data_list = r.json()
    while 'next' in r.links:
        r = requests.get(r.links['next']['url'], params=payload)
        data_list += r.json()
    return data_list
# Config for the target Canvas instance and course (placeholders: fill in
# COURSE_ID and ACCESS_KEY before running).
domain = "https://canvas.sydney.edu.au/"
course_id = COURSE_ID
access_key = "ACCESS_KEY"
# Get data from canvas
enrollments = get_canvas_data(domain, f"/api/v1/courses/{course_id}/enrollments", access_key)
# Save result as JSON; the context manager guarantees the handle is closed.
# JSON can be directly parsed into Pandas DataFrames later.
with open("enrollments.json", 'w') as output_file:
    json.dump(enrollments, output_file)
|
"""Renders the documentation."""
__license__ = 'MIT'
from subprocess import run
from pathlib import Path
root = Path(__file__).resolve().parent.parent
process = run(['sphinx-build', 'docs', 'deploy/docs'], cwd=root)
if process.returncode:
raise RuntimeError('Error while rendering documentation.')
|
from collections import Counter
from bs4 import BeautifulSoup
import sys
from processors.abstract_processor import AbstractProcessor
from utils.text_utils import tokenizer
__author__ = 'Shyam'
class SectionProcessor(AbstractProcessor):
    """Counts section headings across a wikiextractor-style dump.

    A line is treated as a section heading when it tokenizes to fewer than
    4 tokens and contains no links.
    """
    def __init__(self, wikipath, lang):
        super(SectionProcessor, self).__init__(wikipath)
        self.lang = lang
        # heading text -> number of occurrences across all pages
        self.section_cnt = Counter()
    def process_file(self, f_handle):
        """Split *f_handle* into <doc ...> pages and collect their sections."""
        page_content = ""
        new_page = False
        page_id, page_title = None, None
        sections = []
        for line in f_handle:
            if line.startswith("<doc id="):
                new_page = True
                # Flush the previous page before starting the new one.
                if page_content:
                    self.process_wikicontent(page_content, page_id, page_title, sections)
                page_content = ""
                soup = BeautifulSoup(line, "html.parser")
                doc = soup.find("doc")
                page_id = doc["id"]
                page_title = doc["title"].replace(" ", "_")
                sections = []
            else:
                if new_page:
                    new_page = False
                    continue # do not add this line as it contains title
                if "a href" not in line and len(line.strip()) > 0:
                    tokens = tokenizer(line, lang=self.lang)
                    if len(tokens) < 4:
                        sections.append(line.strip())
                        continue
                page_content += line
    def process_wikicontent(self, page_content, page_id, page_title, sections):
        """Record the section headings observed on one page."""
        self.section_cnt.update(sections)
        # print (page_content)
    def finish(self):
        """Print every heading with its frequency, most common first."""
        for k in self.section_cnt.most_common():
            print(k)
if __name__ == '__main__':
    # Usage: python section_processor.py <wiki-dump-path>
    SectionProcessor(sys.argv[1], lang="en").run()
|
from marketsim import (registry, types, _, ops, event)
from .._basic import Strategy
from .._wrap import wrapper2
from ..side import FundamentalValue
class _Suspendable_Impl(Strategy):
    # Wraps an inner strategy and forwards its orders only while a predicate
    # holds.  NOTE(review): self.inner and self.predicate are presumably
    # injected by the exec'd wrapper2 below -- confirm against _wrap.

    def __init__(self):
        Strategy.__init__(self)
        # Re-route orders created by the inner strategy through onOrderCreated.
        event.subscribe(self.inner.on_order_created, _(self).onOrderCreated, self)

    @property
    def suspended(self):
        # Suspended whenever the predicate evaluates falsy.
        return not self.predicate()

    def onOrderCreated(self, order, source):
        # Forward the order only while trading is allowed.
        if self.predicate():
            self._send(order)
# Python 2 'exec' statement: generates the public Suspendable wrapper class
# around _Suspendable_Impl, exposing the two listed properties with defaults.
exec wrapper2("Suspendable",
              "",
              [
                  ('inner', 'FundamentalValue()', 'types.ISingleAssetStrategy'),
                  ('predicate', 'ops.constant(True)', 'types.IFunction[bool]')
              ], register=False)
|
#!/usr/bin/evn python
# coding:utf-8
import sys
sys.path.append('..')
import time
from lib.science import *
from waypoint import Waypoint
from lib.config import config
from lib.logger import logger
class Attribute(object):
    """Drone attribute/channel facade over the uORB publish/subscribe bus.

    Python 2 code (uses 'except E, e' syntax).  Channel tuples come from
    lib.config; their layout is documented inline below.
    """

    def __init__(self, ORB):
        self.ORB = ORB
        logger.info('Drone Type:{}'.format(config.drone['UAV']))
        logger.info('MainController:{}'.format(config.drone['MainController']))
        self._model = config.drone['Model']
        # Aileron :[No.ch, low ,mid, high ,var, sign, rate]
        self.AIL = config.channels['AIL']
        # Elevator:[No.ch, low ,mid, high ,var, sign, rate]
        self.ELE = config.channels['ELE']
        # Throttle:[No.ch, low ,mid, high ,var, sign, rate]
        self.THR = config.channels['THR']
        # Rudder  :[No.ch, low ,mid, high ,var, sign, rate]
        self.RUD = config.channels['RUD']
        # Mode    :[No.ch , low , Loiter, high]
        self.mode = config.channels['Mode']
        # Helicopters use gyro-rate/pitch channels; others use aux/switch.
        if config.drone['Model'] == 'HELI':
            # GYRO Rate :[No.ch , 0 , pwm]
            self.Rate = config.channels['Rate']
            # PITCH :[No.ch , 0 , pwm]
            self.PIT = config.channels['PIT']
        else:
            # Aux1 :[No.ch , 0 , pwm]
            self.Aux1 = config.channels['Aux1']
            # Aux2 :[No.ch , 0 , pwm]
            self.Aux2 = config.channels['Aux2']
            # Switch :[No.ch , 0 , pwm]
            self.Switch = config.channels['Switch']
        self.wp = Waypoint(ORB)
        self.update_home()
        self.init_altitude()

    def update_home(self):
        """Record the current location as home on the uORB bus."""
        logger.info('Waiting for home location')
        try:
            home = self.get_location()
            self.publish('HomeLocation', home)
            logger.info('Home location :{}'.format(home))
        except AssertionError, e:
            logger.error(e)

    def init_altitude(self):
        """Record the current (absolute) altitude as the reference altitude."""
        logger.info('Waiting for init altitude')
        try:
            init_alt = self.get_altitude(False)
            self.publish('InitAltitude', init_alt)
            logger.info('Init Altitude :{}'.format(init_alt))
        except AssertionError, e:
            logger.error(e)

    def get_stars(self):
        # Number of GPS satellites currently tracked.
        return self.subscribe('NumStars')

    def isArmed(self):
        return self.subscribe('Armed')

    def _armed(self):
        self.publish('Armed', True)

    def _disarmed(self):
        self.publish('Armed', False)

    def download(self, index=1):
        """Download waypoints relative to the current location."""
        try:
            location = self.get_location()
            self.wp.download(location, index)
        except AssertionError, e:
            logger.error(e)

    def Phase(self):
        """Return the 8-slot sign/phase vector for the control channels."""
        phase = [0] * 8
        phase[self.AIL[0]] = self.AIL[5]
        phase[self.ELE[0]] = self.ELE[5]
        phase[self.THR[0]] = self.THR[5]
        phase[self.RUD[0]] = self.RUD[5]
        # phase[self.mode[0]] = 1
        if config.drone['Model'] == 'HELI':
            phase[self.PIT[0]] = self.PIT[5]
        return phase

    def set_channels_mid(self):
        """Sample the receiver and store the current stick centers (Loiter PWM)."""
        logger.info('Catching Loiter PWM...')
        if self.state('Sbus'):
            mid = self.subscribe('ChannelsInput')
        else:
            logger.error('Sbus receiver is not health')
            return False
        logger.info('Channels Mid:{}'.format(mid))
        self.publish('LoiterPWM', mid)
        # Slot 2 of each channel tuple is its mid/neutral PWM.
        self.AIL[2] = mid[self.AIL[0]]
        self.ELE[2] = mid[self.ELE[0]]
        self.THR[2] = mid[self.THR[0]]
        self.RUD[2] = mid[self.RUD[0]]
        if self._model == 'HELI':
            self.Rate[2] = mid[self.Rate[0]]
            self.PIT[2] = mid[self.PIT[0]]
        return True

    def set_gear(self, Gear):
        """Set the gear index; accepts 1..3, stored zero-based."""
        if int(Gear) in [1, 2, 3]:
            self.publish('Gear', int(Gear) - 1)
            return True
        else:
            return False

    def set_target(self, dNorth, dEast):
        """Publish a target dNorth/dEast metres from the current location."""
        try:
            origin = self.get_location()
            target = get_location_metres(origin, dNorth, dEast)
            self.publish('Target', target)
            logger.info('Target is {}'.format(target))
            return True
        except AssertionError, e:
            logger.error(e)
            return False

    def set_target_angle(self, distance, angle):
        """Publish a target *distance* metres away at *angle* degrees."""
        angle = (360 + angle) % 360
        # NOTE(review): assumes lib.science cos/sin take degrees -- confirm.
        dNorth = round(cos(angle) * distance, 2)
        dEast = round(sin(angle) * distance, 2)
        self.set_target(dNorth, dEast)

    # Thin pass-throughs to the uORB bus below.
    def get_target(self):
        return self.ORB.get_target()

    def get_home(self):
        return self.ORB.get_home()

    def get_location(self):
        return self.ORB.get_location()

    def get_heading(self):
        return self.ORB.get_heading()

    def get_altitude(self, relative=False):
        return self.ORB.get_altitude(relative)

    def publish(self, topic, value):
        self.ORB.publish(topic, value)

    def subscribe(self, topic):
        return self.ORB.subscribe(topic)

    def state(self, module):
        return self.ORB.state(module)

    def has_module(self, module):
        return config.has_module(module)
if __name__ == "__main__":
from AF_uORB.uORB import uORB
ORB = uORB()
try:
print attr.get_location()
print attr.get_home()
print attr.get_target()
print attr.get_heading()
except AssertionError, e:
logger.error(e)
|
import random
class Grid:
    """Represents a grid map stored in a tuple of tuples.

    Cells: None = blank/void, 0 = walkable terrain, 1 = obstacle.
    Python 2 code (print statements, xrange).
    """

    def __init__(self, filename):
        """Load the grid from a text file of ' ', '0' and '1' characters."""
        grid_li = []
        with open(filename, 'r') as f:
            for line in f:
                temp = []
                for c in line:
                    if c == ' ':
                        temp.append(None)  # blank space
                    elif c == '0':
                        temp.append(0)  # 0s are regular terrain
                    elif c == '1':
                        temp.append(1)  # 1s are objects
                grid_li.append(temp)
        # Pad ragged rows with None so every row has equal length.
        max_row_length = max([len(row) for row in grid_li])
        for row in grid_li:
            if len(row) < max_row_length:
                for i in xrange(max_row_length - len(row)):
                    row.append(None)
        self.grid = tuple(tuple(row) for row in grid_li)

    def __str__(self):
        return '\n'.join([''.join([' ' if x is None else str(x) for x in row]) for row in self.grid])

    def get_adjacent_positions(self, pos):
        """Return the 4-neighbour positions of *pos* that are regular terrain."""
        adjacent_positions = []
        if pos[0]-1 >= 0 and self.grid[pos[0]-1][pos[1]] == 0:
            adjacent_positions.append((pos[0]-1,pos[1]))
        if pos[0]+1 < len(self.grid) and self.grid[pos[0]+1][pos[1]] == 0:
            adjacent_positions.append((pos[0]+1,pos[1]))
        if pos[1]-1 >= 0 and self.grid[pos[0]][pos[1]-1] == 0:
            adjacent_positions.append((pos[0], pos[1]-1))
        if pos[1]+1 < len(self.grid[0]) and self.grid[pos[0]][pos[1]+1] == 0:
            adjacent_positions.append((pos[0], pos[1]+1))
        return adjacent_positions

    def display_path(self, path):
        """Prints the grid with a path displayed with 8's."""
        # NOTE(review): docstring says 8's, but the path is drawn with '+'.
        path_positions = set(path.path_positions)
        print "Path from {} to {}, length {}:".format(path.path_positions[0], path.path_positions[-1], len(path_positions) - 1)
        display_str = ''
        for row in xrange(len(self.grid)):
            for col in xrange(len(self.grid[row])):
                if (row, col) in path_positions: display_str += '+'
                elif self.grid[row][col] is None: display_str += ' '
                elif self.grid[row][col] == 1: display_str += 'o'
                elif self.grid[row][col] == 0: display_str += '.'
            display_str += '\n'
        print display_str,
        print "Method used: {}; Time taken: {}s".format(path.method, path.time_taken)

    def get_random_position(self):
        """Returns a random position where there is regular terrain."""
        # make a list of all regular terrain positions in grid
        regular_terrain = [(row, col) for row in xrange(len(self.grid)) for col in xrange(len(self.grid[row])) if self.grid[row][col] == 0]
        return random.choice(regular_terrain)
class RandomGrid(Grid):
    """Creates a random grid upon initialization. The dimensions are passed in to the constructor."""

    def __init__(self, width, height, obj_rand = 4, space_rand = 4):
        # Each cell: ~1/(space_rand+1) chance of blank, otherwise
        # ~1/(obj_rand+1) chance of obstacle, else regular terrain.
        # Deliberately does NOT call Grid.__init__ (no file to load).
        self.grid = tuple(tuple(None if random.randint(0,space_rand) == 0 else (1 if random.randint(0,obj_rand) == 0 else 0) for x in xrange(width)) for row in xrange(height))
|
# Read the three input values.
a = float(input("numero a"))
b = float(input("numero b"))
c = float(input("numero c"))

# Sum of the squares divided by the plain sum.
chernobyl = ((a ** 2) + (b ** 2) + (c ** 2)) / (a + b + c)
real = round(chernobyl, 7)

# Show the rounded result.
print(real)
|
from flask import Blueprint, request
from datetime import datetime
from bs4 import BeautifulSoup
from models import ProvView, TotalView, db
import json
from common import get_page, wrap_response
from sqlalchemy import func, and_, or_
import pytz
# Flask blueprint: every route below is served under the /wuhan prefix.
wuhan = Blueprint(
    'wuhan',
    __name__,
    url_prefix='/wuhan'
)
@wuhan.route('/api/scheduler')
def index():
    """Scrape the live outbreak page and persist one national snapshot plus
    one row per province.  Intended to be hit by a scheduler.
    """
    response = get_page()
    if not response:
        return wrap_response("fail")
    soup = BeautifulSoup(response.text, 'lxml')
    # The source page embeds its data as JS assignments inside elements with
    # these ids; parsing depends on that exact page structure.
    area_stat = soup.find(id='getAreaStat')
    total_stat = soup.find(id='getStatisticsService')
    # Extract the JSON literal between '<name> =' and '}catch'.
    area_data = area_stat.text.split('getAreaStat =')[-1].split('}catch')[0]
    area_result = json.loads(area_data)
    overview_data = total_stat.text.split('getStatisticsService =')[-1].split('}catch')[0]
    overview_result = json.loads(overview_data)
    confirmed_cnt = overview_result.get('confirmedCount')
    suspected_cnt = overview_result.get('suspectedCount')
    cured_cnt = overview_result.get('curedCount')
    dead_cnt = overview_result.get('deadCount')
    # Timestamps are recorded in China Standard Time.
    tz = pytz.timezone('Asia/Shanghai')
    tm = datetime.now(tz=tz).strftime("%Y-%m-%d %H:%M:%S")
    total_view = TotalView(tm, confirmed_cnt, suspected_cnt, dead_cnt, cured_cnt)
    db.session.add(total_view)
    db.session.commit()
    for item in area_result:
        name = item.get('provinceShortName')
        confirmed = item.get('confirmedCount')
        cured = item.get('curedCount')
        dead = item.get('deadCount')
        prov = ProvView(tm, name, confirmed, cured, dead)
        db.session.add(prov)
        # NOTE(review): commits once per province; a single commit after the
        # loop would be cheaper -- confirm before changing.
        db.session.commit()
    return wrap_response("success")
@wuhan.route('/plot')
def plot():
    """Return the full national time series (confirmed/suspected/cured/dead)."""
    rows = TotalView.query.filter().all()
    payload = {
        'total': [row.sure + row.suspicion for row in rows],
        'confirmed_vals': [row.sure for row in rows],
        'suspicion_vals': [row.suspicion for row in rows],
        'cured_vals': [row.cured for row in rows],
        'dead_vals': [row.dead for row in rows],
        "dates": [row.added_time for row in rows],
    }
    return wrap_response(0, msg=payload)
@wuhan.route('/prov_plot')
def prov_plot():
    """Return the aggregated time series for every province except Hubei."""
    rows = db.session.query(ProvView.added_time,
                            func.sum(ProvView.cured).label('cured'),
                            func.sum(ProvView.dead).label('dead'),
                            func.sum(ProvView.for_sure).label('sure'))\
        .filter(ProvView.prov != "湖北")\
        .group_by(ProvView.added_time).all()
    # Build the payload once; it is both logged and returned.
    payload = {
        'total': [int(row.sure) for row in rows],
        'confirmed_vals': [int(row.sure) for row in rows],
        # 'suspicion_vals': [row.suspicion for row in rows],
        'cured_vals': [int(row.cured) for row in rows],
        'dead_vals': [int(row.dead) for row in rows],
        "dates": [row.added_time for row in rows],
    }
    print(payload)
    return wrap_response(0, msg=payload)
@wuhan.route('/data', methods=['GET'])
def get_data_by_params():
    """Return a time series selected by ?view=hubei|except|<other>.

    Rows are sampled at the ~10:00 and ~22:00 snapshots between 'from' and
    'to' (inclusive); the most recent snapshot is appended when it is not
    one of those two.
    """
    view = request.args.get('view')
    time_from = request.args.get('from', '2020-01-01')
    # Appending 'z' makes the upper bound sort after any time-of-day suffix,
    # so the 'to' date is effectively inclusive in the string comparison.
    time_to = request.args.get('to', datetime.now().strftime('%Y-%m-%d')) + 'z'
    data = []
    # Timestamp string of the most recently inserted row.
    # NOTE(review): despite the name, this is a full 'Y-m-d H:M:S' string,
    # so the membership test against ('10:0', '22:0') further below is
    # always true -- a substring check was probably intended. Confirm.
    hour = db.session.query(ProvView.added_time).order_by(ProvView.id.desc()).first().added_time
    if view == 'hubei':
        # Hubei only, twice-daily snapshots in the requested window.
        view_data = db.session.query(ProvView.added_time,
                                     func.sum(ProvView.cured).label('cured'),
                                     func.sum(ProvView.dead).label('dead'),
                                     func.sum(ProvView.for_sure).label('confirmed')) \
            .filter(ProvView.prov == "湖北") \
            .filter(or_(ProvView.added_time.like('%10:0%'), ProvView.added_time.like('%22:0%'))) \
            .filter(and_(ProvView.added_time >= time_from, ProvView.added_time <= time_to)) \
            .group_by(ProvView.added_time).all()
        last_data = db.session.query(ProvView.added_time,
                                     func.sum(ProvView.cured).label('cured'),
                                     func.sum(ProvView.dead).label('dead'),
                                     func.sum(ProvView.for_sure).label('confirmed')) \
            .filter(ProvView.prov == "湖北") \
            .filter(ProvView.added_time.like(f'%{hour}%')) \
            .group_by(ProvView.added_time).all()[-1]
    elif view == 'except':
        # Everything except Hubei.
        view_data = db.session.query(ProvView.added_time,
                                     func.sum(ProvView.cured).label('cured'),
                                     func.sum(ProvView.dead).label('dead'),
                                     func.sum(ProvView.for_sure).label('confirmed')) \
            .filter(ProvView.prov != "湖北") \
            .filter(or_(ProvView.added_time.like('%10:0%'), ProvView.added_time.like('%22:0%'))) \
            .filter(and_(ProvView.added_time >= time_from, ProvView.added_time <= time_to)) \
            .group_by(ProvView.added_time).all()
        last_data = db.session.query(ProvView.added_time,
                                     func.sum(ProvView.cured).label('cured'),
                                     func.sum(ProvView.dead).label('dead'),
                                     func.sum(ProvView.for_sure).label('confirmed')) \
            .filter(ProvView.prov != "湖北") \
            .filter(ProvView.added_time.like(f'%{hour}%')) \
            .group_by(ProvView.added_time).all()[-1]
    else:
        # National totals (includes suspected counts).
        view_data = db.session.query(TotalView.added_time,
                                     func.sum(TotalView.cured).label('cured'),
                                     func.sum(TotalView.dead).label('dead'),
                                     func.sum(TotalView.suspicion).label('suspicion'),
                                     func.sum(TotalView.sure).label('confirmed')) \
            .filter(or_(TotalView.added_time.like('%10:0%'), TotalView.added_time.like('%22:0%'))) \
            .filter(and_(TotalView.added_time >= time_from, TotalView.added_time <= time_to)) \
            .group_by(TotalView.added_time).all()
        last_data = db.session.query(TotalView.added_time,
                                     func.sum(TotalView.cured).label('cured'),
                                     func.sum(TotalView.dead).label('dead'),
                                     func.sum(TotalView.suspicion).label('suspicion'),
                                     func.sum(TotalView.sure).label('confirmed')) \
            .filter(TotalView.added_time.like(f'%{hour}%')) \
            .group_by(TotalView.added_time).all()[-1]
    for item in view_data:
        tm = item.added_time
        tm_key = datetime.strptime(tm, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M')
        try:
            suspicion = int(item.suspicion)
        # NOTE(review): bare except -- province rows have no 'suspicion'
        # column, so this defaults it to 0, but it also hides real errors.
        except:
            suspicion = 0
        confirmed = int(item.confirmed)
        cured = int(item.cured)
        dead = int(item.dead)
        point = {
            'total': confirmed + suspicion,
            'confirmed': confirmed,
            'cured': cured,
            'dead': dead,
            'date': tm_key
        }
        if view not in ('hubei', 'except'):
            point.update({'suspicion': suspicion})
        data.append(point)
    # Append the latest snapshot when it is not a 10:00/22:00 one
    # (see NOTE above: this condition is currently always true).
    if hour not in ('10:0', '22:0'):
        tm = last_data.added_time
        tm_key = datetime.strptime(tm, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M')
        try:
            suspicion = int(last_data.suspicion)
        except:
            suspicion = 0
        confirmed = int(last_data.confirmed)
        cured = int(last_data.cured)
        dead = int(last_data.dead)
        point = {
            'total': confirmed + suspicion,
            'confirmed': confirmed,
            'cured': cured,
            'dead': dead,
            'date': tm_key
        }
        if view not in ('hubei', 'except'):
            point.update({'suspicion': suspicion})
        data.append(point)
    return wrap_response(0, msg=view, data=data)
|
from sys import stdin
input = stdin.readline
from collections import defaultdict
# Read N vertices, M edges and the start vertex V, then print the DFS and
# BFS visit orders (smaller-numbered neighbours first).
N, M, V = map(int, input().split())
edges = defaultdict(list)
for _ in range(M):
    a, b = map(int, input().split())
    edges[a].append(b)
    edges[b].append(a)
# Sort adjacency lists so smaller vertices are visited first.
for k in edges.keys():
    edges[k].sort()

# dfs -- push neighbours reversed so the smallest is popped first.
# PERF FIX: membership used to be tested against the 'visited' list (O(n)
# per check); a parallel 'seen' set keeps output order while making the
# check O(1).
visited = []
seen = set()
stack = [V]
while stack:
    vertex = stack.pop()
    if vertex in seen:
        continue
    seen.add(vertex)
    visited.append(vertex)
    stack += edges[vertex][::-1]
print(' '.join(map(str, visited)))

# bfs
visited = []
seen = set()
queue = [V]
while queue:
    vertex = queue.pop(0)
    if vertex in seen:
        continue
    seen.add(vertex)
    visited.append(vertex)
    queue += edges[vertex]
print(' '.join(map(str, visited)))
from sets import Set
from math import log
import re
import operator
import sys
# Global PageRank state (Python 2: uses the deprecated 'sets' module).
P = Set()             # all pages
NP = Set()            # pages that appear as an in-link, i.e. have out-links
Si = Set()            # sink pages, computed later as P - NP
M = dict()            # page -> set of pages that link to it
Mcopy = dict()        # page -> in-link count (filled at the end)
L = dict()            # page -> number of out-links
PR = dict()           # current PageRank values
newPR = dict()        # next-iteration PageRank values
d = 0.85              # damping factor
d1 = 1 - d
perplexity_list = []  # recent perplexities used by the convergence test
infile = sys.argv[1]
input_file = open(infile,"r")
perplexity_output = open("perplexity.txt","w")
def getPage(line):
    """Return the first whitespace-separated token of *line* (the page id)."""
    return line.split()[0]
def getInLinks(line):
    """Return the in-links listed after the page id on *line*.

    NOTE(review): returns a plain list when the line has no links but a Set
    otherwise; callers only iterate, so both work, but the mixed return
    type is fragile.
    """
    if " " not in line :
        return []
    listOfInLinksForLine = line.split()
    # Drop the page id; the remaining tokens are the in-links.
    listOfInLinksForLine.pop(0)
    return Set(listOfInLinksForLine)
def hasNotConverged() :
    """Record the current perplexity and report whether iteration continues.

    Convergence: four consecutive iterations whose perplexity changes by
    less than 1, tracked via the global perplexity_list.
    """
    # Perplexity = 2^H where H is the Shannon entropy of the rank vector.
    H = 0
    for p in P :
        H += PR[p] * log(1.0/PR[p], 2)
    perplexity = 2**H
    perplexity_output.write(str(perplexity))
    perplexity_output.write("\n")
    length_of_perplexity_list = len(perplexity_list)
    if length_of_perplexity_list > 0 :
        if abs(perplexity_list[length_of_perplexity_list - 1] - perplexity) < 1 :
            perplexity_list.append(perplexity)
        else :
            # Change too large: restart the stability streak.
            # NOTE(review): the current value is discarded here, so the next
            # call starts from an empty list -- confirm that is intended.
            del perplexity_list[:]
    else :
        perplexity_list.append(perplexity)
    if len(perplexity_list) == 4 :
        return False
    return True
def initialize() :
    """Parse the input file ('page inlink1 inlink2 ...' per line).

    Fills P (all pages), M (page -> set of pages linking to it), NP (pages
    with at least one out-link) and L (per-page out-link counts).
    """
    for line in input_file :
        page = getPage(str(line))
        P.add(page)
        M[page] = Set()
        listOfInLinks = getInLinks(str(line))
        for inLink in listOfInLinks :
            M[page].add(str(inLink))
            # inLink points at 'page', so it has an out-link.
            NP.add(str(inLink))
            if str(inLink) in L :
                L[str(inLink)] = L[str(inLink)] + 1
            else :
                L[str(inLink)] = 1
initialize()
# Sink pages: never appear as an in-link of anyone, i.e. have no out-links.
Si = P.difference(NP)
N = len(P)
# Start from a uniform rank distribution.
for p in P :
    PR[p] = 1.0/N
#count = 1
#while count <= 100 :
while hasNotConverged() :
    # Rank held by sink pages is redistributed uniformly to all pages.
    sinkPR = 0
    for sink in Si :
        sinkPR += PR[sink]
    for p in P :
        newPR[p] = d1/N
        newPR[p] += d*sinkPR/N
        # Standard damped PageRank contribution from each in-linking page.
        for q in M[p]:
            newPR[p] += d*PR[q]/L[q]
    for p in P :
        PR[p] = newPR[p]
# Write the top 50 pages by rank.
file_of_sorted_items = open("sortedPageRank.txt","w")
sorted_list = sorted(PR.items(), key=operator.itemgetter(1), reverse=True)
top50 = 1
for p in sorted_list :
    file_of_sorted_items.write(p[0] + " " + str(p[1]))
    file_of_sorted_items.write("\n")
    top50 += 1
    if top50 > 50 :
        break
# Write the top 50 pages by in-link count.
for key, value in M.items() :
    Mcopy[key] = len(value)
file_of_sorted_items_inLinks = open("sortedInLinks.txt","w")
sorted_list = sorted(Mcopy.items(), key=operator.itemgetter(1), reverse=True)
top50 = 1
for p in sorted_list :
    file_of_sorted_items_inLinks.write(p[0] + " " + str(p[1]))
    file_of_sorted_items_inLinks.write("\n")
    top50 += 1
    if top50 > 50:
        break
input_file.close()
perplexity_output.close()
file_of_sorted_items.close()
file_of_sorted_items_inLinks.close()
|
import viz
import vizact
import vizshape
import vizinfo
import viztracker
# Rendering / tracking setup.
viz.setMultiSample(4)
viz.fov(60)
viztracker.go()
myTracker = viz.add('sensor.dls')
viztracker.get("movable").setPosition([0,-1.8,-1])
# Preset viewpoint stops for the two tour modes (used by onButtonDown below).
z_coordinates=[-1,-3,-5,-7,-12,-17,-53,-75,-400]
x_coordinates=[-5,0,5,10,17,30,50,80,-400]
MOVE_SPEED= 40
count=0   # current viewpoint stop index
mode=0    # 0 = depth layout, 1 = line-up layout
# Planets: textured, slowly spinning spheres.
# NOTE(review): radii/positions are arbitrary scene units -- confirm scale.
mercuryTexture= viz.addTexture('Mercury.jpg')
mercury= vizshape.addSphere(radius=0.38, slices=20,stacks=20, pos=(0,0,0))
mercury.texture(mercuryTexture)
mercury.alpha(1.0)
mercury.addAction( vizact.spin(0,1,0,5) )
marsTexture= viz.addTexture('Mars.jpg')
mars= vizshape.addSphere(radius=0.53, slices=20,stacks=20, pos=(0,0,-1))
mars.texture(marsTexture)
mars.alpha(.9)
mars.addAction( vizact.spin(0,1,0,5) )
venusTexture= viz.addTexture('Venus.jpg')
venus= vizshape.addSphere(radius=0.95, slices=20,stacks=20, pos=(0,0,-2))
venus.texture(venusTexture)
venus.alpha(.8)
venus.addAction( vizact.spin(0,1,0,5) )
earthTexture= viz.addTexture('Earth.jpg')
earth= vizshape.addSphere(radius=1.0,slices=20,stacks=20, pos=(0,0,-4))
earth.texture(earthTexture)
earth.alpha(.8)
earth.addAction( vizact.spin(0,1,0,5) )
neptuneTexture= viz.addTexture('Neptune.jpg')
neptune= vizshape.addSphere(radius=3.8,slices=20,stacks=20, pos=(0,0,-4))
neptune.texture(neptuneTexture)
neptune.alpha(.8)
neptune.addAction( vizact.spin(0,1,0,5) )
uranusTexture= viz.addTexture('Uranus.jpg')
uranus= vizshape.addSphere(radius=4.00,slices=20,stacks=20, pos=(0,0,-8))
uranus.texture(uranusTexture)
uranus.alpha(.8)
uranus.addAction( vizact.spin(0,1,0,5) )
# Saturn's ring is a flat textured disc rotated into the planet's plane.
saturnRingTexture= viz.addTexture('Rings_2.png')
saturnRing= vizshape.addCircle(15, slices=20)
saturnRing.setEuler(90, 90, 0)
saturnRing.setPosition(0,0,-32)
saturnRing.alpha(.8)
saturnRing.texture(saturnRingTexture)
saturnTexture= viz.addTexture('Saturn.jpg')
saturn= vizshape.addSphere(radius=9.45,slices=20,stacks=20, pos=(0,0,-32))
saturn.texture(saturnTexture)
saturn.alpha(.8)
saturn.addAction( vizact.spin(0,1,0,5) )
saturnRing.addAction( vizact.spin(0,0,1,1) )
jupiterTexture= viz.addTexture('Jupiter_2.jpg')
jupiter= vizshape.addSphere(radius=11.2, slices=20, stacks=20, pos=(0,0,-43))
jupiter.texture(jupiterTexture)
jupiter.alpha(.8)
jupiter.addAction( vizact.spin(0,1,0,5) )
sunTexture= viz.addTexture('Sun_2.jpg')
sun= vizshape.addSphere(radius=173, slices=20, stacks=20, pos=(0,0,-40))
sun.texture(sunTexture)
sun.alpha(.8)
sun.addAction( vizact.spin(0,1,0,5) )
def updateView():
    """Move the main viewpoint with the arrow keys (body-relative axes)."""
    key_vectors = (
        (viz.KEY_UP, (0, 0, 1)),
        (viz.KEY_DOWN, (0, 0, -1)),
        (viz.KEY_LEFT, (-1, 0, 0)),
        (viz.KEY_RIGHT, (1, 0, 0)),
    )
    # Only the first pressed key (in the order above) moves the view,
    # matching the original if/elif chain.
    for key, (dx, dy, dz) in key_vectors:
        if viz.key.isDown(key):
            step = MOVE_SPEED * viz.elapsed()
            viz.MainView.move([dx * step, dy * step, dz * step], viz.BODY_ORI)
            break

# Poll the keyboard every frame.
vizact.ontimer(0, updateView)
def onButtonDown(e):
    """Handle sensor buttons: 0/1 step backward/forward through the preset
    viewpoint stops; 2/3 reset the scene into the depth (mode 0) or
    line-up (mode 1) layout."""
    # BUG FIX: count and mode are module-level state; without this
    # declaration every assignment below made them function-locals and the
    # first read raised UnboundLocalError.  Also replaced 'is' comparisons
    # on small ints with '==' (identity is not a value comparison).
    global count, mode
    if e.button == 0 and count >= 0:
        count = count - 1
        if count > 0 and mode == 0:
            viz.MainView.setPosition([0, 0, z_coordinates[count]])
            viz.MainView.setEuler([0, 0, 0])
        if count > 0 and mode == 1:
            viz.MainView.setPosition([x_coordinates[count], -3, -30])
            viz.MainView.setEuler([0, 0, 0])
        if count < 0:
            count = 0
    elif e.button == 1:
        count = count + 1
        if count < 7 and mode == 0:
            viz.MainView.setPosition([0, 0, z_coordinates[count]])
            viz.MainView.setEuler([0, 0, 0])
        if count < 7 and mode == 1:
            viz.MainView.setPosition([x_coordinates[count], -3, -30])
            viz.MainView.setEuler([0, 0, 0])
        if count > 7:
            count = 7
        if count == 7:
            viz.MainView.setPosition([x_coordinates[count], -3, -30])
    elif e.button == 2:
        # Reset to the depth layout.
        mode = 0
        count = 0
        viztracker.get("movable").setPosition([0, -1.8, -1])
        mercury.setPosition([0, 0, 0])
        mars.setPosition([0, 0, -1])
        venus.setPosition([0, 0, -2])
        earth.setPosition([0, 0, -4])
        neptune.setPosition([0, 0, -4])
        # NOTE(review): uranus starts at z=-8 but is reset to z=+8, and sun
        # starts at z=-40 but is reset to z=-43 -- likely typos; confirm
        # intent before changing the values.
        uranus.setPosition([0, 0, 8])
        saturnRing.setPosition([0, 0, -32])
        saturn.setPosition([0, 0, -32])
        jupiter.setPosition([0, 0, -43])
        sun.setPosition([0, 0, -43])
    elif e.button == 3:
        # Reset to the line-up layout.
        mode = 1
        count = 0
        viztracker.get("movable").setPosition([0, -1.8, -1])
        mercury.setPosition([-5, 0, 0])
        mars.setPosition([0, 0, 0])
        venus.setPosition([5, 0, 0])
        earth.setPosition([10, 0, 0])
        neptune.setPosition([17, 0, 0])
        uranus.setPosition([30, 0, 0])
        saturnRing.setPosition([50, 0, 1])
        saturn.setPosition([50, 0, 0])
        jupiter.setPosition([80, 0, 0])
        sun.setPosition([0, 0, 0])

# NOTE(review): ontimer invokes onButtonDown with no arguments, which raises
# TypeError every tick -- this should almost certainly be a button/sensor
# event callback registration.  Left unchanged pending the correct viz API.
vizact.ontimer(0, onButtonDown)
#array controls (look up VizTracker)
#change of view (cross between version 1 and version 2)
|
from ..grpc_gen import status_pb2
from ..grpc_gen.milvus_pb2 import TopKQueryResult as Grpc_Result
from ..client.abstract import TopKQueryResult
from ..client.exceptions import ParamError
def merge_results(results_list, topk, *args, **kwargs):
    """
    merge query results

    Merges per-file top-k results into a single top-k per query row.
    Returns a Grpc_Result when kwargs['raw'] is truthy, otherwise a
    TopKQueryResult; returns (status, [], []) when results_list is empty.
    """
    def _reduce(source_ids, ids, source_diss, diss, k, reverse):
        """Merge two sorted k-length id/distance batches, keeping the best k."""
        # Fast paths: one batch strictly dominates the other.
        if source_diss[k - 1] <= diss[0]:
            return source_ids, source_diss
        if diss[k - 1] <= source_diss[0]:
            return ids, diss
        source_diss.extend(diss)
        diss_t = enumerate(source_diss)
        diss_m_rst = sorted(diss_t, key=lambda x: x[1], reverse=reverse)[:k]
        diss_m_out = [id_ for _, id_ in diss_m_rst]
        source_ids.extend(ids)
        # Reorder ids by the same indices chosen for the distances.
        id_m_out = [source_ids[i] for i, _ in diss_m_rst]
        return id_m_out, diss_m_out

    status = status_pb2.Status(error_code=status_pb2.SUCCESS,
                               reason="Success")
    reverse = kwargs.get('reverse', False)
    raw = kwargs.get('raw', False)
    if not results_list:
        return status, [], []
    merge_id_results = []
    merge_dis_results = []
    row_num = 0
    for files_collection in results_list:
        if not isinstance(files_collection, Grpc_Result) and \
                not isinstance(files_collection, TopKQueryResult):
            # NOTE(review): this *returns* the exception object instead of
            # raising it, and differs from every other return shape -- a
            # 'raise' was probably intended. Confirm with callers.
            return ParamError("Result type is unknown.")
        row_num = files_collection.row_num
        if not row_num:
            continue
        ids = files_collection.ids
        diss = files_collection.distances  # distance collections
        # Notice: batch_len is equal to topk, may need to compare with topk
        batch_len = len(ids) // row_num
        for row_index in range(row_num):
            id_batch = ids[row_index * batch_len: (row_index + 1) * batch_len]
            dis_batch = diss[row_index * batch_len: (row_index + 1) * batch_len]
            if len(merge_id_results) < row_index:
                raise ValueError("merge error")
            if len(merge_id_results) == row_index:
                # First batch seen for this row: take it as-is.
                merge_id_results.append(id_batch)
                merge_dis_results.append(dis_batch)
            else:
                merge_id_results[row_index], merge_dis_results[row_index] = \
                    _reduce(merge_id_results[row_index], id_batch,
                            merge_dis_results[row_index], dis_batch,
                            batch_len,
                            reverse)
    # Flatten the per-row batches back into flat id/distance arrays.
    # NOTE(review): 'mrege' is a typo for 'merge' in these local names.
    id_mrege_list = []
    dis_mrege_list = []
    for id_results, dis_results in zip(merge_id_results, merge_dis_results):
        id_mrege_list.extend(id_results)
        dis_mrege_list.extend(dis_results)
    raw_result = Grpc_Result(
        status=status,
        row_num=row_num,
        ids=id_mrege_list,
        distances=dis_mrege_list
    )
    return raw_result if raw else TopKQueryResult(raw_result)
|
# Beginner arithmetic exercise (prompts are in Portuguese).
print ('meu primeiro programa em Python')
a = 2
b = 3
soma = a + b
print(soma)
# Interact with the user
a = int(input('Entre com o primeiro valor:'))
b = int(input('Entre com o segundo valor:'))
print(type(a))
soma = a + b
subtracao = a - b
multiplicacao = a * b
divisao = a / b
resto = a % b
print ('soma: {}'.format(soma))
print('soma: ' + str(soma))  # same as print(soma) but with a text label
print(subtracao)
print(multiplicacao)
print(int(divisao))  # int() drops the decimal places / truncates the number
print(resto)
|
def group_words(words):
    """Group anagrams together.

    Words whose sorted letters match fall into the same group; groups (and
    the words within them) keep first-appearance order.
    Returns a list of lists.
    """
    groups = {}
    for word in words:
        key = ''.join(sorted(word))
        # setdefault replaces the manual "if key not in groups" dance.
        groups.setdefault(key, []).append(word)
    # dicts preserve insertion order, so this matches the input order.
    return list(groups.values())

print(group_words(['eat', 'tea', 'tan', 'ate', 'nat', 'bat']))
|
import asyncio
import logging
from dataclasses import dataclass, field
from decimal import Decimal
from typing import List, Optional

from hummingbot.core.data_type.trade_fee import TokenAmount, TradeFeeBase
from hummingbot.core.event.events import OrderType, TradeType
from hummingbot.core.rate_oracle.rate_oracle import RateOracle
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.core.utils.estimate_fee import build_trade_fee
from hummingbot.logger import HummingbotLogger
# Shared Decimal constants reused across proposal calculations.
s_decimal_nan = Decimal("NaN")
s_decimal_0 = Decimal("0")
# Lazily-created module logger (see ArbProposal.logger()).
arbprop_logger: Optional[HummingbotLogger] = None
@dataclass
class ArbProposalSide:
    """
    An arbitrage proposal side which contains info needed for order submission.
    """
    market_info: MarketTradingPairTuple  # connector + trading pair for this leg
    is_buy: bool  # True = buy leg, False = sell leg
    quote_price: Decimal
    order_price: Decimal
    amount: Decimal
    extra_flat_fees: List[TokenAmount]
    # BUG FIX: these were plain class-level defaults (`asyncio.Event()`), so
    # every ArbProposalSide instance shared the same two Event objects and
    # completing/failing one side marked *all* sides completed/failed.
    # default_factory creates a fresh Event per instance.
    completed_event: asyncio.Event = field(default_factory=asyncio.Event)
    failed_event: asyncio.Event = field(default_factory=asyncio.Event)

    def __repr__(self):
        side = "buy" if self.is_buy else "sell"
        return f"Connector: {self.market_info.market.display_name} Side: {side} Quote Price: {self.quote_price} " \
               f"Order Price: {self.order_price} Amount: {self.amount} Extra Fees: {self.extra_flat_fees}"

    @property
    def is_completed(self) -> bool:
        return self.completed_event.is_set()

    @property
    def is_failed(self) -> bool:
        return self.failed_event.is_set()

    def set_completed(self):
        self.completed_event.set()

    def set_failed(self):
        self.failed_event.set()
class ArbProposal:
    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create and cache the module-level logger.
        global arbprop_logger
        if arbprop_logger is None:
            arbprop_logger = logging.getLogger(__name__)
        return arbprop_logger

    """
    An arbitrage proposal which contains 2 sides of the proposal - one buy and one sell.
    """
    def __init__(self, first_side: ArbProposalSide, second_side: ArbProposalSide):
        if first_side.is_buy == second_side.is_buy:
            raise Exception("first_side and second_side must be on different side of buy and sell.")
        self.first_side: ArbProposalSide = first_side
        self.second_side: ArbProposalSide = second_side

    @property
    def has_failed_orders(self) -> bool:
        # True when either leg has been marked failed.
        return any([self.first_side.is_failed, self.second_side.is_failed])

    def profit_pct(
        self,
        rate_source: Optional[RateOracle] = None,
        account_for_fee: bool = False,
    ) -> Decimal:
        """
        Returns a profit in percentage value (e.g. 0.01 for 1% profitability)
        Assumes the base token is the same in both arbitrage sides
        """
        if not rate_source:
            rate_source = RateOracle.get_instance()
        buy_side: ArbProposalSide = self.first_side if self.first_side.is_buy else self.second_side
        sell_side: ArbProposalSide = self.first_side if not self.first_side.is_buy else self.second_side
        base_conversion_pair: str = f"{sell_side.market_info.base_asset}-{buy_side.market_info.base_asset}"
        quote_conversion_pair: str = f"{sell_side.market_info.quote_asset}-{buy_side.market_info.quote_asset}"
        # Base assets are assumed identical (see docstring), so the base
        # conversion rate is fixed at 1; only quotes are converted.
        sell_base_to_buy_base_rate: Decimal = Decimal(1)
        sell_quote_to_buy_quote_rate: Decimal = rate_source.get_pair_rate(quote_conversion_pair)
        buy_fee_amount: Decimal = s_decimal_0
        sell_fee_amount: Decimal = s_decimal_0
        result: Decimal = s_decimal_0
        if sell_quote_to_buy_quote_rate and sell_base_to_buy_base_rate:
            if account_for_fee:
                buy_trade_fee: TradeFeeBase = build_trade_fee(
                    exchange=buy_side.market_info.market.name,
                    is_maker=False,
                    base_currency=buy_side.market_info.base_asset,
                    quote_currency=buy_side.market_info.quote_asset,
                    order_type=OrderType.MARKET,
                    order_side=TradeType.BUY,
                    amount=buy_side.amount,
                    price=buy_side.order_price,
                    extra_flat_fees=buy_side.extra_flat_fees
                )
                sell_trade_fee: TradeFeeBase = build_trade_fee(
                    exchange=sell_side.market_info.market.name,
                    is_maker=False,
                    base_currency=sell_side.market_info.base_asset,
                    quote_currency=sell_side.market_info.quote_asset,
                    order_type=OrderType.MARKET,
                    order_side=TradeType.SELL,
                    amount=sell_side.amount,
                    price=sell_side.order_price,
                    extra_flat_fees=sell_side.extra_flat_fees
                )
                # Express both fees in each side's own quote currency.
                buy_fee_amount: Decimal = buy_trade_fee.fee_amount_in_token(
                    trading_pair=buy_side.market_info.trading_pair,
                    price=buy_side.quote_price,
                    order_amount=buy_side.amount,
                    token=buy_side.market_info.quote_asset,
                    rate_source=rate_source
                )
                sell_fee_amount: Decimal = sell_trade_fee.fee_amount_in_token(
                    trading_pair=sell_side.market_info.trading_pair,
                    price=sell_side.quote_price,
                    order_amount=sell_side.amount,
                    token=sell_side.market_info.quote_asset,
                    rate_source=rate_source
                )
            # Net quote spent on the buy leg and gained on the sell leg.
            buy_spent_net: Decimal = (buy_side.amount * buy_side.quote_price) + buy_fee_amount
            sell_gained_net: Decimal = (sell_side.amount * sell_side.quote_price) - sell_fee_amount
            sell_gained_net_in_buy_quote_currency: Decimal = (
                sell_gained_net * sell_quote_to_buy_quote_rate / sell_base_to_buy_base_rate
            )
            # Profit ratio relative to the quote amount spent (0 if nothing spent).
            result: Decimal = (
                ((sell_gained_net_in_buy_quote_currency - buy_spent_net) / buy_spent_net)
                if buy_spent_net != s_decimal_0
                else s_decimal_0
            )
        else:
            self.logger().warning("The arbitrage proposal profitability could not be calculated due to a missing rate"
                                  f" ({base_conversion_pair}={sell_base_to_buy_base_rate},"
                                  f" {quote_conversion_pair}={sell_quote_to_buy_quote_rate})")
        return result

    def __repr__(self):
        return f"First Side - {self.first_side}\nSecond Side - {self.second_side}"

    def copy(self):
        # Shallow per-field copy; Event state is NOT carried over.
        return ArbProposal(
            ArbProposalSide(self.first_side.market_info, self.first_side.is_buy,
                            self.first_side.quote_price, self.first_side.order_price,
                            self.first_side.amount, self.first_side.extra_flat_fees),
            ArbProposalSide(self.second_side.market_info, self.second_side.is_buy,
                            self.second_side.quote_price, self.second_side.order_price,
                            self.second_side.amount, self.second_side.extra_flat_fees)
        )

    async def wait(self):
        # Block until both legs report completion.
        return await safe_gather(*[self.first_side.completed_event.wait(), self.second_side.completed_event.wait()])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 31 01:18:11 2020
@author: amanda
"""
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(train_x, train_y)
prediction = clf.predict(test_x)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_esrimators=10)
clf.fit(train_x,train_y)
presiction =clf.presict(test_x) |
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from eralchemy import render_er
Base = declarative_base()
class Follower(Base):
    """Follow edge: user_from_id follows user_to_id."""
    __tablename__="follower"
    ID=Column(Integer,primary_key=True)
    # NOTE(review): user_from_id has no ForeignKey to user.id -- confirm.
    user_from_id= Column (Integer,nullable=False)
    user_to_id= Column(Integer,ForeignKey("user.id"),nullable=False)
    user=relationship("User")
class User(Base):
    """Account row with relationships to followers, posts and comments."""
    __tablename__ = "user"
    # Here we define columns for the table person
    # Notice that each column is also a normal Python instance attribute.
    id = Column(Integer, primary_key=True)
    username = Column(String, nullable=False)
    firstname= Column (String,nullable=False)
    lastname= Column(String,nullable=False)
    email= Column(String,nullable=False)
    follower=relationship("Follower")
    post=relationship("Post")
    comment=relationship("Comment")
class Post(Base):
    """A post created by a user; may carry media attachments."""

    __tablename__ = "post"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"), nullable=False)

    user = relationship("User")
    media = relationship("Media")
class Media(Base):
    """A media asset (typed URL) attached to a post."""

    __tablename__ = "media"

    id = Column(Integer, primary_key=True)
    # NOTE: the attribute name shadows the builtin 'type' within the class body.
    type = Column(String, nullable=False)
    url = Column(String, nullable=False)
    post_id = Column(Integer, ForeignKey("post.id"), nullable=False)

    post = relationship("Post")
class Comment(Base):
    """A comment left by a user (author_id) on a post (post_id)."""

    __tablename__ = "comment"

    id = Column(Integer, primary_key=True)
    comment_text = Column(String, nullable=False)
    author_id = Column(Integer, ForeignKey("user.id"), nullable=False)
    post_id = Column(Integer, ForeignKey("post.id"), nullable=False)

    user = relationship("User")

    def to_dict(self):
        """Serialize the comment to a plain dict.

        FIX: this was a stub returning {}; it now exposes the row's
        columns so the object can be JSON-serialized by callers.
        """
        return {
            "id": self.id,
            "comment_text": self.comment_text,
            "author_id": self.author_id,
            "post_id": self.post_id,
        }
## Draw from SQLAlchemy base
# Generate an entity-relationship diagram (diagram.png) from every model
# registered on Base's metadata.
render_er(Base, 'diagram.png')
import psycopg2
import config
import psycopg2.extras
import json
import sys
class Database:
    """Thin wrapper around a psycopg2 connection.

    Both query helpers return a JSON string of the form
    {'code': 0|1, 'message': ..., 'rows': [...]} so callers can pass the
    result straight through as an API response.
    """

    def __init__(self):
        # NOTE(review): psycopg2's connection context manager scopes a
        # *transaction*, not the connection's lifetime — the connection
        # stays open after __init__ returns, which this class relies on.
        with psycopg2.connect(dbname=config.DB_NAME, user=config.USER_NAME, password=config.USER_PASSWORD,
                              host=config.DB_HOST) as connection:
            self.connection = connection
            # DictCursor rows serialize via json.dumps as lists.
            self.cursor = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

    def update_query(self, query, params):
        """Execute a mutating statement and commit; return a status JSON string."""
        try:
            self.cursor.execute(query, params)
            self.connection.commit()
        except Exception as e:
            sys.stderr.write(str(e))
            # FIX: roll back so the connection is not left in an aborted
            # transaction state (otherwise every subsequent query fails too).
            self.connection.rollback()
            return json.dumps({'code': 1, 'message': 'Database error' + str(e), 'rows': []})
        # FIX: message typo 'succesfully' -> 'successfully'.
        resp = json.dumps({'code': 0, 'message': 'Database successfully updated', 'rows': []})
        return resp

    def select_query(self, query, params):
        """Execute a SELECT; return JSON with the fetched rows (possibly empty)."""
        try:
            self.cursor.execute(query, params)
        except Exception as e:
            sys.stderr.write(str(e))
            self.connection.rollback()
            return json.dumps({'code': 1, 'message': 'Database error' + str(e), 'rows': []})
        if self.cursor.rowcount == 0:
            response_rows = []
        else:
            response_rows = self.cursor.fetchall()
        resp = json.dumps({'code': 0, 'message': 'Rows successfully selected', 'rows': response_rows})
        return resp
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Encoder-decoder CNN: 1-channel input -> 2-channel output at input resolution.

    Three stride-2 convolutions downsample by 8x; three transposed
    convolutions upsample back. Every hidden stage is Conv -> BatchNorm -> ReLU.
    """

    def __init__(self):
        super(Net, self).__init__()

        def conv(cin, cout, stride):
            # 3x3 conv (padding 1) followed by BN + ReLU.
            return [nn.Conv2d(cin, cout, 3, stride, 1), nn.BatchNorm2d(cout), nn.ReLU()]

        def upconv(cin, cout):
            # 4x4 stride-2 transposed conv (doubles H/W) followed by BN + ReLU.
            return [nn.ConvTranspose2d(cin, cout, 4, 2, 1), nn.BatchNorm2d(cout), nn.ReLU()]

        modules = (
            conv(1, 64, 2) + conv(64, 128, 1) + conv(128, 128, 2)
            + conv(128, 256, 1) + conv(256, 256, 2) + conv(256, 512, 1)
            + upconv(512, 256) + conv(256, 256, 1)
            + upconv(256, 128) + conv(128, 128, 1)
            + upconv(128, 64)
            + [nn.Conv2d(64, 2, 3, 1, 1)]  # head: no BN/ReLU on the output
        )
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        # Input arrives as (batch, H, W); insert the channel dim Conv2d expects.
        return self.layers(x.unsqueeze(1))
from OpenGL.GL import *
from OpenGL.GLU import *
import math as m
import numpy as np
from mesh import Mesh
class Cube( Mesh ) :
    """Axis-aligned unit cube mesh with each face subdivided into an n x n grid."""

    def __init__( self , n ) :
        # n = number of quads per edge on every face; gen_v returns the
        # (vertices, normals, triangles) buffers consumed by Mesh.
        Mesh.__init__( self , buffers = self.gen_v( n , n ) )

    def gen_v( self , nx , ny ) :
        """Build vertex, normal and triangle-index arrays for the cube.

        nx/ny arrive as quad counts and are bumped to grid-point counts.
        Returns (v, n, t): v and n are (6, nx, ny, 3) float64 arrays of
        positions in [0,1]^3 and per-vertex normals; t is a
        (6, 2, nx-1, ny-1, 3) uint32 array of indices into the flattened
        vertex list — two triangles per grid cell per face.
        """
        nx += 1
        ny += 1
        v = np.zeros( (6,nx,ny,3) , np.float64 )
        n = np.zeros( (6,nx,ny,3) , np.float64 )
        t = np.zeros( (6,2,nx-1,ny-1,3) , np.uint32 )
        for x in range(nx) :
            for y in range(ny) :
                # Faces 0/1 lie in the x=0 / x=1 planes, faces 2/3 in
                # y=1 / y=0, faces 4/5 in z=0 / z=1; the (x, y) grid
                # coordinates span the remaining two axes.
                v[0,x,y] = np.array(( 0 , x/float(nx-1) , y/float(ny-1) ))
                v[1,x,y] = np.array(( 1 , x/float(nx-1) , y/float(ny-1) ))
                v[2,x,y] = np.array(( x/float(nx-1) , 1 , y/float(ny-1) ))
                v[3,x,y] = np.array(( x/float(nx-1) , 0 , y/float(ny-1) ))
                v[4,x,y] = np.array(( x/float(nx-1) , y/float(ny-1) , 0 ))
                v[5,x,y] = np.array(( x/float(nx-1) , y/float(ny-1) , 1 ))
                # One constant axis-aligned normal per face.
                n[0,x,y] = np.array((-1 , 0 , 0 ))
                n[1,x,y] = np.array(( 1 , 0 , 0 ))
                n[2,x,y] = np.array(( 0 , 1 , 0 ))
                n[3,x,y] = np.array(( 0 ,-1 , 0 ))
                n[4,x,y] = np.array(( 0 , 0 ,-1 ))
                n[5,x,y] = np.array(( 0 , 0 , 1 ))
        for y in range(ny-1) :
            for x in range(nx-1) :
                # Vertices are flattened face-major then row-major, so the
                # cell at (x, y) on face i starts at x + y*nx + i*nx*ny.
                # Even and odd faces use opposite winding — presumably so
                # opposing faces both present a front face; TODO confirm
                # against the renderer's culling setup.
                for i in range(0,6,2) :
                    t[i,0,x,y] = np.array(( 0, 1, nx))+ x + y*nx + i*nx*ny
                    t[i,1,x,y] = np.array((1,nx+1,nx))+ x + y*nx + i*nx*ny
                for i in range(1,6,2) :
                    t[i,0,x,y] = np.array(( 0, nx, 1))+ x + y*nx + i*nx*ny
                    t[i,1,x,y] = np.array((1,nx,nx+1))+ x + y*nx + i*nx*ny
        return v , n , t
|
import data_preprocessing
import model
import tensorflow as tf
import numpy as np
# data loading (pickle)
dataset_detection_video, classlbl_to_classid = data_preprocessing.load_data()

# ====== GRID SEARCH TRAINING=========
# Hyper-parameter grids (single values here, but the nested loops support sweeps).
frame_batch = [15]
lstm = [32]
relu = [16]
for i in lstm:
    for j in relu:
        for k in frame_batch:
            print(str(i)+'-'+str(j)+'-'+str(k))
            #features
            # Co-occurrence and co-intersection features computed per
            # k-frame batch, then concatenated along the feature axis.
            coo, feat_type = data_preprocessing.cooccurrence(dataset_detection_video, k)
            coint, feat_type = data_preprocessing.cointersection(dataset_detection_video, k)
            for index, video in enumerate(coint):
                video['sequence'] = np.concatenate((video['sequence'], coo[index]['sequence']),axis=1)
            #splitting train & test
            splitted_data = data_preprocessing.split_data(coint)
            # create the graph
            model.graph(splitted_data,i,j)
            # train & save
            model.train(splitted_data, classlbl_to_classid, 60, 32, feat_type, k)

'''
#========PREDICTION============
# data loading (pickle)
dataset_detection_video, classlbl_to_classid = data_preprocessing.load_data()
rnd_video_index = np.random.choice(len(dataset_detection_video),1)[0]
print(dataset_detection_video[rnd_video_index]['video_name'], dataset_detection_video[rnd_video_index]['class_id'])
#features
preprocessed_dataset, feat_type = data_preprocessing.cooccurrence([dataset_detection_video[rnd_video_index]], 15)
#network input formatting
X,y,seq_len = data_preprocessing.network_input(preprocessed_dataset)
print(y)
# restore & inference
test_y_true_lbl, test_y_pred_lbl = model.predict(X,y,seq_len, classlbl_to_classid)
model.video_pred(dataset_detection_video[rnd_video_index],classlbl_to_classid,test_y_true_lbl,test_y_pred_lbl)
'''
import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Override the default bdist_rpm behavior to do the following:

    1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed to
       disable eggs in RPM distributions.
    3. Replace dash with underscore in the version numbers for better RPM
       compatibility.
    """

    def run(self):
        # ensure distro name is up-to-date
        self.run_command('egg_info')

        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        # RPM's Version: tag cannot contain '-', so publish a mangled
        # version (dashes -> underscores) while keeping the original
        # "unmangled" version around for source paths.
        version = self.distribution.get_version()
        rpmversion = version.replace('-', '_')
        spec = orig.bdist_rpm._make_spec_file(self)
        # distutils emits '%define version <ver>'; swap it for the mangled one.
        line23 = '%define version ' + version
        line24 = '%define version ' + rpmversion
        spec = [
            line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar"
            ).replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed "
            ).replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}"
            ).replace(line23, line24)
            for line in spec
        ]
        # Define %{unmangled_version} immediately after the (now mangled)
        # version line so the replacements above resolve.
        insert_loc = spec.index(line24) + 1
        unmangled_version = "%define unmangled_version " + version
        spec.insert(insert_loc, unmangled_version)
        return spec
|
#Direct_Cam_ver_0.3.py
####################################
### running parameters
####################################
TOOL_DIAMETER=0.125          # cutter diameter (units assumed inches — TODO confirm)
DRILL_PLUNGE_FEED_RATE = 2   # feed rate used for all G1 plunge moves
HOME_X=0
HOME_Y=0
SAFE_HEIGHT=0.6              # clearance height for rapid moves
####################################
### CAM details
####################################
CUT_STEP_DEPTH=0.02          # depth removed per helix pitch / pocket layer
CUT_FACING_LAYER=0.02        # depth removed per facing pass
DRILL_STEP_DEPTH=0.04        # peck depth for multi-plunge drilling
#lateral_overlap_PCT=0.5
####################################
### calculated quantities
####################################
#CUT_STEP_LATERAL=lateral_overlap_PCT*TOOL_DIAMETER
CUT_STEP_LATERAL=CUT_FACING_LAYER   # lateral step-over between adjacent passes
CUT_PITCH_SIZE=CUT_STEP_DEPTH
TOOL_RADIUS = TOOL_DIAMETER/2
#cut_pitches=ceil(cut_depth/CUT_STEP_DEPTH)
#cut_radius = cut_diameter/2
####################################
### cut a helix function
####################################
def Helix_Cut(cut_center_x,cut_center_y,starting_z,cut_depth,cut_radius):
    """Helix down from starting_z to starting_z - cut_depth at cut_radius.

    Emits full-pitch helix passes until less than one pitch of material
    remains, then a partial pitch to the final depth, and finally a flat
    (zero-pitch) finishing circle at the bottom.
    """
    pitch = CUT_STEP_DEPTH
    bottom = starting_z - cut_depth
    z = starting_z
    while z > bottom + pitch:
        Pitch_Cut(cut_center_x, cut_center_y, z, pitch, cut_radius)
        z -= pitch
    Pitch_Cut(cut_center_x, cut_center_y, z, z - bottom, cut_radius)
    Pitch_Cut(cut_center_x, cut_center_y, bottom, 0, cut_radius)
####################################
### cut a pitch function
####################################
def Pitch_Cut(cut_center_x,cut_center_y,starting_z,cut_pitch_depth,cut_radius):
    """Emit G-code for one clockwise helical pitch of a circular cut.

    Starts at the top of the circle (center + radius in Y) and descends
    cut_pitch_depth over one full G2 revolution in the XY plane.

    FIX: print statements converted to single-argument print() calls,
    which behave identically on Python 2 and Python 3.
    NOTE(review): tool-radius compensation is done by the caller (see
    Hole_cut); the commented-out compensation below is intentionally dead.
    """
    #COMPENSATION
    # ADD GLOBAL parameter ???
    #cut_radius=cut_radius-TOOL_RADIUS
    #if cut_radius<0:
    #    print ("warning radius compansaion has resulted in NEGATIVE radius tool too large")
    starting_x = cut_center_x
    starting_y = cut_center_y+cut_radius
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(starting_x,starting_y,starting_z))
    print("G17")  # select the XY plane for the arc
    print("G2 X{:.4f} Y{:.4f} Z{:.4f} I{:.4f} J{:.4f}".format(starting_x,starting_y,starting_z-cut_pitch_depth,0,-1*cut_radius))
####################################
### drill a hole function
####################################
def Drill_Plunge(cut_center_x,cut_center_y,starting_z,bottom_z):
    """Single straight drill plunge: rapid to start, feed to bottom_z, rapid back up.

    FIX: print statements converted to print() calls (identical output on
    Python 2 and 3).
    """
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(cut_center_x,cut_center_y,starting_z))
    print("G1 X{:.4f} Y{:.4f} z{:.4f} f{:.4f}".format(cut_center_x,cut_center_y,bottom_z,DRILL_PLUNGE_FEED_RATE))
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(cut_center_x,cut_center_y,starting_z))
####################################
### drill a hole carefully removing material function
####################################
def Drill_MultiPlunge(cut_center_x,cut_center_y,starting_z,bottom_z):
    """Peck-drill a hole: plunge in DRILL_STEP_DEPTH increments, fully
    retracting between pecks (to clear chips), ending with a final feed
    to bottom_z.

    FIX: print statements converted to print() calls (identical output on
    Python 2 and 3).
    """
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(cut_center_x,cut_center_y,starting_z))
    z=starting_z-DRILL_STEP_DEPTH
    while(z>bottom_z):
        print("G1 x{:.4f} y{:.4f} z{:.4f} f{:.4f}".format(cut_center_x,cut_center_y,z,DRILL_PLUNGE_FEED_RATE))
        print("G0 x{:.4f} y{:.4f} z{:.4f}".format(cut_center_x,cut_center_y,starting_z))
        z=z-DRILL_STEP_DEPTH
    z=bottom_z
    print("G1 x{:.4f} y{:.4f} z{:.4f} f{:.4f}".format(cut_center_x,cut_center_y,z,DRILL_PLUNGE_FEED_RATE))
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(cut_center_x,cut_center_y,starting_z))
####################################
### Go to Safe Hight
####################################
def Go_High():
    """Rapid straight up to the configured safe clearance height.

    FIX: print statement converted to a print() call (identical output on
    Python 2 and 3).
    """
    print("G0 z{:.4f}".format(SAFE_HEIGHT))
####################################
### Go Home
####################################
def Go_Home():
    """Retract to SAFE_HEIGHT, then rapid to the XY home position.

    FIX: print statement converted to a print() call (identical output on
    Python 2 and 3).
    """
    Go_High()
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(HOME_X,HOME_Y,SAFE_HEIGHT))
#####################################
### Cut hole Function
#####################################
def Hole_cut(cut_center_x,cut_center_y,starting_z,bottom_z,cut_diameter):
    """Bore a circular hole: drill out the centre, then helix-mill
    outward in CUT_STEP_LATERAL steps up to the tool-compensated radius,
    finishing with one pass at the exact final radius."""
    # Tool-path radius = requested radius minus the tool radius.
    path_radius = cut_diameter / 2
    path_radius = path_radius - TOOL_RADIUS
    depth = starting_z - bottom_z
    ### Open the center of the bore so the helix passes cut into clear space.
    Drill_MultiPlunge(cut_center_x, cut_center_y, starting_z, bottom_z)
    # Machine the hole from the inside outward.
    r = 1.0 * CUT_STEP_LATERAL
    while r < path_radius:
        Helix_Cut(cut_center_x, cut_center_y, starting_z, depth, r)
        r = r + CUT_STEP_LATERAL
    ### Finish the hole to the exact diameter.
    Helix_Cut(cut_center_x, cut_center_y, starting_z, depth, path_radius)
#####################################
### Cut hole carefully Function
#####################################
def Hole_cut_layered(cut_center_x,cut_center_y,starting_z,bottom_z,cut_diameter):
    """Bore a hole carefully, one CUT_STEP_DEPTH slab at a time, calling
    Hole_cut for each slab and retracting to the safe height at the end."""
    layer_top = starting_z
    layer_bottom = starting_z - CUT_STEP_DEPTH
    while layer_bottom > bottom_z:
        Hole_cut(cut_center_x, cut_center_y, layer_top, layer_bottom, cut_diameter)
        layer_top = layer_bottom
        layer_bottom = layer_bottom - CUT_STEP_DEPTH
        if layer_bottom < bottom_z:
            # Clamp the last slab to the target depth.
            layer_bottom = bottom_z
    Hole_cut(cut_center_x, cut_center_y, layer_top, layer_bottom, cut_diameter)
    Go_High()
#COMING SOON!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#####################################
### Cartesian Pitch cut row
#####################################
def Lap_cut_x(x_min,x_max,y,cut_step_lateral,z):
    """One back-and-forth facing lap along X: out at y, step over by
    cut_step_lateral, and return.

    FIX: print statements converted to print() calls (identical output on
    Python 2 and 3).
    """
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x_min,y,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x_max,y,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x_max,y+cut_step_lateral,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x_min,y+cut_step_lateral,z))
#####################################
### Cartesian Pitch cut column
#####################################
def Lap_cut_y(y_min,y_max,x,cut_step_lateral,z):
    """One back-and-forth facing lap along Y: out at x, step over by
    cut_step_lateral, and return.

    FIX: print statements converted to print() calls (identical output on
    Python 2 and 3).
    """
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x,y_min,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x,y_max,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x+cut_step_lateral,y_max,z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(x+cut_step_lateral,y_min,z))
#####################################
### Z face
#####################################
def Z_Face(x_min,x_max,y_min,y_max,z_min,z_start):
    """Face the top of the stock down to z_min in CUT_FACING_LAYER layers,
    covering each layer with overlapping X-direction laps.

    FIX: print statement converted to a print() call (identical output on
    Python 2 and 3).
    """
    #Go_High()
    #print "G0 x{:.4f} y{:.4f} z{:.4f}".format(x_min,y_min,SAFE_HEIGHT)
    print("G0 x{:.4f} y{:.4f} z{:.4f}".format(x_min,y_min,z_start))
    z = z_start
    while(z>z_min):
        z=z-CUT_FACING_LAYER
        if (z<z_min):
            z=z_min          # clamp the final layer to the target depth
        y=y_min
        while(y<y_max):
            Lap_cut_x(x_min,x_max,y,CUT_STEP_LATERAL,z)
            y=y+2.0*CUT_STEP_LATERAL    # each lap covers two step-overs
        # One last lap flush with the far edge.
        y=y_max-CUT_STEP_LATERAL
        Lap_cut_x(x_min,x_max,y,CUT_STEP_LATERAL,z)
#####################################
### perimeter face
#####################################
def Perimeter_Face(end_1_x,end_1_y,end_2_x,end_2_y,face_dir_x,face_dir_y,starting_clerance,top_z,bottom_z):
    """Face a vertical wall along the segment end_1 -> end_2, approaching
    from starting_clerance toward the wall in CUT_STEP_LATERAL steps,
    finishing with a pass on the wall itself.

    BUG FIX: hyp was computed as (delta_x^2+delta_y^2) — '^' is bitwise
    XOR (a TypeError on floats), and the square root was missing. The
    offsets divide by the segment length, so hyp must be the Euclidean
    norm of (delta_x, delta_y).
    """
    delta_x=abs(end_2_x-end_1_x)
    delta_y=abs(end_2_y-end_1_y)
    hyp=(delta_x**2+delta_y**2)**0.5
    clerance=starting_clerance
    while(clerance>0):
        # Offset perpendicular to the segment, signed by the facing direction.
        x_offset=clerance*(delta_y/hyp)*face_dir_x
        y_offset=clerance*(delta_x/hyp)*face_dir_y
        Perimeter_Face_Layer(end_1_x+x_offset,end_1_y+y_offset,end_2_x+x_offset,end_2_y+y_offset,face_dir_x,face_dir_y,top_z,bottom_z)
        clerance=clerance-CUT_STEP_LATERAL
    Perimeter_Face_Layer(end_1_x,end_1_y,end_2_x,end_2_y,face_dir_x,face_dir_y,top_z,bottom_z)
#####################################
### perimeter face Layer
#####################################
def Perimeter_Face_Layer(end_1_x,end_1_y,end_2_x,end_2_y,face_dir_x,face_dir_y,top_z,bottom_z):
    """Cut one trench pass offset from the wall by the tool radius.

    BUG FIX: same defect as Perimeter_Face — hyp used '^' (bitwise XOR)
    with no square root; replaced with the Euclidean segment length.
    """
    delta_x=abs(end_2_x-end_1_x)
    delta_y=abs(end_2_y-end_1_y)
    hyp=(delta_x**2+delta_y**2)**0.5
    # Offset the path by the tool radius, perpendicular to the segment.
    x_offset=TOOL_RADIUS*(delta_y/hyp)*face_dir_x
    y_offset=TOOL_RADIUS*(delta_x/hyp)*face_dir_y
    Trench_cut(end_1_x+x_offset,end_1_y+y_offset,end_2_x+x_offset,end_2_y+y_offset,top_z,bottom_z)
#####################################
### Trench
#####################################
def Trench_cut(end_1_x,end_1_y,end_2_x,end_2_y,top_z,bottom_z):
    """Cut a straight trench between the two endpoints down to bottom_z.

    Each Trench_Pitch_Cut pass cuts at depth and returns one pitch lower,
    so successive passes advance by two pitch depths; a final pass ends
    exactly at bottom_z.
    """
    depth = top_z
    while depth > bottom_z:
        Trench_Pitch_Cut(end_1_x, end_1_y, end_2_x, end_2_y, depth, CUT_STEP_DEPTH)
        depth -= 2 * CUT_STEP_DEPTH
    Trench_Pitch_Cut(end_1_x, end_1_y, end_2_x, end_2_y, bottom_z + CUT_STEP_DEPTH, CUT_STEP_DEPTH)
def Trench_Pitch_Cut(end_1_x,end_1_y,end_2_x,end_2_y,starting_z,cut_pitch_depth):
    """One trench pass: traverse end_1 -> end_2 at starting_z, drop one
    pitch at the far end, and return at the lower depth.

    FIX: print statements converted to print() calls (identical output on
    Python 2 and 3).
    """
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(end_1_x,end_1_y,starting_z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(end_2_x,end_2_y,starting_z))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(end_2_x,end_2_y,starting_z-cut_pitch_depth))
    print("G1 x{:.4f} y{:.4f} z{:.4f}".format(end_1_x,end_1_y,starting_z-cut_pitch_depth))
#####################################
### Square hole
#####################################
def Rectangular_Hole(x_min,x_max,y_min,y_max,z_min,z_start):
    """Rough out a rectangular pocket: face it layer by layer, then clean
    both X walls down to z_min.

    BUG FIX: both wall-finishing loops tested `while(z>z_min)` but never
    decremented z, so they looped forever. z now steps down by
    CUT_STEP_DEPTH per layer (assumed to match the other depth loops in
    this file — TODO confirm the intended wall layer step).
    """
    # Compensate pocket extents inward by the tool radius.
    x_min=x_min+TOOL_RADIUS
    x_max=x_max-TOOL_RADIUS
    y_min=y_min+TOOL_RADIUS
    y_max=y_max-TOOL_RADIUS
    Z_Face(x_min,x_max,y_min,y_max,z_min,z_start)
    # Finish the x_min wall in depth layers, ending exactly at z_min.
    z=z_start
    while(z>z_min):
        Lap_cut_y(y_min,y_max,x_min,CUT_STEP_LATERAL,z)
        z=z-CUT_STEP_DEPTH
    Lap_cut_y(y_min,y_max,x_min,CUT_STEP_LATERAL,z_min)
    # Finish the x_max wall the same way.
    z=z_start
    while(z>z_min):
        Lap_cut_y(y_min,y_max,x_max,CUT_STEP_LATERAL,z)
        z=z-CUT_STEP_DEPTH
    Lap_cut_y(y_min,y_max,x_max,CUT_STEP_LATERAL,z_min)
###################################################################################
#####################################
### G-code generating code here
#####################################
###################################################################################
####################################
### CAD details and high level code for design of piece
####################################
# Stock/facing extents (units assumed inches — TODO confirm machine setup).
x_min = 0
x_max = 2
y_min = 0
y_max = 2
z_min = 0.5
z_start = 0.6
# Face the top of the stock down from z_start to z_min.
Z_Face(x_min,x_max,y_min,y_max,z_min,z_start)
# Bore a 0.5-diameter through hole at the centre of the faced area.
cut_center_x = 1
cut_center_y = 1
cut_diameter = 0.5
starting_z=0.55
bottom_z=0
Hole_cut_layered(cut_center_x,cut_center_y,starting_z,bottom_z,cut_diameter)
####################################################################################
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 12:16:28 2019
@author: tnye
"""
###############################################################################
# Script that goes through observed waveforms from the 2010 M7.8 Mentawai event,
# calculates intensity measures, and stores it all in a flatfile. The IMs
# this script calculates are:
# PGD
# PGA
# PGV
# Displacement spectra bin averages for 20 bins
# Acceleration spectra bin averages for 20 bins
# Velocity spectra bin averages for 20 bins
###############################################################################
# Imports
import numpy as np
import pandas as pd
import obspy
from glob import glob
# Local Imports
import tsueqs_main_fns as tmf
import signal_average_fns as avg
import IM_fns
################################ Parameters ###################################

# Used for directory paths
earthquake_name = 'Mentawai2010'

# Data types to loop through. I have a folder for displacement ('disp') and a
# folder for acceleration ('accel'), so those are my data types.
data_types = ['disp', 'accel']

# Project directory
proj_dir = '/Users/tnye/tsuquakes'

# Table of earthquake data
eq_table_path = '/Users/tnye/tsuquakes/data/misc/events.csv'
eq_table = pd.read_csv(eq_table_path)

# Data directories- one for displacement and one for strong motion (acc)
data_dir = proj_dir + '/data'
disp_dir = data_dir + '/' + earthquake_name + '/disp'
sm_dir = data_dir + '/' + earthquake_name + '/accel'

# Path to send flatfiles of intensity measures
flatfile_path = proj_dir + '/flatfiles/obs_IMs2.csv'

# Parameters for integration to velocity and filtering
fcorner = 1/15.                          # Frequency at which to high pass filter
order = 2                                # Number of poles for filter

# Gather displacement and strong motion files (sorted so components group
# consecutively per station)
disp_files = np.array(sorted(glob(disp_dir + '/*.mseed')))
sm_files = np.array(sorted(glob(sm_dir + '/*.mseed')))

################################ Event Data ###################################
origin = pd.to_datetime('2010-10-25T14:42:22')
eventname = earthquake_name
# Row 11 of the events table holds this event's metadata (presumably the
# Mentawai row — confirm against events.csv).
country = eq_table['Country'][11]
origintime = eq_table['Origin Time (UTC)*'][11]
hyplon = eq_table['Longitude'][11]
hyplat = eq_table['Latitude'][11]
hypdepth = eq_table['Depth (km)'][11]
mw = eq_table['Mw'][11]
# Seismic moment from moment magnitude.
m0 = 10**(mw*(3/2.) + 9.1)
nostations_i = eq_table['No. Sta'][11]
mechanism = eq_table['Mechanism'][11]

########## Initialize lists for the event and station info for the df #########
eventnames = np.array([])
countries = np.array([])
origintimes = np.array([])
hyplons = np.array([])
hyplats = np.array([])
hypdepths = np.array([])
mws = np.array([])
m0s = np.array([])
mechanisms = np.array([])
networks = np.array([])
stations = np.array([])
stn_type_list = np.array([])
stlons = np.array([])
stlats = np.array([])
stelevs = np.array([])
hypdists = np.array([])
instrument_codes = np.array([])
nostations = np.array([])
E_Td_list = np.array([])
N_Td_list = np.array([])
Z_Td_list = np.array([])
horiz_Td_list = np.array([])
comp3_Td_list = np.array([])
pga_list = np.array([])
pgv_list = np.array([])
pgd_list = np.array([])
tPGD_orig_list = np.array([])
tPGD_parriv_list = np.array([])
tPGA_orig_list = np.array([])
tPGA_parriv_list = np.array([])
tPGA_list = np.array([])
# Spectra rows are fixed-width lists (60 values per station), kept as
# plain Python lists rather than numpy arrays.
disp_speclist = []
acc_speclist = []
vel_speclist = []

###################### Data Processing and Calculations #######################

# Threshold- used to calculate duration
threshold = 0.0
# Loop through data types
for data in data_types:
###################### Set parameters for data type #######################
if data == 'disp':
# Get metadata file
metadata_file = data_dir + '/' + eventname + '/' + eventname + '_disp.chan'
# Get mseed files
files = disp_files
# Types of IMs associated with this data type
IMs = ['pgd']
# Number of samples to use in computing the pre-event baseline
nsamples = 10
# Channel code prefix
code = 'LX'
# Filtering
# Displacement data don't need to be highpass filtered
filtering = False
elif data == 'accel':
# Get metadata file
metadata_file = data_dir + '/' + eventname + '/' + eventname + '_sm.chan'
# Get mseed files
files = sm_files
# Remove SISI sm station (SNR too low)
for file in files:
if 'SISI' in file:
files = np.delete(files, np.argwhere(files == file))
# Types of IMs associated with this data type
IMs = ['pga', 'pgv']
# Number of samples to use in computing the pre-event baseline
nsamples = 100
# Channel code prefix
code = 'HN'
# Filtering
# Acceleration data need to be highpass filtered
filtering = True
############################# Get metadata ################################
# Read in metadata file
metadata = pd.read_csv(metadata_file, sep='\t', header=0,
names=['net', 'sta', 'loc', 'chan', 'lat',
'lon', 'elev', 'samplerate', 'gain', 'units'])
# There might be white spaces in the station name, so remove those
metadata.sta = metadata.sta.astype(str)
metadata.sta = metadata.sta.str.replace(' ','')
# Obtain gain and units
gain = metadata['gain'][0]
units = metadata['units'][0]
######################## Get station data and files #######################
# Create lists to add station names, channels, and miniseed files to
stn_name_list = []
channel_list = []
mseed_list = []
# Group all files by station since there should be 3 components for each
# station
N = 3
stn_files = [files[n:n+N] for n in range(0, len(files), N)]
# Loop over files to get the list of station names, channels, and mseed files
for station in stn_files:
# Initialize lists for components and mseed files for this station
components = []
mseeds = []
# Get station name and append to station name list
stn_name = station[0].split('.')[0].split('/')[-1]
stn_name_list.append(stn_name)
# Loop through station mseed files
for mseed_file in station:
# Get channel code and append to components list
channel_code = mseed_file.split('/')[-1].split('.')[1]
components.append(channel_code)
# Append mseed file to mseed files list
mseeds.append(mseed_file)
# Append station's channel code list to channel list for all stations
channel_list.append(components)
# Append station's mseed files list to mseed files list for all stations
mseed_list.append(mseeds)
#################### Begin Processing and Calculations ####################
# Loop over the stations for this earthquake, and start to run the computations:
for i, station in enumerate(stn_name_list):
# Get the components for this station (E, N, and Z):
components = []
for channel in channel_list[i]:
components.append(channel[2])
# Get the metadata for this station from the chan file - put it into
# a new dataframe and reset the index so it starts at 0
if country == 'Japan':
station_metadata = metadata[(metadata.net == station[0:2]) & (metadata.sta == station[2:])].reset_index(drop=True)
else:
station_metadata = metadata[metadata.sta == station].reset_index(drop=True)
# Pull out the data. Take the first row of the subset dataframe,
# assuming that the gain, etc. is always the same:
stnetwork = station_metadata.loc[0].net
stlon = station_metadata.loc[0].lon
stlat = station_metadata.loc[0].lat
stelev = station_metadata.loc[0].elev
stsamprate = station_metadata.loc[0].samplerate
stgain = station_metadata.loc[0].gain
######################### Start computations ##########################
# Compute the hypocentral distance
hypdist = tmf.compute_rhyp(stlon,stlat,stelev,hyplon,hyplat,hypdepth)
# Append the earthquake and station info for this station to their lists
eventnames = np.append(eventnames,eventname)
countries = np.append(countries,country)
origintimes = np.append(origintimes,origintime)
hyplons = np.append(hyplons,hyplon)
hyplats = np.append(hyplats,hyplat)
hypdepths = np.append(hypdepths,hypdepth)
mws = np.append(mws,mw)
m0s = np.append(m0s,m0)
mechanisms = np.append(mechanisms,mechanism)
networks = np.append(networks,stnetwork)
stations = np.append(stations,station)
stlons = np.append(stlons,stlon)
stlats = np.append(stlats,stlat)
stelevs = np.append(stelevs,stelev)
hypdists = np.append(hypdists,hypdist)
if data == 'disp':
stn_type_list = np.append(stn_type_list, 'GNSS')
elif data == 'accel':
stn_type_list = np.append(stn_type_list, 'SM')
# Initialize list for all spectra at this station
station_spec = []
# Turn the components list into an array
components = np.asarray(components)
# Get the values for the E component
if 'E' in components:
# Get index for E component
E_index = np.where(components=='E')[0][0]
# Read file into a stream object
E_raw = obspy.read(mseed_list[i][E_index])
# Correct by gain, so everything is in meters
E_gaincorr = tmf.correct_for_gain(E_raw ,stgain)
# Get the pre-event baseline
E_baseline = tmf.compute_baseline(E_gaincorr,nsamples)
# Get the baseline corrected stream object
E_basecorr = tmf.correct_for_baseline(E_gaincorr,E_baseline)
# High pass filter strong motion data at fcorner specified above
if filtering == True:
E_filt = tmf.highpass(E_basecorr,fcorner,stsamprate,order,zerophase=True)
E_record = E_filt
else:
E_record = E_basecorr
# Get the duration, stream file time of start, and time of stop of shaking
E_Td, E_start, E_end = tmf.determine_Td(threshold,E_record)
E_Td_list = np.append(E_Td_list,E_Td)
# Save corrected mseed file
tra = E_record[0]
tra.stats.channel = code + 'E'
filename = '/Users/tnye/tsuquakes/data/Mentawai2010/' + data + '_corr/' + tra.stats.station + '.' + tra.stats.channel + '.mseed'
tra.write(filename, format='MSEED')
# Get the values for the N component
if 'N' in components:
# Get index for N component
N_index = np.where(components=='N')[0][0]
# Read file into a stream object
N_raw = obspy.read(mseed_list[i][N_index])
# Correct by gain, so everything is in meters
N_gaincorr = tmf.correct_for_gain(N_raw,stgain)
# Get the pre-event baseline
N_baseline = tmf.compute_baseline(N_gaincorr,nsamples)
# Get the baseline corrected stream object
N_basecorr = tmf.correct_for_baseline(N_gaincorr,N_baseline)
# High pass filter strong motion data at fcorner specified above
if filtering == True:
N_filt = tmf.highpass(N_basecorr,fcorner,stsamprate,order,zerophase=True)
N_record = N_filt
else:
N_record = N_basecorr
# Get the duration, stream file time of start, and time of stop of shaking
N_Td, N_start, N_end = tmf.determine_Td(threshold,N_record)
N_Td_list = np.append(N_Td_list,N_Td)
# Save corrected mseed file
tra = N_record[0]
tra.stats.channel = code + 'N'
filename = '/Users/tnye/tsuquakes/data/Mentawai2010/' + data + '_corr/' + tra.stats.station + '.' + tra.stats.channel + '.mseed'
tra.write(filename, format='MSEED')
# Get the values for the Z component
if 'Z' in components:
# Get index for Z component
Z_index = np.where(components=='Z')[0][0]
# Read file into a stream object
Z_raw = obspy.read(mseed_list[i][Z_index])
# Correct by gain, so everything is in meters
Z_gaincorr = tmf.correct_for_gain(Z_raw,stgain)
# Get the pre-event baseline
Z_baseline = tmf.compute_baseline(Z_gaincorr,nsamples)
# Get the baseline corrected stream object
Z_basecorr = tmf.correct_for_baseline(Z_gaincorr,Z_baseline)
# High pass filter strong motion data at fcorner specified above
if filtering == True:
Z_filt = tmf.highpass(Z_basecorr,fcorner,stsamprate,order,zerophase=True)
Z_record = Z_filt
else:
Z_record = Z_basecorr
# Get the duration, stream file time of start, and time of stop of shaking
Z_Td, Z_start, Z_end = tmf.determine_Td(threshold,Z_record)
Z_Td_list = np.append(Z_Td_list,Z_Td)
# Save corrected mseed file
tra = Z_record[0]
tra.stats.channel = code + 'Z'
filename = '/Users/tnye/tsuquakes/data/Mentawai2010/' + data + '_corr/' + tra.stats.station + '.' + tra.stats.channel + '.mseed'
tra.write(filename, format='MSEED')
# Get the values for the horizontal components
if ('E' in components) and ('N' in components):
# Take the min time of E and N start times to be the start
EN_start = np.min([E_start,N_start])
# Take the max time of E and N end times to be the end
EN_end = np.max([E_end,N_end])
# Get the duration to be the time between these
EN_Td = EN_end - EN_start
horiz_Td_list = np.append(horiz_Td_list,EN_Td)
else:
# Append nan to the overall arrays if horizontals don't exist.
# NOTE(review): the next line assigns to 'horizon_Td_list' — a typo for
# 'horiz_Td_list' — so the nan never reaches the real array.
horizon_Td_list = np.append(horiz_Td_list,np.nan)
# Get the values for all 3 components
if ('E' in components) and ('N' in components) and ('Z' in components):
# Take the min time of the E,N,Z start times to be the start
ENZ_start = np.min([E_start,N_start,Z_start])
# Take the max of the E,N,Z end times to be the end
ENZ_end = np.max([E_end,N_end,Z_end])
# Get the duration to be the time between these
ENZ_Td = ENZ_end - ENZ_start
comp3_Td_list = np.append(comp3_Td_list,ENZ_Td)
else:
# Append nan to the overall arrays if all 3 components don't exist:
comp3_Td_list = np.append(comp3_Td_list,np.nan)
############################### Velocity ##############################
# Integrate acceleration data to velocity
if data == 'accel':
### Integrate filtered acc data to get velocity data
## East component
E_vel_unfilt = tmf.accel_to_veloc(E_record)
E_vel = tmf.highpass(E_vel_unfilt,fcorner,stsamprate,order,zerophase=True)
# Save filtered velocity mseed file
trv = E_vel[0]
trv.stats.channel = 'HNE'
filename_vel = '/Users/tnye/tsuquakes/data/Mentawai2010/vel_corr/' + trv.stats.station + '.HNE.mseed'
trv.write(filename_vel, format='MSEED')
## North component
N_vel_unfilt = tmf.accel_to_veloc(N_record)
N_vel = tmf.highpass(N_vel_unfilt,fcorner,stsamprate,order,zerophase=True)
# Save filtered velocity mseed file
trv = N_vel[0]
trv.stats.channel = 'HNN'
filename_vel = '/Users/tnye/tsuquakes/data/Mentawai2010/vel_corr/' + trv.stats.station + '.HNN.mseed'
trv.write(filename_vel, format='MSEED')
## Vertical component
Z_vel_unfilt = tmf.accel_to_veloc(Z_record)
Z_vel = tmf.highpass(Z_vel_unfilt,fcorner,stsamprate,order,zerophase=True)
# Save filtered velocity mseed file
trv = Z_vel[0]
trv.stats.channel = 'HNZ'
filename_vel = '/Users/tnye/tsuquakes/data/Mentawai2010/vel_corr/' + trv.stats.station + '.HNZ.mseed'
trv.write(filename_vel, format='MSEED')
########################### Intensity Measures ########################
# Calculate displacement intensity measures
if data == 'disp':
## PGD
# Get euclidean norm of displacement components
euc_norm = avg.get_eucl_norm_3comp(E_record[0].data,
N_record[0].data,
Z_record[0].data)
# Calculate PGD
pgd = np.max(np.abs(euc_norm))
pgd_list = np.append(pgd_list,pgd)
# Calculate tPGD from origin and p-arrival
tPGD_orig, tPGD_parriv = IM_fns.calc_time_to_peak(pgd, E_record[0],
np.abs(euc_norm),
origin, hypdist)
tPGD_orig_list = np.append(tPGD_orig_list,tPGD_orig)
tPGD_parriv_list = np.append(tPGD_parriv_list,tPGD_parriv)
## Disp Spectra
E_spec_data, freqE, ampE = IM_fns.calc_spectra(E_record, data)
N_spec_data, freqN, ampN = IM_fns.calc_spectra(N_record, data)
Z_spec_data, freqZ, ampZ = IM_fns.calc_spectra(Z_record, data)
# Combine into one array and append to main list
disp_spec = np.concatenate([E_spec_data,N_spec_data,Z_spec_data])
disp_speclist.append(disp_spec.tolist())
# Plot spectra
freqs = [freqE,freqN,freqZ]
amps = [ampE,ampN,ampZ]
IM_fns.plot_spectra(E_record, freqs, amps, 'disp', '', synthetic=False)
# If data type is not displacement, append 'nans'
else:
pgd_list = np.append(pgd_list,np.nan)
tPGD_orig_list = np.append(tPGD_orig_list,np.nan)
tPGD_parriv_list = np.append(tPGD_parriv_list,np.nan)
disp_spec = [np.nan] * 60
disp_speclist.append(disp_spec)
# Calculate acceleration and velocity intensity measures
if data == 'accel':
    ## PGA
    # Get euclidean norm of acceleration components
    acc_euc_norm = avg.get_eucl_norm_3comp(E_record[0].data,
                                           N_record[0].data,
                                           Z_record[0].data)
    # Calculate PGA as the peak of the 3-component norm
    pga = np.max(np.abs(acc_euc_norm))
    pga_list = np.append(pga_list,pga)
    # Calculate tPGA (time to peak) relative to both origin time and P-arrival
    tPGA_orig, tPGA_parriv = IM_fns.calc_time_to_peak(pga, E_record[0],
                                                      np.abs(acc_euc_norm),
                                                      origin, hypdist)
    tPGA_orig_list = np.append(tPGA_orig_list,tPGA_orig)
    tPGA_parriv_list = np.append(tPGA_parriv_list,tPGA_parriv)
    ## Acc Spectra
    E_spec_data, freqE, ampE = IM_fns.calc_spectra(E_record, 'sm')
    N_spec_data, freqN, ampN = IM_fns.calc_spectra(N_record, 'sm')
    Z_spec_data, freqZ, ampZ = IM_fns.calc_spectra(Z_record, 'sm')
    # Combine into one array (E, N, Z order) and append to main list
    acc_spec = np.concatenate([E_spec_data,N_spec_data,Z_spec_data])
    acc_speclist.append(acc_spec.tolist())
    # Plot spectra
    freqs = [freqE,freqN,freqZ]
    amps = [ampE,ampN,ampZ]
    IM_fns.plot_spectra(E_record, freqs, amps, 'acc', '', synthetic=False)

    ## PGV
    # Get euclidean norm of velocity components
    vel_euc_norm = avg.get_eucl_norm_3comp(E_vel[0].data,
                                           N_vel[0].data,
                                           Z_vel[0].data)
    # Calculate PGV
    pgv = np.max(np.abs(vel_euc_norm))
    pgv_list = np.append(pgv_list,pgv)
    ## Vel Spectra
    E_spec_vel, freqE_v, ampE_v = IM_fns.calc_spectra(E_vel, 'sm')
    N_spec_vel, freqN_v, ampN_v = IM_fns.calc_spectra(N_vel, 'sm')
    Z_spec_vel, freqZ_v, ampZ_v = IM_fns.calc_spectra(Z_vel, 'sm')
    # Combine into one array and append to main list.
    # BUG FIX: this previously concatenated the *acceleration* spectra
    # (E_spec_data/N_spec_data/Z_spec_data), so the vel_bin columns of the
    # flatfile duplicated the acc_bin columns.
    vel_spec = np.concatenate([E_spec_vel,N_spec_vel,Z_spec_vel])
    vel_speclist.append(vel_spec.tolist())
    # Plot spectra
    freqs_v = [freqE_v,freqN_v,freqZ_v]
    amps_v = [ampE_v,ampN_v,ampZ_v]
    IM_fns.plot_spectra(E_vel, freqs_v, amps_v, 'vel', '', synthetic=False)

# If data type is not acceleration, append 'nans'
else:
    pga_list = np.append(pga_list,np.nan)
    pgv_list = np.append(pgv_list,np.nan)
    tPGA_orig_list = np.append(tPGA_orig_list,np.nan)
    tPGA_parriv_list = np.append(tPGA_parriv_list,np.nan)
    # 60 = 3 components x 20 spectra bins
    acc_spec = [np.nan] * 60
    acc_speclist.append(acc_spec)
    vel_spec = [np.nan] * 60
    vel_speclist.append(vel_spec)
########################### Put together dataframe ############################

# First, make a dictionary for main part of dataframe:
dataset_dict = {'eventname':eventnames,'country':countries,'origintime':origintimes,
                'hyplon':hyplons,'hyplat':hyplats,'hypdepth (km)':hypdepths,
                'mw':mws,'m0':m0s,'network':networks,'station':stations,
                'station_type':stn_type_list,'stlon':stlons,'stlat':stlats,
                'stelev':stelevs,'mechanism':mechanisms,'hypdist':hypdists,
                'duration_e':E_Td_list,'duration_n':N_Td_list,'duration_z':Z_Td_list,
                'duration_horiz':horiz_Td_list,'duration_3comp':comp3_Td_list,
                'pga':pga_list, 'pgv':pgv_list,'pgd':pgd_list,'tPGD_origin':tPGD_orig_list,
                'tPGD_parriv':tPGD_parriv_list, 'tPGA_origin':tPGA_orig_list,
                'tPGA_parriv':tPGA_parriv_list}

# Spectra-bin column names: 20 bins per component in E, N, Z order for each
# data type. Generated instead of hand-written 60-element literals (the
# generated lists are element-for-element identical to the old ones and match
# the np.concatenate([E, N, Z]) ordering used when the spectra were computed).
disp_cols = ['{}_disp_bin{}'.format(comp, b)
             for comp in ('E', 'N', 'Z') for b in range(1, 21)]
acc_cols = ['{}_acc_bin{}'.format(comp, b)
            for comp in ('E', 'N', 'Z') for b in range(1, 21)]
vel_cols = ['{}_vel_bin{}'.format(comp, b)
            for comp in ('E', 'N', 'Z') for b in range(1, 21)]

# Create spectra dataframes for each data type
disp_spec_df = pd.DataFrame(disp_speclist, columns=disp_cols)
acc_spec_df = pd.DataFrame(acc_speclist, columns=acc_cols)
vel_spec_df = pd.DataFrame(vel_speclist, columns=vel_cols)

# Make main dataframe
main_df = pd.DataFrame(data=dataset_dict)

# Combine main dataframe with spectra dataframes (reindex keeps rows aligned)
flatfile_df = pd.concat([main_df, disp_spec_df.reindex(main_df.index),
                         acc_spec_df.reindex(main_df.index),
                         vel_spec_df.reindex(main_df.index)], axis=1)

# Save df to file:
flatfile_df.to_csv(flatfile_path,index=False)
|
import os
import random as rd
import numpy as np
import torch
import time
import pandas as pd
import tqdm
from torch.utils.data import DataLoader
from graph_recsys_benchmark.utils import *
class BaseSolver(object):
    """Generic train/evaluate driver for the recsys benchmark models.

    Handles seeding, dataset/model/optimizer construction, checkpoint
    resume, metric accumulation and logging across multiple runs.
    Subclasses only provide the candidate-generation strategy used at
    evaluation time via generate_candidates().
    """

    def __init__(self, model_class, dataset_args, model_args, train_args):
        # Stored verbatim; consumed lazily inside run()/metrics().
        self.model_class = model_class
        self.dataset_args = dataset_args
        self.model_args = model_args
        self.train_args = train_args

    def generate_candidates(self, dataset, u_nid):
        """
        Return the recommendation candidates to the algorithms to rank
        :param dataset: graph_recsys_benchmark.dataset.Dataset object
        :param u_nid: user node ids
        :return: (pos_i_nids, neg_i_nids) lists of item node ids
        """
        raise NotImplementedError

    def metrics(
            self,
            run,
            epoch,
            model,
            dataset
    ):
        """Evaluate ``model`` over every test user and return averaged metrics.

        :param run: current run index (progress-bar text only)
        :param epoch: current epoch index (progress-bar text only)
        :param model: model exposing loss() and predict()
        :param dataset: dataset with the train/test/negative user->item maps
        :return: (mean HRs [16], mean NDCGs [16], mean AUC, mean eval loss)
        """
        train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map = \
            dataset.train_pos_unid_inid_map, dataset.test_pos_unid_inid_map, dataset.neg_unid_inid_map

        # 16 columns: hit/ndcg at multiple cut-offs (index 5 is reported as @10).
        HRs, NDCGs, AUC, eval_losses = np.zeros((0, 16)), np.zeros((0, 16)), np.zeros((0, 1)), np.zeros((0, 1))

        u_nids = list(test_pos_unid_inid_map.keys())
        test_bar = tqdm.tqdm(u_nids, total=len(u_nids))
        for u_idx, u_nid in enumerate(test_bar):
            pos_i_nids, neg_i_nids = self.generate_candidates(
                dataset, u_nid
            )
            if len(pos_i_nids) == 0 or len(neg_i_nids) == 0:
                raise ValueError("No pos or neg samples found in evaluation!")

            # Cartesian product of (positive, negative) items for this user.
            pos_i_nid_df = pd.DataFrame({'u_nid': [u_nid for _ in range(len(pos_i_nids))], 'pos_i_nid': pos_i_nids})
            neg_i_nid_df = pd.DataFrame({'u_nid': [u_nid for _ in range(len(neg_i_nids))], 'neg_i_nid': neg_i_nids})
            pos_neg_pair_t = torch.from_numpy(
                pd.merge(pos_i_nid_df, neg_i_nid_df, how='inner', on='u_nid').to_numpy()
            ).to(self.train_args['device'])

            if self.model_args['model_type'] == 'MF':
                # MF embeddings are 0-indexed: shift global node ids down.
                pos_neg_pair_t[:, 0] -= dataset.e2nid_dict['uid'][0]
                pos_neg_pair_t[:, 1:] -= dataset.e2nid_dict['iid'][0]
            loss = model.loss(pos_neg_pair_t).detach().cpu().item()

            pos_u_nids_t = torch.from_numpy(np.array([u_nid for _ in range(len(pos_i_nids))])).to(self.train_args['device'])
            pos_i_nids_t = torch.from_numpy(np.array(pos_i_nids)).to(self.train_args['device'])
            neg_u_nids_t = torch.from_numpy(np.array([u_nid for _ in range(len(neg_i_nids))])).to(self.train_args['device'])
            neg_i_nids_t = torch.from_numpy(np.array(neg_i_nids)).to(self.train_args['device'])
            if self.model_args['model_type'] == 'MF':
                pos_u_nids_t -= dataset.e2nid_dict['uid'][0]
                neg_u_nids_t -= dataset.e2nid_dict['uid'][0]
                pos_i_nids_t -= dataset.e2nid_dict['iid'][0]
                neg_i_nids_t -= dataset.e2nid_dict['iid'][0]

            pos_pred = model.predict(pos_u_nids_t, pos_i_nids_t).reshape(-1)
            neg_pred = model.predict(neg_u_nids_t, neg_i_nids_t).reshape(-1)

            # Rank all candidates together; positions of positives in the
            # sorted order give the hit vector consumed by hit()/ndcg().
            _, indices = torch.sort(torch.cat([pos_pred, neg_pred]), descending=True)
            hit_vec = (indices < len(pos_i_nids)).cpu().detach().numpy()
            pos_pred = pos_pred.cpu().detach().numpy()
            neg_pred = neg_pred.cpu().detach().numpy()

            HRs = np.vstack([HRs, hit(hit_vec)])
            NDCGs = np.vstack([NDCGs, ndcg(hit_vec)])
            AUC = np.vstack([AUC, auc(pos_pred, neg_pred)])
            eval_losses = np.vstack([eval_losses, loss])
            test_bar.set_description(
                'Run {}, epoch: {}, HR@10: {:.4f}, NDCG@10: {:.4f}, '
                'AUC: {:.4f}, eval loss: {:.4f}, '.format(
                    run, epoch,
                    HRs.mean(axis=0)[5], NDCGs.mean(axis=0)[5], AUC.mean(axis=0)[0],
                    eval_losses.mean(axis=0)[0])
            )
        return HRs.mean(axis=0), NDCGs.mean(axis=0), AUC.mean(axis=0)[0], eval_losses.mean(axis=0)[0]

    def run(self):
        """Train and evaluate for train_args['runs'] runs, resuming from any
        previously saved global logger / per-run checkpoints."""
        global_logger_path = self.train_args['logger_folder']
        if not os.path.exists(global_logger_path):
            os.makedirs(global_logger_path, exist_ok=True)
        global_logger_file_path = os.path.join(global_logger_path, 'global_logger.pkl')
        HRs_per_run_np, NDCGs_per_run_np, AUC_per_run_np, train_loss_per_run_np, eval_loss_per_run_np, last_run = \
            load_global_logger(global_logger_file_path)

        logger_file_path = os.path.join(global_logger_path, 'logger_file.txt')
        with open(logger_file_path, 'w') as logger_file:
            start_run = last_run + 1
            if start_run <= self.train_args['runs']:
                for run in range(start_run, self.train_args['runs'] + 1):
                    # Fix the random seed (distinct per run, reproducible on restart)
                    seed = 2019 + run
                    rd.seed(seed)
                    np.random.seed(seed)
                    torch.manual_seed(seed)
                    torch.cuda.manual_seed(seed)

                    # Create the dataset
                    self.dataset_args['seed'] = seed
                    dataset = load_dataset(self.dataset_args)

                    # Create model and optimizer
                    if self.model_args['model_type'] == 'Graph':
                        if self.model_args['if_use_features']:
                            self.model_args['emb_dim'] = dataset.data.x.shape[1]
                        self.model_args['num_nodes'] = dataset.num_nodes
                        self.model_args['dataset'] = dataset
                    elif self.model_args['model_type'] == 'MF':
                        self.model_args['num_users'] = dataset.num_users
                        self.model_args['num_items'] = dataset.num_items

                    model = self.model_class(**self.model_args).to(self.train_args['device'])

                    opt_class = get_opt_class(self.train_args['opt'])
                    optimizer = opt_class(
                        params=model.parameters(),
                        lr=self.train_args['lr'],
                        weight_decay=self.train_args['weight_decay']
                    )

                    # Load models (resume from this run's latest checkpoint, if any)
                    weights_path = os.path.join(self.train_args['weights_folder'], 'run_{}'.format(str(run)))
                    if not os.path.exists(weights_path):
                        os.makedirs(weights_path, exist_ok=True)
                    weights_file = os.path.join(weights_path, 'latest.pkl')
                    model, optimizer, last_epoch, rec_metrics = load_model(weights_file, model, optimizer,
                                                                           self.train_args['device'])
                    HRs_per_epoch_np, NDCGs_per_epoch_np, AUC_per_epoch_np, train_loss_per_epoch_np, eval_loss_per_epoch_np = \
                        rec_metrics

                    if torch.cuda.is_available():
                        torch.cuda.synchronize()

                    start_epoch = last_epoch + 1
                    if start_epoch == 1 and self.train_args['init_eval']:
                        # Optional evaluation of the untrained model as a baseline.
                        model.eval()
                        HRs_before_np, NDCGs_before_np, AUC_before_np, eval_loss_before_np = \
                            self.metrics(run, 0, model, dataset)
                        print(
                            'Initial performance HR@10: {:.4f}, NDCG@10: {:.4f}, '
                            'AUC: {:.4f}, eval loss: {:.4f} \n'.format(
                                HRs_before_np[5], NDCGs_before_np[5], AUC_before_np, eval_loss_before_np
                            )
                        )
                        logger_file.write(
                            'Initial performance HR@10: {:.4f}, NDCG@10: {:.4f}, '
                            'AUC: {:.4f}, eval loss: {:.4f} \n'.format(
                                HRs_before_np[5], NDCGs_before_np[5], AUC_before_np, eval_loss_before_np
                            )
                        )

                    t_start = time.perf_counter()
                    if start_epoch <= self.train_args['epochs']:
                        # Start training model
                        for epoch in range(start_epoch, self.train_args['epochs'] + 1):
                            loss_per_batch = []
                            model.train()
                            # Resample negatives every epoch.
                            dataset.negative_sampling()
                            recsys_train_dataloader = DataLoader(
                                dataset,
                                shuffle=True,
                                batch_size=self.train_args['batch_size'],
                                num_workers=self.train_args['num_workers']
                            )
                            train_bar = tqdm.tqdm(recsys_train_dataloader, total=len(recsys_train_dataloader))
                            for _, batch in enumerate(train_bar):
                                if self.model_args['model_type'] == 'MF':
                                    # Shift global node ids to 0-based embedding indices.
                                    if self.model_args['loss_type'] == 'BCE':
                                        batch[:, 0] -= dataset.e2nid_dict['uid'][0]
                                        batch[:, 1] -= dataset.e2nid_dict['iid'][0]
                                    elif self.model_args['loss_type'] == 'BPR':
                                        batch[:, 0] -= dataset.e2nid_dict['uid'][0]
                                        batch[:, 1:] -= dataset.e2nid_dict['iid'][0]
                                batch = batch.to(self.train_args['device'])

                                optimizer.zero_grad()
                                loss = model.loss(batch)
                                loss.backward()
                                optimizer.step()

                                loss_per_batch.append(loss.detach().cpu().item())
                                train_loss = np.mean(loss_per_batch)
                                train_bar.set_description(
                                    'Run: {}, epoch: {}, train loss: {:.4f}'.format(run, epoch, train_loss)
                                )

                            model.eval()
                            HRs, NDCGs, AUC, eval_loss = self.metrics(run, epoch, model, dataset)

                            HRs_per_epoch_np = np.vstack([HRs_per_epoch_np, HRs])
                            # BUG FIX: previously np.vstack([NDCGs, HRs]), which
                            # discarded the accumulated epoch history and stacked
                            # the HR row into the NDCG array.
                            NDCGs_per_epoch_np = np.vstack([NDCGs_per_epoch_np, NDCGs])
                            AUC_per_epoch_np = np.vstack([AUC_per_epoch_np, AUC])
                            train_loss_per_epoch_np = np.vstack([train_loss_per_epoch_np, np.array([train_loss])])
                            eval_loss_per_epoch_np = np.vstack([eval_loss_per_epoch_np, np.array([eval_loss])])

                            if epoch in self.train_args['save_epochs']:
                                weightpath = os.path.join(weights_path, '{}.pkl'.format(epoch))
                                save_model(
                                    weightpath,
                                    model, optimizer, epoch,
                                    rec_metrics=(
                                        HRs_per_epoch_np, NDCGs_per_epoch_np, AUC_per_epoch_np, train_loss_per_epoch_np, eval_loss_per_epoch_np)
                                )
                            # NOTE(review): name suggests "save every N epochs" but this
                            # saves 'latest' on every epoch past the threshold — confirm intent.
                            if epoch > self.train_args['save_every_epoch']:
                                weightpath = os.path.join(weights_path, 'latest.pkl')
                                save_model(
                                    weightpath,
                                    model, optimizer, epoch,
                                    rec_metrics=(
                                        HRs_per_epoch_np, NDCGs_per_epoch_np, AUC_per_epoch_np, train_loss_per_epoch_np, eval_loss_per_epoch_np)
                                )
                            logger_file.write(
                                'Run: {}, epoch: {}, HR@10: {:.4f}, NDCG@10: {:.4f}, AUC: {:.4f}, '
                                'train loss: {:.4f}, eval loss: {:.4f} \n'.format(
                                    run, epoch, HRs[5], NDCGs[5], AUC, train_loss, eval_loss
                                )
                            )

                    if torch.cuda.is_available():
                        torch.cuda.synchronize()
                    t_end = time.perf_counter()

                    # Record the final epoch of this run in the global logger.
                    HRs_per_run_np = np.vstack([HRs_per_run_np, HRs_per_epoch_np[-1]])
                    NDCGs_per_run_np = np.vstack([NDCGs_per_run_np, NDCGs_per_epoch_np[-1]])
                    AUC_per_run_np = np.vstack([AUC_per_run_np, AUC_per_epoch_np[-1]])
                    train_loss_per_run_np = np.vstack([train_loss_per_run_np, train_loss_per_epoch_np[-1]])
                    eval_loss_per_run_np = np.vstack([eval_loss_per_run_np, eval_loss_per_epoch_np[-1]])
                    save_global_logger(
                        global_logger_file_path,
                        HRs_per_run_np, NDCGs_per_run_np, AUC_per_run_np,
                        train_loss_per_run_np, eval_loss_per_run_np
                    )
                    print(
                        'Run: {}, Duration: {:.4f}, HR@10: {:.4f}, NDCG@10: {:.4f}, AUC: {:.4f}, '
                        'train_loss: {:.4f}, eval loss: {:.4f}\n'.format(
                            run, t_end - t_start, HRs_per_epoch_np[-1][5], NDCGs_per_epoch_np[-1][5],
                            AUC_per_epoch_np[-1][0], train_loss_per_epoch_np[-1][0], eval_loss_per_epoch_np[-1][0])
                    )
                    logger_file.write(
                        'Run: {}, Duration: {:.4f}, HR@10: {:.4f}, NDCG@10: {:.4f}, AUC: {:.4f}, '
                        'train_loss: {:.4f}, eval loss: {:.4f}\n'.format(
                            run, t_end - t_start, HRs_per_epoch_np[-1][5], NDCGs_per_epoch_np[-1][5],
                            AUC_per_epoch_np[-1][0], train_loss_per_epoch_np[-1][0], eval_loss_per_epoch_np[-1][0])
                    )

            print(
                'Overall HR@10: {:.4f}, NDCG@10: {:.4f}, AUC: {:.4f}, train loss: {:.4f}, eval loss: {:.4f}\n'.format(
                    HRs_per_run_np.mean(axis=0)[5], NDCGs_per_run_np.mean(axis=0)[5], AUC_per_run_np.mean(axis=0)[0], train_loss_per_run_np.mean(axis=0)[0],
                    eval_loss_per_run_np.mean(axis=0)[0]
                )
            )
            logger_file.write(
                'Overall HR@10: {:.4f}, NDCG@10: {:.4f}, AUC: {:.4f}, train loss: {:.4f}, eval loss: {:.4f}\n'.format(
                    HRs_per_run_np.mean(axis=0)[5], NDCGs_per_run_np.mean(axis=0)[5], AUC_per_run_np.mean(axis=0)[0], train_loss_per_run_np.mean(axis=0)[0],
                    eval_loss_per_run_np.mean(axis=0)[0]
                )
            )
|
#!/usr/bin/env python
# _#_ coding:utf-8 _*_
import logging
from django.http import JsonResponse
import time, hmac, hashlib
from django.shortcuts import render
from assets.models import *
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.decorators import login_required
from django.conf import settings
def create_signature(secret, *parts):
    """Return the HMAC-SHA1 hex digest of the concatenated parts.

    :param secret: shared API secret; str or bytes. BUG FIX: on Python 3
        ``hmac.new`` requires a bytes key, so a str secret from Django
        settings previously raised TypeError — it is now encoded.
    :param parts: values hashed in order, each as ``str(part)`` UTF-8 bytes.
    :return: hex digest string.
    """
    if isinstance(secret, str):
        secret = secret.encode("utf-8")
    digest = hmac.new(secret, digestmod=hashlib.sha1)  # renamed from `hash` (shadowed builtin)
    for part in parts:
        digest.update(str(part).encode("utf-8"))
    return digest.hexdigest()
#@login_required(login_url='/login')
#@permission_required('assets.add_assets', raise_exception=True)
def get_auth_obj(request):
    """Build the GateOne API authentication payload for the current user.

    Returns JSON containing the GateOne server URL plus an auth object whose
    signature is an HMAC-SHA1 over api_key + upn + timestamp, keyed with the
    shared API secret from settings.
    """
    auth_obj = {
        'api_key': settings.GATEONE_API_KEY,
        'upn': str(request.user),
        'timestamp': str(int(time.time() * 1000)),  # milliseconds since epoch
        'signature_method': 'HMAC-SHA1',
        'api_version': '1.0',
    }
    auth_obj['signature'] = create_signature(
        settings.GATEONE_API_SECRET,
        auth_obj['api_key'],
        auth_obj['upn'],
        auth_obj['timestamp'],
    )
    return JsonResponse({'url': settings.GATEONE_SERVER_ADDR, 'auth': auth_obj})
@login_required(login_url='/login')
@permission_required('assets.add_assets', raise_exception=True)
def gateone(request,aid):
    """Render the GateOne terminal page for asset ``aid``.

    Superusers get the terminal page directly. The non-superuser branch is
    stubbed out below and currently falls through, so the view implicitly
    returns None for non-superusers — NOTE(review): Django rejects a None
    response; a proper response (or 403) is needed before enabling this
    path for regular users.
    """
    try:
        asset = Assets.objects.get(id=aid)
        if request.user.is_superuser:
            return render(request,'assets/gateone.html',{"user":request.user,"asset": asset})
        else:
            # Per-user asset authorization — not implemented yet (no-op).
            # user_server = User_Server.objects.get(user_id=request.user.id, server_id=aid)
            # userServer = User_Server.objects.filter(user_id=request.user.id)
            # serverList = []
            # for s in userServer:
            #     ser = Server_Assets.objects.get(id=s.server_id)
            #     serverList.append(ser)
            # if user_server:
            #     return render(request,'webssh/gateone.html',{"user":request.user,"server":server})
            pass
    except Exception as ex:
        # Broad catch: any failure (missing asset, DB error) renders the error page.
        logging.getLogger().error(msg="请求gateone失败: {ex}".format(ex=str(ex)))
        return render(request,'assets/gateone.html',{"user":request.user,"errorInfo":"请联系管理员"})
|
#HelloWorld  - PascalCase
#helloWorld  - camelCase
#hello_world - snake_case (Python convention)
#Python features: type inference
#Everything is an object
#Interpreted language

#1. Numbers
a = 1
b = 1.2
c = 4e5  # scientific notation: 4 * 10**5 -> 400000.0 (a float)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))

#Operators: +, -, *, /, %
e = 3
f = 4
print(e**f) # ** exponentiation
print(e//f) # // floor division (quotient)
print(4%3) # % remainder (modulo)

#2. Strings
# " " and ' ' are interchangeable; use either one
# ''' ''' creates a multi-line string
s1 = "안녕하세요"
s2 = '안녕하세요'
s3 = '''
안녕하세요
'''
print(s1)
print(s2)
print(s3)
s4 = s1 + s2
print(s4)
s5 = '홍길동'
print(s5+'님 안녕하세요')
print(f"{s5}님 안녕하세요")
print('='*50)

#3. Slicing:
str1 = '가나다라마'
print(str1[0:3]) #the slice stops just before the end index
print(str1[-1])  #negative index counts from the end
print(str1[1:]) #omitting the end index slices to the end
|
# My first Python script
import sys # Load a library module
print(sys.platform) # Print the name of the platform in use
print(2**100) # Print 2 to the power of 100
x = "spam" # Assign the string "spam" to the variable x
print(x*8) # Print the x variable 8 times
|
# Copyright 2019 EPFL, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
import sys
# Make project modules importable whether run from the repo root or baselines/.
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.getcwd().split('baselines')[0])
from modules.hardnet.models import HardNet
from tqdm import tqdm
from PIL import Image
from baselines.matching.utils.metrics import get_ranks
import cv2
from configs.defaults import _C as cfg
import argparse

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # match IDs of nvidia-smi
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # only expose GPU 0 to this process
np.random.seed(42)  # deterministic image-pair sampling in AMOSDataset
class AMOSDataset():
    '''AMOS multi-view patch dataset (https://github.com/pultarmi/AMOS_patches).

    For every stored LAF, samples two different images of the same view to
    form an (anchor, positive) pair sharing the LAF's keypoint locations.
    '''

    def __init__(self, root_dir, padTo=1500):
        self.root_dir = root_dir
        # read Train and Test folders
        self.test_path = os.path.join(self.root_dir, 'Train')
        # read data - [0] - patches, [1] - LAFs, [2] - view from which patches were extracted
        self.test_LAFs = torch.load(os.path.join(self.root_dir,
                                                 'train_patches.pt'),
                                    map_location=torch.device('cpu'))
        # Mapping: view name -> image filenames in that view.
        # NOTE(review): recent NumPy needs allow_pickle=True for .item() on an
        # object array saved this way — confirm against the pinned version.
        self.images_in_views = np.load(
            os.path.join(self.root_dir, 'images_in_views.npy')).item()
        # name of views - sorted - corresponding to the data [3] in the .pt file
        print(len(self.test_LAFs[1]))
        self.views = sorted(os.listdir(self.test_path))
        print(len(self.views))
        # Views excluded from evaluation.
        self.skip_views = [
            '00034154', '00036180', '00036169', '00011611', '00004431'
        ]
        self.padTo = padTo
        self.all_images, self.anchors, self.positives = self.get_pair_of_images_and_keypoints(
        )

    def __len__(self):
        return len(self.all_images)

    def get_pair_of_images_and_keypoints(self):
        """Sample one (anchor, positive) image pair per LAF and group the
        keypoint/orientation/scale triples by image path.

        :return: (list of (image_path, [[keypoints, orientation, scale], ...]),
                  anchor entries, positive entries)
        """
        anchors, positives = [], []
        all_images = {}
        views_images = {}
        for idx in tqdm(range(len(self.test_LAFs[1])),
                        total=len(self.test_LAFs[1])):
            LAFs = self.test_LAFs[1][idx]
            view_idx = int(self.test_LAFs[2][idx].numpy()[0])
            view = self.views[view_idx]
            if view in self.skip_views: continue
            #images_in_view = (os.listdir(os.path.join(self.test_path, view)))
            images_in_view = self.images_in_views[view]
            #views_images[view] = images_in_view
            # Two distinct images of the same view; both share the LAF keypoints.
            images, keypoints, orientations = np.random.choice(images_in_view, 2, replace=False),\
                LAFs[:,2].cpu().data.numpy(), \
                np.zeros(2)
            images = [
                os.path.join(self.test_path, view, image) for image in images
            ]
            read_img = Image.open(images[0])
            width, height = read_img.size
            # Skip images larger than the padding target (cannot be padded to padTo).
            if width > self.padTo or height > self.padTo:
                continue
            # Patch scale from the determinant of the LAF's affine part.
            scale = np.sqrt(LAFs[0, 0] * LAFs[1, 1] - LAFs[0, 1] * LAFs[1, 0] +
                            1e-10).cpu().data.numpy()
            anchors.append([images[0], keypoints, orientations[0], scale])
            positives.append([images[1], keypoints, orientations[1], scale])
            for idx, img in enumerate(images):
                if img in all_images.keys():
                    all_images[img].append(
                        [keypoints, orientations[idx], scale])
                else:
                    all_images[img] = [[keypoints, orientations[idx], scale]]
        # np.save('images_in_views_old.npy', np.array(views_images))
        return list(all_images.items()), anchors, positives

    def __getitem__(self, idx):
        """Return (image_path, padded grayscale image tensor, theta) for ``idx``.

        theta = [keypoints normalized to [-1, 1], scales, orientations,
                 unnormalized pixel keypoints].
        """
        image_data = self.all_images[idx]
        image_path, keypoints, orientations, scales = image_data[0], \
            np.array([x[0] for x in image_data[1]]),\
            np.array([x[1] for x in image_data[1]]),\
            np.array([x[2] for x in image_data[1]])
        image_anchor = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        image_anchor = np.expand_dims(image_anchor, 0)
        if image_anchor.shape[1] > self.padTo or image_anchor.shape[
                2] > self.padTo:
            print(image_path)
            raise RuntimeError(
                "Image {} exceeds acceptable size, can't apply padding beyond image size."
                .format(image_path))
        # Center the image on a padTo x padTo canvas with reflected borders.
        fillHeight = self.padTo - image_anchor.shape[1]
        fillWidth = self.padTo - image_anchor.shape[2]
        padLeft = int(np.round(fillWidth / 2))
        padRight = int(fillWidth - padLeft)
        padUp = int(np.round(fillHeight / 2))
        padDown = int(fillHeight - padUp)
        image_anchor = np.pad(
            image_anchor,
            pad_width=((0, 0), (padUp, padDown), (padLeft, padRight)),
            mode='reflect')  # use either "reflect" or "symmetric"
        image_anchor = torch.from_numpy(np.array(image_anchor))
        keypoints_locations = []
        for kpIDX, kp_loc in enumerate(keypoints):  # iterate over keypoints
            # Map padded pixel coordinates into [-1, 1].
            normKp = 2 * np.array([[(kp_loc[0] + padLeft) / (self.padTo),
                                    (kp_loc[1] + padUp) / (self.padTo)]]) - 1
            keypoints_locations.append(normKp)
        theta = [
            torch.from_numpy(np.array(keypoints_locations)).float().squeeze(),
            torch.from_numpy(scales).float(),
            torch.from_numpy(np.array(orientations)).float(),
            torch.from_numpy(np.array(keypoints)).float().squeeze()
        ]
        return image_path, image_anchor, theta
def reshape_descriptors_for_anchors_positives(anchors, positives, descriptors,
                                              patches_for_images):
    """Collect the descriptor of each anchor/positive entry, in pair order.

    Each entry is [image_path, keypoints, ...]; its descriptor is looked up in
    ``descriptors`` by image basename and stringified keypoints.
    ``patches_for_images`` is unused; kept for interface compatibility.
    """
    anchor_descs, positive_descs = [], []
    for idx in range(len(anchors)):
        a_path, a_kps = anchors[idx][0], anchors[idx][1]
        p_path, p_kps = positives[idx][0], positives[idx][1]
        anchor_descs.append(descriptors[os.path.basename(a_path)][str(a_kps)])
        positive_descs.append(descriptors[os.path.basename(p_path)][str(p_kps)])
    return anchor_descs, positive_descs
def get_descriptors_from_list_of_keypoints(model, dataset):
    """Describe every image's keypoints with ``model`` and rank anchors
    against positives; return the cumulative distribution of retrieval ranks.

    NOTE(review): reads the module-level ``device`` assigned in the __main__
    block, so this function only works when run as a script — confirm before
    importing it elsewhere.
    """
    pbar = tqdm(enumerate(dataset), total=len(dataset))
    stats = {}
    stats["rankCounts"] = np.zeros(len(dataset.anchors))
    descs_for_imgs, patches_for_images = {}, {}
    for batch_idx, data in pbar:
        image_path, image_anchor, theta = data
        keypoints, scales, oris, unnorm_keypoints = theta[0], theta[1], theta[
            2], theta[3]
        img_filename = os.path.basename(image_path)
        imgs, img_keypoints = image_anchor.to(device), [
            keypoints.to(device),
            scales.to(device),
            oris.to(device)
        ]
        with torch.no_grad():
            descriptors, patches = model({img_filename: imgs}, img_keypoints,
                                         [img_filename] *
                                         len(img_keypoints[0]))
        # Index descriptors by (image filename, stringified keypoint) for the
        # anchor/positive lookup below.
        if img_filename in descs_for_imgs:
            for idx, keypoint in enumerate(unnorm_keypoints):
                descs_for_imgs[img_filename][str(keypoint.data.cpu().numpy(
                ))] = descriptors[idx].squeeze().data.cpu().numpy()
        else:
            descs_for_imgs[img_filename] = {}
            for idx, keypoint in enumerate(unnorm_keypoints):
                descs_for_imgs[img_filename][str(keypoint.data.cpu().numpy(
                ))] = descriptors[idx].squeeze().data.cpu().numpy()
    anchors_desc, positive_desc = reshape_descriptors_for_anchors_positives(
        dataset.anchors, dataset.positives, descs_for_imgs, patches_for_images)
    rank = get_ranks(np.array(anchors_desc), np.array(positive_desc))
    # Histogram of ranks -> cumulative distribution (rankCDF[0] = recall@1).
    for rankOccurs in rank:
        stats["rankCounts"][rankOccurs] += 1
    cumCounts = np.cumsum(stats["rankCounts"])
    # errstate guards the division when no pairs were evaluated (cumCounts[-1] == 0).
    with np.errstate(all='ignore'):
        stats["rankCDF"] = cumCounts / cumCounts[-1]
    print('Rank 0: {}'.format(stats["rankCDF"][0]))
    return stats["rankCDF"]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="HardNet Training")
    parser.add_argument("--config_file",
                        default="configs/init_one_example_ptn_96.yml",
                        help="path to config file",
                        type=str)
    # NOTE(review): help text says "path to config file" but this argument is
    # the AMOS dataset root — likely a copy-paste slip in the help string.
    parser.add_argument("--amos_dataset",
                        default="dl/AMOS/Handpicked_v3_png/",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # Layer config sources: YAML file first, then command-line overrides.
    if args.config_file != "": cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    if not cfg.TRAINING.NO_CUDA:
        torch.cuda.manual_seed_all(cfg.TRAINING.SEED)
        torch.backends.cudnn.deterministic = True
    # Module-level `device` is also read by get_descriptors_from_list_of_keypoints.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    amos_dataset = AMOSDataset(args.amos_dataset, cfg.TRAINING.PAD_TO)
    model = HardNet(transform=cfg.TEST.TRANSFORMER,
                    coords=cfg.TEST.COORDS,
                    patch_size=cfg.TEST.IMAGE_SIZE,
                    scale=cfg.TEST.SCALE,
                    is_desc256=cfg.TEST.IS_DESC_256,
                    orientCorrect=cfg.TEST.ORIENT_CORRECTION)
    # Restore pretrained weights and evaluate.
    checkpoint = os.path.join(cfg.TEST.MODEL_WEIGHTS)
    model.load_state_dict(torch.load(checkpoint)['state_dict'])
    model.eval()
    model.to(device)
    ranks = get_descriptors_from_list_of_keypoints(model, amos_dataset)
    print(ranks)
|
'''
Classic DP problem with the twist that a single digit must be between 1 and 9
and a double digit between 10 and 26. dp[i] holds the number of ways to decode
the prefix s[:i]; dp[0] is the empty-prefix base case, so results are stored
shifted by one. Time O(n) | Space O(n); dp could be reduced to two rolling
variables for O(1) space.
'''
class Solution:
    def numDecodings(self, s: str) -> int:
        """Return the number of ways to decode digit string s (1->A ... 26->Z)."""
        # Error cases: empty input or a leading zero cannot be decoded.
        # (The empty-string guard also fixes an IndexError on s[0].)
        if not s or s[0] == "0":
            return 0
        dp = [0] * (len(s) + 1)  # dp[i] = number of decodings of s[:i]
        dp[0] = 1                # empty prefix
        dp[1] = 1                # first char is 1-9 (checked above)
        for i in range(2, len(s) + 1):
            # Single-digit decode: s[i-1] must be 1-9.
            if 0 < int(s[i-1:i]) <= 9:
                dp[i] += dp[i-1]
            # Two-digit decode: s[i-2:i] must be 10-26.
            if 10 <= int(s[i-2:i]) <= 26:
                dp[i] += dp[i-2]
        # (Removed a leftover debug print(dp) that spammed stdout per call.)
        return dp[len(s)]
def test_nogarbage_fixture(testdir):
    """End-to-end check of the ``nogarbage`` fixture: tests that leave cyclic
    garbage behind (or run a collection themselves) must ERROR during fixture
    teardown, while garbage-free tests keep their normal PASS/FAIL outcome."""
    testdir.makepyfile("""
        def test_fail(nogarbage):
            assert False

        def test_pass(nogarbage):
            pass

        def test_except(nogarbage):
            try:
                assert False
            except AssertionError:
                pass

        def test_circular(nogarbage):
            l1 = []
            l2 = [l1]
            l1.append(l2)

        def test_collect(nogarbage):
            import gc
            gc.collect()
    """)
    result = testdir.runpytest(
        '-v'
    )
    # fnmatch patterns: one expected outcome per test in verbose output.
    result.stdout.fnmatch_lines([
        '*::test_fail FAIL*',
        '*::test_pass PASS*',
        '*::test_except PASS*',
        '*::test_circular ERROR*',
        '*::test_collect ERROR*',
    ])
    # Non-zero exit because at least one test failed/errored.
    assert result.ret != 0
|
import os
import sqlite3
from flask import Flask, redirect, abort, g, request, jsonify

app = Flask(__name__)

# SQLite database file backing the name->address registry.
DATABASE = "data.db"

# Status codes returned in the JSON body of /r (register) responses.
REGISTER_SUCCESS = 0
REGISTER_FAIL_TOO_SHORT = 1        # name shorter than 2 characters
REGISTER_FAIL_ALIAS_TOO_SHORT = 2  # alias shorter than 2 characters
REGISTER_FAIL_EXISTED = 3          # name already registered and audited
REGISTER_FAIL_AUDITING = 4         # name registered, still pending audit
REGISTER_FAIL_ADDR_EXISTED = 5     # address already registered
REGISTER_FAIL_ALIAS_EXISTED = 6    # alias already registered
REGISTER_FAIL_DB = 7               # database commit failed
def get_db():
    """Return the request-scoped SQLite connection, creating it on first use
    and ensuring the ``users`` table exists."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = sqlite3.connect(DATABASE)
    # Idempotent DDL: safe to run on every access.
    conn.cursor().execute('''CREATE TABLE IF NOT EXISTS users(
        name TEXT PRIMARY KEY NOT NULL UNIQUE,
        address TEXT NOT NULL,
        alias TEXT,
        audited INTEGER NOT NULL
        );''')
    return conn
def commit_db():
    """Commit the request-scoped connection.

    :return: True on success; False if no connection exists or the commit
        fails (in which case the transaction is rolled back).

    BUG FIX: the old bare ``except:`` swallowed everything, and when no
    connection existed ``db.rollback()`` itself raised AttributeError on
    None. Now only sqlite3 errors are caught and None is handled explicitly.
    """
    db = getattr(g, '_database', None)
    if db is None:
        # Nothing was opened during this request, so there is nothing to commit.
        return False
    try:
        db.commit()
        return True
    except sqlite3.Error:
        db.rollback()
        return False
def get_addr(name):
    """Resolve ``name`` to its registered redirect address.

    Looks up audited users by name first, then by alias.

    :param name: user-supplied name or alias from the URL.
    :return: the stored address, or None if no audited match exists.

    SECURITY FIX: the previous queries interpolated ``name`` with
    ``str.format`` into the SQL text, allowing SQL injection; they now use
    parameterized (qmark) placeholders.
    """
    c = get_db().cursor()
    row = c.execute(
        "SELECT address FROM users WHERE name = ? AND audited = 1",
        (name,)).fetchone()
    if row is None:
        row = c.execute(
            "SELECT address FROM users WHERE alias = ? AND audited = 1",
            (name,)).fetchone()
    if row is None:
        return None
    return row[0]
@ app.route('/r', methods=['POST'])
def register():
    """Register a name -> address mapping, optionally with an alias.

    Expects JSON ``{"name": ..., "address": ..., "alias"?: ...}``; responds
    with ``{"status": REGISTER_*}``. New entries start unaudited
    (``audited = 0``) and are not served by live() until approved.
    Responds 400 for malformed/incomplete JSON.

    SECURITY FIX: all queries previously interpolated client-supplied values
    with ``str.format`` (SQL injection); they now use parameterized (qmark)
    placeholders. Also removed an unreachable trailing ``abort(404)``.
    """
    user = request.get_json(silent=True)
    if user is None:
        abort(400)
    if "name" not in user or "address" not in user:
        abort(400)

    name = user["name"]
    if len(name) < 2:
        return jsonify(status=REGISTER_FAIL_TOO_SHORT)
    address = user["address"]
    alias = None
    if "alias" in user:
        alias = user["alias"]
        if len(alias) < 2:
            return jsonify(status=REGISTER_FAIL_ALIAS_TOO_SHORT)

    c = get_db().cursor()
    # Reject duplicate names (distinguishing pending-audit from approved).
    row = c.execute("SELECT audited FROM users WHERE name = ?",
                    (name,)).fetchone()
    if row is not None:
        if row[0] == 0:
            return jsonify(status=REGISTER_FAIL_AUDITING)
        else:
            return jsonify(status=REGISTER_FAIL_EXISTED)
    # Reject duplicate addresses and aliases.
    row = c.execute("SELECT audited FROM users WHERE address = ?",
                    (address,)).fetchone()
    if row is not None:
        return jsonify(status=REGISTER_FAIL_ADDR_EXISTED)
    if alias is not None:
        row = c.execute("SELECT audited FROM users WHERE alias = ?",
                        (alias,)).fetchone()
        if row is not None:
            return jsonify(status=REGISTER_FAIL_ALIAS_EXISTED)

    if alias is None:
        c.execute("INSERT INTO users (name, address, audited) VALUES (?, ?, 0)",
                  (name, address))
    else:
        c.execute("INSERT INTO users (name, address, alias, audited) VALUES (?, ?, ?, 0)",
                  (name, address, alias))
    if commit_db():
        return jsonify(status=REGISTER_SUCCESS)
    return jsonify(status=REGISTER_FAIL_DB)
@ app.route('/<name>')
def live(name):
    """Redirect to the address registered for ``name`` (or its alias);
    404 when there is no audited match."""
    addr = get_addr(name)
    if addr is None:
        abort(404)
    return redirect(addr, code=302)
@ app.teardown_appcontext
def close_conn(exception):
    """Close the request-scoped database connection, if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
|
# Generated from sfql.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .sfqlParser import sfqlParser
else:
from sfqlParser import sfqlParser
# This class defines a complete listener for a parse tree produced by sfqlParser.
class sfqlListener(ParseTreeListener):
    """ANTLR-generated listener with empty enter/exit hooks for every sfql rule.

    Subclass and override the methods of interest; do not hand-edit the
    generated bodies (they are regenerated from sfql.g4).
    """
    # Enter a parse tree produced by sfqlParser#definitions.
    def enterDefinitions(self, ctx:sfqlParser.DefinitionsContext):
        pass
    # Exit a parse tree produced by sfqlParser#definitions.
    def exitDefinitions(self, ctx:sfqlParser.DefinitionsContext):
        pass
    # Enter a parse tree produced by sfqlParser#f_query.
    def enterF_query(self, ctx:sfqlParser.F_queryContext):
        pass
    # Exit a parse tree produced by sfqlParser#f_query.
    def exitF_query(self, ctx:sfqlParser.F_queryContext):
        pass
    # Enter a parse tree produced by sfqlParser#f_rule.
    def enterF_rule(self, ctx:sfqlParser.F_ruleContext):
        pass
    # Exit a parse tree produced by sfqlParser#f_rule.
    def exitF_rule(self, ctx:sfqlParser.F_ruleContext):
        pass
    # Enter a parse tree produced by sfqlParser#f_macro.
    def enterF_macro(self, ctx:sfqlParser.F_macroContext):
        pass
    # Exit a parse tree produced by sfqlParser#f_macro.
    def exitF_macro(self, ctx:sfqlParser.F_macroContext):
        pass
    # Enter a parse tree produced by sfqlParser#f_list.
    def enterF_list(self, ctx:sfqlParser.F_listContext):
        pass
    # Exit a parse tree produced by sfqlParser#f_list.
    def exitF_list(self, ctx:sfqlParser.F_listContext):
        pass
    # Enter a parse tree produced by sfqlParser#expression.
    def enterExpression(self, ctx:sfqlParser.ExpressionContext):
        pass
    # Exit a parse tree produced by sfqlParser#expression.
    def exitExpression(self, ctx:sfqlParser.ExpressionContext):
        pass
    # Enter a parse tree produced by sfqlParser#or_expression.
    def enterOr_expression(self, ctx:sfqlParser.Or_expressionContext):
        pass
    # Exit a parse tree produced by sfqlParser#or_expression.
    def exitOr_expression(self, ctx:sfqlParser.Or_expressionContext):
        pass
    # Enter a parse tree produced by sfqlParser#and_expression.
    def enterAnd_expression(self, ctx:sfqlParser.And_expressionContext):
        pass
    # Exit a parse tree produced by sfqlParser#and_expression.
    def exitAnd_expression(self, ctx:sfqlParser.And_expressionContext):
        pass
    # Enter a parse tree produced by sfqlParser#term.
    def enterTerm(self, ctx:sfqlParser.TermContext):
        pass
    # Exit a parse tree produced by sfqlParser#term.
    def exitTerm(self, ctx:sfqlParser.TermContext):
        pass
    # Enter a parse tree produced by sfqlParser#items.
    def enterItems(self, ctx:sfqlParser.ItemsContext):
        pass
    # Exit a parse tree produced by sfqlParser#items.
    def exitItems(self, ctx:sfqlParser.ItemsContext):
        pass
    # Enter a parse tree produced by sfqlParser#var.
    def enterVar(self, ctx:sfqlParser.VarContext):
        pass
    # Exit a parse tree produced by sfqlParser#var.
    def exitVar(self, ctx:sfqlParser.VarContext):
        pass
    # Enter a parse tree produced by sfqlParser#atom.
    def enterAtom(self, ctx:sfqlParser.AtomContext):
        pass
    # Exit a parse tree produced by sfqlParser#atom.
    def exitAtom(self, ctx:sfqlParser.AtomContext):
        pass
    # Enter a parse tree produced by sfqlParser#text.
    def enterText(self, ctx:sfqlParser.TextContext):
        pass
    # Exit a parse tree produced by sfqlParser#text.
    def exitText(self, ctx:sfqlParser.TextContext):
        pass
    # Enter a parse tree produced by sfqlParser#binary_operator.
    def enterBinary_operator(self, ctx:sfqlParser.Binary_operatorContext):
        pass
    # Exit a parse tree produced by sfqlParser#binary_operator.
    def exitBinary_operator(self, ctx:sfqlParser.Binary_operatorContext):
        pass
    # Enter a parse tree produced by sfqlParser#unary_operator.
    def enterUnary_operator(self, ctx:sfqlParser.Unary_operatorContext):
        pass
    # Exit a parse tree produced by sfqlParser#unary_operator.
    def exitUnary_operator(self, ctx:sfqlParser.Unary_operatorContext):
        pass
del sfqlParser |
import pygame, os
class Ruoka(pygame.sprite.Sprite):
    """Food sprite whose image is the first tile found on the "ruoka" layer.

    :param tileMap: a tiled map object exposing get_layer_by_name /
        get_tile_image_by_gid (presumably pytmx -- TODO confirm).
    :param pos: (x, y) center position for the sprite rect.
    :raises ValueError: if the layer contains no drawable tile image.
    """
    def __init__(self, tileMap, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image = None
        for x, y, gid in tileMap.get_layer_by_name("ruoka"):
            image = tileMap.get_tile_image_by_gid(gid)
            if image:
                self.image = image
                break
        if self.image is None:
            # Previously this fell through to get_rect() and crashed with an
            # opaque AttributeError; fail with a clear message instead.
            raise ValueError('no tile image found on layer "ruoka"')
        self.rect = self.image.get_rect()
        self.rect.center = pos
import sys
import time
import warnings
from queue import PriorityQueue
from sys import stdout
import numpy as np
import sim.debug
import sim.devices.components.processor
import sim.simulations.constants
import sim.tasks.job
import sim.tasks.subtask
from sim import debug
from sim.clock import clock
from sim.devices.components.fpga import fpga
from sim.learning.action import BATCH, LOCAL
from sim.simulations import constants
class node:
    """One simulated device in the edge-offloading simulation.

    Holds the device's job and subtask queues, per-task batches, energy
    bookkeeping (battery level, average power, sleep time), and a learning
    agent that decides whether jobs run locally, get batched, or are
    offloaded to another node. NOTE(review): semantics are inferred from
    this file alone -- confirm against the sim package before relying on
    the finer details.
    """
    # message = None
    # decision = None
    jobQueue = None
    taskQueue = None
    currentJob = None
    # queuedTask = None # used in simple simulation
    hasJobScheduled = None # used in simple simulation
    previousTimestamp = None
    numJobs = None
    numJobsDone = None
    numTasksDone = None
    currentSubtask = None
    index = None
    energyLevel = None
    maxEnergyLevel = None
    totalEnergyCost = None
    averagePower = None
    powerCount = None
    latestPower = None
    totalSleepTime = None
    drawLocation = None
    gracefulFailure = False
    gracefulFailureLevel = None
    offloadingOptions = None
    # NOTE(review): mutable class-level default; reset() copies it with
    # list(...) before use, so instances do not share state -- verify.
    defaultOffloadingOptions = []
    platform = None
    components = None
    processors = None
    jobActive = None
    alwaysHardwareAccelerate = None
    batch = None
    currentBatch = None
    batchFull = None
    # reconsiderBatches = None
    maxJobs = None
    currentTime = None
    currentTd = None # amount of current TD that this node is busy
    # drawing
    rectangle = None
    location = None
    # episodeFinished = None
    def __init__(self, inputClock, platform, index, components, reconsiderBatches, maxJobs, currentSystemState=None, agent=None, alwaysHardwareAccelerate=None, offPolicy=constants.OFF_POLICY, trainClassification=True):
        self.platform = platform
        # self.decision = offloadingDecisionClass(self, currentSystemState, agentClass)
        # if agentClass is a class, create private
        if type(agent) is type(__class__):
            # print("agent class", agent)
            self.agent = agent(systemState=currentSystemState, owner=self, offPolicy=offPolicy,reconsiderBatches=reconsiderBatches, trainClassification=trainClassification)
        else:
            self.agent = agent
        # use a private clock unless a shared one is supplied
        if inputClock is None:
            self.currentTime = clock(self)
        else:
            self.currentTime = inputClock
        # self.resultsQueue = queue
        self.index = index
        # print("created index", index)
        self.maxJobs = maxJobs
        # self.nodeType = nodeType
        # self.reconsiderBatches = reconsiderBatches
        self.setMaxEnergyLevel()
        self.gracefulFailureLevel = currentSystemState.getGracefulFailureLevel()
        # print('graceful death level set to', self.gracefulFailureLevel)
        # sys.exit(0)
        self.drawLocation = (0,0)
        self.setComponents(components)
        self.reset(0)
        # self.episodeFinished = episodeFinished
        self.alwaysHardwareAccelerate = alwaysHardwareAccelerate
    def setMaxEnergyLevel(self, batterySize=constants.DEFAULT_ELASTIC_NODE.BATTERY_SIZE):
        """Set the battery capacity (in Joules) and refill the battery."""
        self.maxEnergyLevel = node.convertEnergy(batterySize, self.platform.BATTERY_VOLTAGE)
        self.resetEnergyLevel()
    def setTime(self, newTime):
        self.currentTime.set(newTime)
    def reset(self, episodeNumber):
        """Reset all per-episode state: queues, batches, energy, counters, agent."""
        self.previousTimestamp = 0
        self.jobQueue = PriorityQueue()
        self.currentJob = None
        self.currentSubtask = None
        self.currentBatch = None
        if self.taskQueue is not None:
            self.taskQueue = PriorityQueue()
        sim.debug.out("jobqueue" + str(self.jobQueue))
        self.hasJobScheduled = False
        self.resetEnergyLevel()
        self.averagePower = 0.05
        self.latestPower = 0
        self.powerCount = 0
        self.totalEnergyCost = 0
        self.totalSleepTime = 0
        self.jobActive = False
        self.numJobs = 0
        self.numJobsDone = 0
        self.numTasksDone = dict()
        self.batch = dict()
        self.currentTime.reset()
        # first run the components can be None
        if self.components is not None:
            for com in self.components:
                com.reset()
        self.agent.reset(episodeNumber)
        self.offloadingOptions = list(self.defaultOffloadingOptions)
    def resetEnergyLevel(self):
        self.energyLevel = self.maxEnergyLevel
        self.gracefulFailure = False
    # get node battery level, limited to different discrete bins if required
    def getEnergyLevel(self):
        return self.energyLevel
    # get node battery level, limited to different discrete bins if required
    def getEnergyLevelPercentage(self):
        return self.energyLevel / self.maxEnergyLevel
    def setComponents(self, components):
        """Store hardware components and cache the subset that are processors."""
        if components is None:
            return
        self.components = components
        self.processors = [component for component in self.components if isinstance(component, sim.devices.components.processor.processor)]
        # for processor in self.processors:
        # 	processor.timeOutSleep()
    @staticmethod
    # convert mAh to Joule
    def convertEnergy(mah, voltage):
        return mah * voltage * 3.6
    def __repr__(self):
        return "<" + str(type(self)) + " " + str(self.index) + ">"
    def setOffloadingOptions(self, allDevices):
        """Populate offloading targets from the agent's choice of *allDevices* (excluding self)."""
        self.offloadingOptions = []
        for device in self.agent.getOffloadingTargets(allDevices):
            if device is not self:
                self.offloadingOptions.append(device)
        self.defaultOffloadingOptions = list(self.offloadingOptions)
        debug.out("set offloading options for %s to %s" % (self, self.offloadingOptions))
    def removeOffloadingOption(self, device):
        if device in self.offloadingOptions:
            self.offloadingOptions.remove(device)
            debug.learnOut("removed offloading option %s %s" % (device, self.offloadingOptions))
    def removeDefaultOffloadingOption(self, device):
        if device in self.defaultOffloadingOptions:
            self.defaultOffloadingOptions.remove(device)
    # def setOptions(self, options):
    # 	self.setOffloadingDecisions(options)
    # def setOffloadingDecisions(self, devices):
    # 	self.agent.setOptions(devices)
    def getCurrentConfiguration(self):
        # default behaviour is to not have a configuration
        return 0
    def getNumTasksDone(self, taskname):
        if taskname not in self.numTasksDone:
            return 0
        else:
            return self.numTasksDone[taskname]
    def incrementTaskDone(self, taskname):
        if taskname not in self.numTasksDone:
            self.numTasksDone[taskname] = 1
        else:
            self.numTasksDone[taskname] += 1
    def hasFpga(self):
        return np.any([isinstance(component, fpga) for component in self.processors])
    def hasJob(self):
        # busy if any are busy
        return self.currentJob is not None
        # return self.jobActive # or self.waitingForResult or np.any([device.busy for device in self.components])
        # return len(self.jobQueue) > 0
    # def prependTask(self, subtask):
    # 	self.jobQueue = [subtask] + self.jobQueue
    # set active batch and activate this job
    def setActiveJob(self, job):
        # grab first task
        sim.debug.out("activating job")
        self.currentJob = job
        self.setCurrentBatch(job)
        # start first job in queue
        # print("newjob in setactivejob")
        return sim.tasks.subtask.newJob(job)
    # appends one job to the end of the task queue (used for queueing future tasks)
    def addSubtask(self, task):
        task.owner = self
        sim.debug.out("adding subtask %s %s" % (str(task), str(self)), 'b')
        # prioritised tasks without jobs (batchContinue mostly)
        taskPriority = task.job\
            if task.job is not None else 0
        # some simulations remove task queues (because they are queued elsewhere)
        if self.taskQueue is not None:
            self.taskQueue.put(PrioritizedItem(taskPriority, task))
        # # if added by another device (e.g. rxmessage), do not activate yet
        # if device is None or device == self:
        # if nothing else happening, start task
        self.nextTask()
    # used in deadlock resolving
    # def removeTask(self, task):
    # 	sim.debug.out("REMOVE TASK {0}".format(task))
    # 	self.taskQueue.remove(task)
    # 	sim.debug.out("{} {}".format(self.currentSubtask, task))
    # 	if self.currentSubtask is task:
    # 		self.currentSubtask = None
    # 	sim.debug.out ("next task...")
    # 	self.nextTask()
    def nextTask(self):
        """Pop the next subtask from the queue, unless one is already in progress."""
        # only change task if not performing one at the moment
        if self.currentSubtask is None:
            # check if there is another task is available
            if self.hasSubtask():
                nextSubTask = self.taskQueue.get()
                self.currentSubtask = nextSubTask.item
                sim.debug.out("next subtask %s from %s" % (nextSubTask, nextSubTask.priority))
                # # do receive tasks first, because other device is waiting
                # for task in self.taskQueue:
                # 	if isinstance(task, sim.subtask.rxMessage):
                # 		self.currentSubtask = task
                # 		self.taskQueue.remove(task)
                # 		break
                # # if still nothing, do a normal task
                # if self.currentSubtask is None:
                # 	# if any of the tasks have been started continue that
                # 	for task in self.taskQueue:
                # 		if task.started:
                # 			self.currentSubtask = task
                # 			self.taskQueue.remove(task)
                # 			break
                #
                # 	# lastly, see if tx messages are available
                # 	if self.currentSubtask is None:
                # 		# do receive tasks first, because other device is waiting
                # 		for task in self.taskQueue:
                # 			if isinstance(task, sim.subtask.txMessage):
                # 				self.currentSubtask = task
                # 				self.taskQueue.remove(task)
                # 				break
                #
                # 	# if nothing else to do, just do the oldest task that isn't a new job
                # 	if self.currentSubtask is None:
                # 		self.currentSubtask = self.taskQueue.popleft()
                # if len(self.taskQueue) > 1:
                if self.getNumSubtasks() > 1:
                    sim.debug.out("")
                    sim.debug.out("nextTask: {} {}".format(self.currentSubtask, self.taskQueue))
                    sim.debug.out("")
                # self.currentTask = self.taskQueue[0]
                # remove from queue because being processed now
                # self.taskQueue.remove(self.currentTask)
                self.currentSubtask.owner = self
                sim.debug.out(str(self) + " NEXT TASK " + str(self.currentSubtask))
            else:
                # sim.debug.out("no next task")
                self.currentSubtask = None
    def getNumJobs(self):
        return self.jobQueue.qsize()
    def getNumSubtasks(self):
        if self.taskQueue is None:
            return None
        else:
            return self.taskQueue.qsize()
    # check if this node has a subtask lined up
    def hasSubtask(self):
        # return len(self.taskQueue) > 0
        return self.getNumSubtasks() > 0
    # try another task if this one is stuck
    def swapTask(self):
        sim.debug.out(self, "SWAPPING TASK\n\n\n\n")
        if sim.debug.enabled:
            time.sleep(.1)
        # move current task to queue to be done later
        self.addSubtask(self.currentSubtask) # current task not None so nextTask won't start this task again
        self.currentSubtask = None
        self.nextTask()
    def asleep(self):
        """True only when every component reports sleeping."""
        for component in self.components:
            # if anything awake, device not sleeping
            if not component.isSleeping():
                return False
        # if it gets here, nothing is awake
        return True
    # reconsider each job in batch, maybe start it. return which device (if any) is affected
    def reconsiderBatch(self):
        sim.debug.learnOut("deciding whether to continue batch ({}) or not".format(self.batchLengths()), 'b')
        # sim.debug.out("Batch before: {0}/{1}".format(self.batchLength(self.currentJob.currentTask), sim.constants.MINIMUM_BATCH), 'c')
        sim.debug.out("Batch lengths before reconsider: {}".format(self.batchLengths()), 'c')
        for batchName in self.batch:
            currentBatch = self.batch[batchName]
            for job in currentBatch:
                sim.debug.out("considering job from batch {}".format(job))
                newChoice = self.agent.redecideDestination(job.currentTask, job, self)
                sim.debug.out("new decision: %s" % newChoice)
                # print("updated", newChoice)
                # check if just batching
                if newChoice == BATCH or newChoice.offloadingToTarget(self.index): # (isinstance(newChoice, sim.offloadingDecision.offloading) and newChoice.targetDeviceIndex == self.owner.index):
                    sim.debug.learnOut("just batching again: {}".format(newChoice), 'p')
                else:
                    # update destination
                    sim.debug.learnOut("changing decision to {}".format(newChoice), 'p')
                    job.setDecisionTarget(newChoice)
                    # remove it from the batch
                    self.removeJobFromBatch(job)
                    self.currentJob = job
                    return job.activate()
        sim.debug.out("Batch lengths after reconsider: {}".format(self.batchLengths()), 'c')
        return None
        # sim.debug.out("Batch after: {0}/{1}".format(self.currentJob.processingNode.batchLength(self.job.currentTask), sim.constants.MINIMUM_BATCH), 'c')
    def continueBatch(self, previousJob):
        """After *previousJob* ends, decide whether to start the next job in the current batch."""
        # assert task in self.batch
        assert self.currentBatch is not None
        if self.batchLength(self.currentBatch) == 0:
            debug.learnOut("no more in batch %s for %s" % (self.currentBatch, self))
            # print("batch done", self.currentBatch)
            self.currentJob = None
            return None
        debug.learnOut("continue batch for %s (%d)" % (self.currentBatch, self.batchLength(self.currentBatch)))
        # for name in self.batch:
        # 	print("name", name)
        # 	for j in self.batch[name]:
        # 		print(j,)
        # assert task == self.currentBatch
        # decide whether to continue with batch or not
        possibleNextJob = self.batch[self.currentBatch][0]
        if self.agent.reconsiderBatches:
            newChoice = self.agent.redecideDestination(possibleNextJob.currentTask, possibleNextJob, self)
            debug.learnOut("decided to continue batch at %s?: %s" % (possibleNextJob, newChoice))
            proceed = newChoice != BATCH
        else:
            # always continue batch
            newChoice = self.agent.getAction(LOCAL)
            # possibleNextJob.latestAction = self.agent.getActionIndex(newChoice)
            debug.learnOut("default to continue batch: %s" % newChoice)
            proceed = True
        if proceed:
            possibleNextJob.setDecisionTarget(newChoice)
        # if decided to continue with this batch
        if proceed:
            if self.batchLength(self.currentBatch) > 0:
                self.currentJob = self.batch[self.currentBatch][0]
                # previousJob is destroyed if offloaded due to graceful failure
                if not self.gracefulFailure:
                    self.currentJob.combineJobs(previousJob)
                self.removeJobFromBatch(self.currentJob)
                return self.currentJob.activate()
            else:
                raise Exception("wanted to continue with batch but nothing available")
        else:
            self.currentJob = None
            return None
    # calculate the energy at the current activity of all the components
    def energy(self): # , duration=sim.constants.TD):
        """Charge the battery for the current time slice at the present total power draw."""
        assert self.currentTd is not None
        # totalPower = 0
        # for component in self.components:
        # 	totalPower += component.power()
        totalPower = self.getTotalPower()
        if totalPower >= 1:
            sim.debug.out("massive power usage!")
            # sim.debug.enabled = True
        return self.updateDeviceEnergy(totalPower)
    def updateDeviceEnergy(self, totalPower):
        """Deduct totalPower * currentTd from the battery, advance the clock, and clear currentTd."""
        self.updateAveragePower(totalPower)
        assert self.currentTd is not None
        incrementalEnergy = totalPower * self.currentTd
        # ensure only using each time diff once
        self.totalEnergyCost += incrementalEnergy
        # TODO: assuming battery powered
        # print (incrementalEnergy)
        self.energyLevel -= incrementalEnergy
        self.latestPower = totalPower
        # print(self.currentTd, "@", totalPower)
        debug.out("updating device energy %f %f %f %f" % (self.currentTd, incrementalEnergy, self.totalEnergyCost, self.energyLevel))
        # update device time if local time used
        if self.currentTime is not None:
            self.previousTimestamp = self.currentTime.current
            self.currentTime.increment(self.currentTd)
        self.currentTd = None
        return incrementalEnergy
    def getComponentStates(self):
        return [component.getPowerState() for component in self.components]
    def getTotalPower(self):
        # calculate total power for all components
        totalPower = 0
        for component in self.components:
            # print("component", component, component.getPowerState())
            totalPower += component.power()
            # stdout.write("%f " % component.power())
        # stdout.write("\n")
        # stdout.flush()
        return totalPower
    def updateAveragePower(self, power):
        self.powerCount += 1
        # 1/n
        # self.averagePower += 1.0 / self.powerCount * (power - self.averagePower)
        # alpha
        self.averagePower += sim.simulations.constants.EXPECTED_LIFETIME_ALPHA * (power - self.averagePower)
        # sim.debug.out("average power: {}, {}".format(power, self.averagePower))
    def getAveragePower(self):
        return (self.maxEnergyLevel - self.energyLevel) / self.currentTime.current
    def updateDevice(self, subtask=None, visualiser=None):
        """Advance this device one step: pick the next job/subtask (or use *subtask*) and run it.

        Returns (affectedDevices, duration, devicePower) as produced by the subtask update.
        """
        affectedDevices = None
        affectedDevice = None
        devicePower = 0
        # if no jobs available, perhaps generate one
        debug.out("update device %s: %s (%s) [%s]" % (self, self.currentSubtask, self.currentJob, subtask), 'g')
        if subtask is None:
            # see if there's a job available
            if self.currentJob is None:
                # print(self, "next job")
                self.nextJob()
                debug.out("next job: %s" % self.currentJob)
            # restarting existing job
            elif self.currentJob.started and not self.currentJob.active:
                sim.debug.out("restarting existing job", 'r')
                affectedDevice = self.currentJob.activate()
            # check if there's something to be done now
            if self.currentSubtask is None:
                self.nextTask()
                debug.out("next subtask: %s" % self.currentSubtask)
        else:
            # ensure that correct subtask is being done
            if self.currentSubtask != subtask:
                if self.currentSubtask is None or self.taskQueue is None:
                    self.currentSubtask = subtask
                else:
                    print("current:", self.currentSubtask, "subtask:", subtask)
                    print(self.taskQueue)
                    time.sleep(0.5)
                    raise Exception("device already has different subtask!")
            debug.out("subtask specified: %s" % self.currentSubtask, 'b')
        if self.currentSubtask is None:
            debug.out("%s" % (self.batch))
        duration = None
        # do process and check if done
        if self.currentSubtask is not None:
            sim.debug.out("updating device %s %s %s" % (self, self.currentSubtask, self.getNumSubtasks()), 'r')
            affectedDevices, duration, devicePower = self.currentSubtask.update(visualiser) # only used in simple simulations
            # self.updatePreviousTimestamp(self.currentTime + duration)
            self.currentTime.increment(duration)
            debug.out("subtask %s time handled from %f to %f" % (self, self.previousTimestamp, self.currentTime.current), 'p')
            # print(affectedDevices, duration)
        else:
            # just idle, entire td is used
            self.currentTd = sim.simulations.constants.TD
        if affectedDevice is not None:
            affectedDevices += [affectedDevice]
        return affectedDevices, duration, devicePower
    def updatePreviousTimestamp(self, newTimestamp):
        if newTimestamp > self.previousTimestamp:
            self.previousTimestamp = newTimestamp
            debug.out("%s time handled to %f" % (self, self.previousTimestamp), 'p')
    # def timeOutSleep(self):
    # 	# check for idle sleep trigger
    # 	for component in self.components:
    # 		if isinstance(component, sim.devices.components.processor.processor):
    # 			component.timeOutSleep()
    # def updateSleepStatus(self, asleepBefore):
    # 	raise Exception("deprecated")
    # 	self.timeOutSleep()
    #
    # 	asleepAfter = self.asleep()
    #
    # 	if asleepBefore and asleepAfter:
    # 		warnings.warn("don't think this is correct")
    # 		self.totalSleepTime += self.currentTd
    def incremementTotalSleepTime(self, increment):
        self.totalSleepTime += increment
    def expectedLifetime(self):
        # estimate total life time based on previous use
        if self.averagePower == 0:
            return np.inf
        else:
            return self.energyLevel / self.averagePower
    def setCurrentBatch(self, job):
        # hardware-accelerated jobs batch by task; software jobs share batch key 0
        if job.hardwareAccelerated:
            self.currentBatch = job.currentTask
        else:
            self.currentBatch = 0
        sim.debug.out("Setting current batch to {} ({})".format(self.currentBatch, job), 'b')
        # time.sleep(.5)
    def addJobToBatch(self, job):
        if job.hardwareAccelerated:
            task = job.currentTask
        else:
            # 0 task indicates software solutions
            task = 0
        # print("adding job to batch", self.batch)
        # create list if new task
        if task not in self.batch.keys():
            self.batch[task] = list()
        self.batch[task].append(job)
    def batchLengths(self):
        return [len(batch) for key, batch in self.batch.items()]
    def maxBatchLength(self):
        """Return (length, index) of the longest batch; (0, 0) when no batches exist."""
        # investigate batch if not empty
        if self.batch:
            longestBatch = np.argmax([len(item) for key, item in self.batch.items()])
            return len(self.batch[list(self.batch.keys())[longestBatch]]), longestBatch
        else:
            return 0, 0
    def batchLength(self, task):
        # return batch length for a specific task
        if task in self.batch:
            return len(self.batch[task])
        else:
            sim.debug.out("batch {} does not exist".format(task))
            # print(self.batch)
            # print()
            return 0
    def isQueueFull(self, task):
        full = self.batchLength(task) >= self.maxJobs
        debug.out("jobs: %d full: %s" % (self.batchLength(task), full))
        return full
    def nextJobFromBatch(self):
        """If idle, pull the first job from the current (or longest) batch and make it current."""
        # print("currentjob", self.currentJob)
        if self.currentJob is None:
            # print([len(self.batch[batch]) > 0 for batch in self.batch])
            # if len(self.batch) > 0:
            # print ("keys", self.batch.keys())
            maxBatchLength, maxBatchIndex = self.maxBatchLength()
            # print('max batch', maxBatchLength, maxBatchLength)
            if maxBatchLength > 0:
                # check current batch
                if self.currentBatch is None:
                    # batch first job in batch
                    if self.batch.keys() is not None:
                        self.currentBatch = list(self.batch.keys())[maxBatchIndex]
                        # TODO: must keep going until all batches are empty
                    sim.debug.out("starting batch {}".format(self.currentBatch))
                sim.debug.out("grabbed job from batch")
                # if self.currentBatch not in self.batch.keys():
                # 	print ("Batch does not exist in node", self.batch, self.currentBatch)
                self.currentJob = self.batch[self.currentBatch][0]
                self.removeJobFromBatch(self.currentJob)
                return self.currentJob
            else:
                sim.debug.out("No more jobs in batch", 'c')
                # self.batchProcessing = False
                self.currentJob = None
                self.currentBatch = None
        # else:
        # 	print("already has active job...")
        # 	return self.currentJob
        return None
    def removeJobFromBatch(self, job):
        """Remove *job* from its batch; asserts it was found somewhere."""
        sim.debug.out("batch before remove {}".format(self.batch))
        found = False
        # check if current batch exists
        if self.currentBatch is not None:
            if job in self.batch[self.currentBatch]:
                self.batch[self.currentBatch].remove(job)
                found = True
            else:
                raise Exception("Could not find job to remove from batch")
        # no batch exists
        else:
            # find job in all batches
            for key in self.batch:
                if job in self.batch[key]:
                    self.batch[key].remove(job)
                    found = True
                    break
        assert found is True
        sim.debug.out("batch after remove {}".format(self.batch))
    def addJobToQueue(self, job):
        sim.debug.out("adding %s to queue of %s" % (job, self))
        self.jobQueue.put((job.id, job))
    def nextJob(self):
        """If idle, pull the next queued job and start it if brand new."""
        # print("nextjob", self.currentJob, self.getNumJobs(), )
        if self.currentJob is None:
            if self.getNumJobs() > 0:
                index, self.currentJob = self.jobQueue.get()
                sim.debug.out("grabbed job from queue: %s (%d)" % (self.currentJob, self.getNumJobs()))
                # self.jobQueue.remove(self.currentJob)
                # see if it's a brand new job
                if not self.currentJob.started:
                    affectedDevice, newSubtask = self.currentJob.start()
                    affectedDevice.addSubtask(newSubtask) # add subtask from brand new job
                    return affectedDevice, newSubtask
                else:
                    assert self.getNumSubtasks() > 0
                    sim.debug.out("\tALREADY STARTED")
        # no new jobs started
        return None
    def removeJob(self, job):
        sim.debug.out("REMOVE JOB FROM {}".format(self))
        # sim.debug.out ('{} {}'.format(self.jobQueue, job))
        # try to remove from queue (not there if from batch)
        # if job in self.jobQueue:
        # 	self.jobQueue.remove(job)
        # set as not current job
        if self.currentJob is job:
            # print("set job to NONE")
            self.currentJob = None
        sim.debug.out("resulting job: %s (%s)" % (self.currentJob, str(self.getNumSubtasks())))
    def checkGracefulFailure(self):
        if not self.gracefulFailure:
            if self.getEnergyLevelPercentage() <= self.gracefulFailureLevel:
                # print(self, "graceful failure", self.getEnergyLevelPercentage(), self.gracefulFailureLevel)
                self.performGracefulFailure()
    def performGracefulFailure(self):
        """Mark this node failed and remove it as an offloading target for its peers."""
        self.gracefulFailure = True
        # print(self.gracefulFailureLevel, self.getEnergyLevelPercentage())
        for dev in self.offloadingOptions: dev.removeOffloadingOption(self)
        # print("graceful failure with", self, self.batchLengths(), np.sum(self.batchLengths()))
    def hasOffloadingOptions(self):
        # print(self.offloadingOptions, len(self.offloadingOptions))
        return len(self.offloadingOptions) > 0
from dataclasses import dataclass, field
from typing import Any
@dataclass(order=True)
class PrioritizedItem:
    """Priority-queue entry: ordering compares *priority* only; *item* never participates."""
    priority: int
    item: Any = field(compare=False)

    def __repr__(self):
        return f"({self.priority:.2f} - {self.item})"
import sys
import logging
from pbcommand.common_options import add_log_debug_option
from pbcommand.cli import pacbio_args_runner, get_default_argparser
from pbcommand.utils import setup_log
from pbcommand.cli.utils import main_runner_default
from pbcommand.validators import validate_file
import pbsmrtpipe.report_renderer as R
import pbcommand.cli.utils as U
from pbcommand.pb_io import load_report_from_json
log = logging.getLogger(__name__)
slog = logging.getLogger('status.' + __name__)
__version__ = '1.0'
def _validate_report(path):
    """Validate that *path* exists and parses as a pbreport JSON report."""
    validated = validate_file(path)
    # Load purely as a validation step; the parsed report is discarded.
    load_report_from_json(path)
    return validated
def _add_output_file_option(p):
p.add_argument('--output-file', required=True, type=str, help="Path of output html")
return p
def _add_ccs_js_extras_option(p):
_d = "Write styled CSS and JS dirs/files"
p.add_argument('--with-extras', action='store_true', help=_d)
return p
def _add_report_option(p):
    """Attach the positional report path (validated at parse time) and return *p*."""
    p.add_argument('report_path', type=_validate_report,
                   help="Path to pbreport JSON report")
    return p
def _args_to_render_report(args):
    """Render the report referenced by *args* to HTML at args.output_file."""
    report = load_report_from_json(args.report_path)
    if args.with_extras:
        writer = R.write_report_with_html_extras
    else:
        writer = R.write_report_to_html
    return writer(report, args.output_file)
def get_parser():
    """Build the CLI parser for the report-to-HTML tool."""
    parser = get_default_argparser(__version__,
                                   "Transform pbreport Report to HTML file.")
    add_log_debug_option(parser)
    U.add_output_dir_option(parser)
    _add_report_option(parser)
    _add_output_file_option(parser)
    _add_ccs_js_extras_option(parser)
    parser.set_defaults(func=_args_to_render_report)
    return parser
def main(argv=None):
    """CLI entry point; defaults to sys.argv and returns the runner's exit code."""
    args = sys.argv if argv is None else argv
    return pacbio_args_runner(args[1:], get_parser(), _args_to_render_report,
                              log, setup_log)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import signal
import pygame
import time
import os
import subprocess
from pygame import mixer
from pygame.locals import *
from random import randint
class Player:
    """Random-shuffle music player wrapping pygame.mixer.music.

    Tracks are file paths held in self.list; each playback picks a random
    entry. play() doubles as a pause/unpause toggle once playback started.
    """

    def __init__(self):
        self.list = []           # playlist: track file paths
        self.play_flag = False   # True once a track has been started
        self.pause_flag = False  # True while playback is paused
        self.volume = 0.5        # current volume in [0.0, 1.0]
        self.curr_name = ''      # full path of the currently loaded track
        pygame.mixer.init()

    def add(self, item):
        """Append one track path to the playlist."""
        self.list.append(item)

    def load(self, filename):
        """Read track paths from *filename*, one per line, skipping blank lines."""
        with open(filename, 'r') as list_file:
            for line in list_file:
                # str.strip() covers the old strip('\n').strip() combination.
                line = line.strip()
                if line:
                    self.list.append(line)

    def play(self):
        """Start playback of a random track, or toggle pause if already playing."""
        if self.play_flag is False:
            mixer.music.load(self.next_name())
            mixer.music.play()
            self.play_flag = True
            self.pause_flag = False
        elif self.pause_flag is False:
            mixer.music.pause()
            self.pause_flag = True
        else:
            mixer.music.unpause()
            self.pause_flag = False

    def pause(self):
        """Pause playback unconditionally."""
        mixer.music.pause()
        self.pause_flag = True

    def add_volume(self, volume):
        """Adjust the volume by *volume* (may be negative), clamped to [0, 1]."""
        if volume > 0:
            self.volume = min(self.volume + volume, 1.0)
        else:
            self.volume = max(self.volume + volume, 0.0)
        mixer.music.set_volume(self.volume)

    def stop(self):
        """Stop playback and reset play/pause state."""
        mixer.music.stop()
        self.play_flag = False
        self.pause_flag = False

    def next_name(self):
        """Pick a random track from the playlist, remember and return its path."""
        self.curr_name = self.list[randint(0, len(self.list) - 1)]
        return self.curr_name

    def get_name(self):
        """Return the display name: basename of the track without its extension."""
        return self.curr_name.split('/')[-1].split('.')[0]

    def get_busy(self):
        """True while the mixer is actively playing."""
        return mixer.music.get_busy()

    def quit(self):
        mixer.quit()

    def get_time(self):
        """Return the length of the current track in seconds.

        Bug fix: previously this loaded mixer.Sound(self.get_name()), but
        get_name() strips the directory and extension, so the file could
        never be opened. Load the stored full path instead.
        """
        sound = mixer.Sound(self.curr_name)
        return sound.get_length()
# UI palette (RGB) and window geometry for the player interface.
COLOR_WHITE = (255, 255, 255)
COLOR_BLACK = (0, 0, 0)
COLOR_GRAY = (128, 128, 128)
COLOR_LIGHTGREEN = (108, 202, 138)    # normal button fill
COLOR_LIGHTGREEN_C = (171, 241, 194)  # hover (cursor-over) button fill
SCREEN_SIZE = [360, 200]  # window [width, height] in pixels
def _is_in_(position, rect):
return position[0] >= rect[0] and position[0] <= rect[0] + rect[2] and position[1] >= rect[1] and position[1] <= rect[1] + rect[3]
class ControlPause:
    """Pause button (top-right): clicking pauses playback and reveals the play control."""

    def __init__(self, manager):
        self.x = SCREEN_SIZE[0] - 40
        self.y = 6
        self.w = 30
        self.h = 30
        self.hide = False
        self.inflg = False         # True while the cursor is over the button
        self.name = 'ctl_pause'
        self.manager = manager
        self.color = COLOR_LIGHTGREEN

    def set_player(self, player):
        self.player = player

    def draw(self, screen):
        # Button background plus the two vertical "pause" bars.
        body = [self.x, self.y, self.w, self.h]
        left_bar = [self.x + 8, self.y + 6, self.w / 2 - 10, self.h - 12]
        right_bar = [self.x + self.w / 2 + 2, self.y + 6, self.w / 2 - 10, self.h - 12]
        pygame.draw.rect(screen, self.color, body)
        pygame.draw.rect(screen, COLOR_WHITE, left_bar)
        pygame.draw.rect(screen, COLOR_WHITE, right_bar)

    def on_click(self):
        # Show the play overlay, then pause the music.
        self.manager.forward('show', name='ctl_play')
        self.player.pause()

    def move_in(self):
        if not self.inflg:
            self.inflg = True
            self.color = COLOR_LIGHTGREEN_C  # hover highlight

    def move_out(self):
        self.inflg = False
        self.color = COLOR_LIGHTGREEN

    def is_in(self, position):
        if self.hide:
            return False
        return _is_in_(position, [self.x, self.y, self.w, self.h])

    def show(self):
        self.hide = False
class ControlPlay:
    """Invisible full-screen click target that starts (or resumes) playback."""

    def __init__(self, manager):
        self.x = 0
        self.y = 0
        self.w = SCREEN_SIZE[0]
        self.h = SCREEN_SIZE[1]
        self.hide = True           # hidden until the pause control reveals it
        self.inflg = False         # True while the cursor is over the control
        self.name = 'ctl_play'
        self.manager = manager

    def set_player(self, player):
        self.player = player

    def draw(self, screen):
        # Nothing to render: this control is a transparent click target.
        if self.hide:
            return

    def on_click(self):
        self.player.play()
        # Push the current track name to the title text control.
        self.manager.forward('set_content', name='ctl_txt_name',
                             args=self.player.get_name())
        self.hide = True

    def move_in(self):
        if not self.inflg:
            self.inflg = True

    def move_out(self):
        self.inflg = False

    def is_in(self, position):
        if self.hide:
            return False
        return _is_in_(position, [self.x, self.y, self.w, self.h])

    def show(self):
        self.hide = False
class ControlNext:
    """Next-track button (bottom-right): two solid right-pointing triangles."""

    def __init__(self, manager):
        self.x = SCREEN_SIZE[0] - 40
        self.y = SCREEN_SIZE[1] - 60
        self.w = 30
        self.h = 30
        self.hide = False
        self.inflg = False        # True while the mouse hovers this widget
        self.name = 'ctl_next'    # key used by ControlManager.forward(name=...)
        self.manager = manager
        self.color = COLOR_BLACK

    def set_player(self, player):
        self.player = player

    def draw(self, screen):
        # Two adjacent right-pointing triangles (fast-forward glyph).
        l_pos_list = [(self.x, self.y), (self.x, self.y+self.h), (self.x+self.w/2+4,self.y+self.h/2)]
        r_pos_list = [(self.x+self.w/2, self.y), (self.x+self.w/2, self.y+self.h), (self.x+self.w,self.y+self.h/2)]
        pygame.draw.polygon(screen, self.color, l_pos_list)
        pygame.draw.polygon(screen, self.color, r_pos_list)

    def on_click(self):
        # Stop the current track, then trigger the play control to restart
        # playback (presumably advancing the queue — Player not shown here).
        self.player.stop()
        self.manager.forward('on_click', name='ctl_play')

    def move_in(self):
        if self.inflg: return
        self.inflg = True
        self.color = COLOR_GRAY    # hover highlight

    def move_out(self):
        self.inflg = False
        self.color = COLOR_BLACK

    def is_in(self, position):
        if self.hide: return False
        return _is_in_(position, [self.x, self.y, self.w, self.h])

    def show(self):
        self.hide = False
class ControlProgressBar:
    """Static progress-bar track across the middle of the window.

    Only the empty outline is drawn; no playback-position fill is
    implemented.
    """

    def __init__(self, manager):
        self.x = 20
        self.y = SCREEN_SIZE[1] / 2
        self.w = SCREEN_SIZE[0] - 40
        self.h = 2
        self.hide = False
        self.inflg = False       # True while the mouse hovers this widget
        self.name = 'ctl_prb'    # key used by ControlManager.forward(name=...)
        self.manager = manager

    def set_player(self, player):
        self.player = player

    def draw(self, screen):
        m_position = [self.x, self.y, self.w, self.h]
        pygame.draw.rect(screen, COLOR_GRAY, m_position, 1)

    def on_click(self):
        # Fix: the body was only a commented-out print, which is a syntax
        # error (a def needs at least one statement). Clicking the bar
        # intentionally does nothing.
        pass

    def move_in(self):
        if self.inflg: return
        self.inflg = True

    def move_out(self):
        self.inflg = False

    def is_in(self, position):
        if self.hide: return False
        return _is_in_(position, [self.x, self.y, self.w, self.h])

    def show(self):
        self.hide = False
class ControlText:
    """Single-line label showing the current track name; the text is also
    mirrored into the window caption on every draw."""

    def __init__(self, manager):
        self.x = 20
        self.y = SCREEN_SIZE[1]/2 - 32
        self.w = SCREEN_SIZE[0] - 40
        self.h = 30
        self.hide = False
        self.inflg = False            # True while the mouse hovers this widget
        self.name = 'ctl_txt_name'    # key used by ControlManager.forward(name=...)
        self.manager = manager
        self.content = ''
        self.font = pygame.font.SysFont("arial", 20)

    def set_player(self, player):
        self.player = player

    def set_content(self, content):
        # Fix: this method was defined twice with identical bodies; the
        # redundant second definition has been removed.
        self.content = content

    def draw(self, screen):
        text = self.font.render(self.content, True, COLOR_BLACK)
        screen.blit(text, (self.x, self.y+(self.h-18)/2))
        pygame.display.set_caption(self.content)

    def notify(self):
        # Desktop notification is deliberately disabled by this early
        # return; the notify-send invocation below is kept for reference.
        return
        subprocess.call([
            'notify-send',
            '-i',
            '',
            self.content,
            ''])

    def on_click(self):
        # Fix: the body was only a commented-out print, which is a syntax
        # error. Clicking the label intentionally does nothing.
        pass

    def move_in(self):
        if self.inflg: return
        self.inflg = True

    def move_out(self):
        self.inflg = False

    def is_in(self, position):
        if self.hide: return False
        return _is_in_(position, [self.x, self.y, self.w, self.h])

    def show(self):
        self.hide = False
class ControlManager:
    """Registry of UI controls with simple event dispatch.

    Controls are stored as (class, instance) pairs; forward() routes an
    event to the first control hit by screen position or matched by name.
    """

    def __init__(self):
        self.controls = []

    def add(self, ctlclass, control):
        """Register *control*; *ctlclass* is kept for interface compatibility."""
        self.controls.append((ctlclass, control))

    def forward(self, function, args=None, position=None, name=None):
        """Call method *function* on the first matching control.

        A control matches when *position* falls inside it (is_in) or when
        *name* equals its ``name`` attribute. While dispatching by position,
        any hovered control the pointer has left receives move_out() first.
        *args*, when given, is passed as the single argument.
        """
        for ctlclass, control in self.controls:
            if position is not None:
                if control.inflg and not control.is_in(position):
                    control.move_out()
            if (position is not None and control.is_in(position)) or \
               (name is not None and control.name == name):
                # Improvement: getattr on the instance instead of
                # ctlclass.__dict__[function] — equivalent for these flat
                # classes, but also works for methods inherited from a base.
                handler = getattr(control, function)
                if args is None:
                    handler()
                else:
                    handler(args)
                break

    def draw(self, screen):
        """Draw every registered control onto *screen*."""
        for ctlclass, control in self.controls:
            control.draw(screen)

    def set_player(self, player):
        """Hand the shared Player instance to every control."""
        for ctlclass, control in self.controls:
            control.set_player(player)
class Lemon:
    """Main application: builds the control widgets, owns the Player and
    runs the pygame event loop."""

    def __init__(self):
        pygame.init()
        pygame.font.init()
        self.stop_flag = False
        self.ctlmanager = ControlManager()
        self.ctlmanager.add(ControlPlay, ControlPlay(self.ctlmanager))
        self.ctlmanager.add(ControlPause, ControlPause(self.ctlmanager))
        self.ctlmanager.add(ControlNext, ControlNext(self.ctlmanager))
        self.ctlmanager.add(ControlProgressBar, ControlProgressBar(self.ctlmanager))
        self.ctlmanager.add(ControlText, ControlText(self.ctlmanager))
        self.player = Player()

    def start(self):
        """Open the window, wire the player to the controls, autostart playback."""
        self.screen = pygame.display.set_mode(SCREEN_SIZE)
        pygame.display.set_caption("Lemon")
        self.ctlmanager.set_player(self.player)
        self.draw()
        pygame.time.delay(1000)  # brief splash before autoplay
        self.ctlmanager.forward('on_click', name='ctl_play')

    def update(self):
        pygame.display.update()

    def draw(self):
        """Repaint the whole window: white background plus every control."""
        self.screen.fill(COLOR_WHITE)
        self.ctlmanager.draw(self.screen)
        self.update()

    def run(self):
        """Event loop: keyboard shortcuts, mouse dispatch and auto-advance."""
        while not self.stop_flag:
            self.draw()
            for event in pygame.event.get():
                if event.type == pygame.QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                    self.stop_flag = True
                if event.type == KEYDOWN:
                    # NOTE(review): K_LEFT and K_RIGHT both skip forward —
                    # "previous track" looks unimplemented; confirm intent.
                    if event.key == K_LEFT:
                        self.ctlmanager.forward('on_click', name='ctl_next')
                    elif event.key == K_RIGHT:
                        self.ctlmanager.forward('on_click', name='ctl_next')
                    elif event.key == K_UP:
                        self.player.stop()
                    elif event.key == K_DOWN:
                        self.ctlmanager.forward('on_click', name='ctl_play')
                    elif event.key == K_EQUALS:
                        self.player.add_volume(0.1)
                    elif event.key == K_MINUS:
                        self.player.add_volume(-0.1)
                elif event.type == MOUSEBUTTONUP:
                    self.ctlmanager.forward('on_click', position=pygame.mouse.get_pos())
                elif event.type == MOUSEMOTION:
                    self.ctlmanager.forward('move_in', position=pygame.mouse.get_pos())
            time.sleep(0.01)  # throttle the loop to ~100 iterations/s
            if not self.player.get_busy():
                # Current track finished: advance to the next one.
                self.ctlmanager.forward('on_click', name='ctl_next')
        self.quit()

    def quit(self):
        self.player.quit()
        pygame.quit()

    def load_mp3(self):
        """Read directory paths from path.lst and queue every .mp3 in each."""
        with open('path.lst', 'r') as path_file:
            for line in path_file.readlines():
                self.load_mp3_with_path(line.strip('\n'))

    def load_mp3_with_path(self, path):
        """Queue every file ending in '.mp3' found directly under *path*."""
        if len(path) == 0: return
        for file_item in os.listdir(path):
            if file_item[-4:] == '.mp3':
                self.player.add(path.rstrip('/') + '/' + file_item)
if __name__ == '__main__':
    # Python 2 only: reload(sys) + setdefaultencoding force utf-8 for the
    # implicit str/unicode conversions (e.g. track names) used above.
    reload(sys)
    sys.setdefaultencoding('utf8')
    lemon = Lemon()
    lemon.load_mp3()
    lemon.start()
    lemon.run()
|
import os
import csv
from collections import defaultdict
import argparse
def ReadMyCsv(SaveList, fileName):
    """Append every row of CSV file *fileName* (each a list of strings)
    to *SaveList*.

    Mutates *SaveList* in place; returns None.
    """
    # Fix: the file handle was opened inline and never closed; a context
    # manager guarantees it is released.
    with open(fileName) as csvfile:
        for row in csv.reader(csvfile):
            SaveList.append(row)
    return
def StorFile(data, fileName):
    """Write the rows in *data* to *fileName* as CSV and return None."""
    with open(fileName, "w", newline='') as csvfile:
        csv.writer(csvfile).writerows(data)
    return
def GenerateBitscoreFeatureStep1(Remainingfilename):
    """Extract every FASTA header name from *Remainingfilename* into
    Database_Gene_Name.txt, one name per line.

    Lines of the form '>NAME' yield NAME; sequence lines (no '>') are
    skipped.
    """
    # Fixes: both file handles now use context managers (they were never
    # closed), and the bare except is narrowed to the IndexError raised by
    # non-header lines so real errors are no longer swallowed.
    with open("Database_Gene_Name.txt", 'w') as out:
        with open(Remainingfilename) as src:
            for line in src:
                parts = line.strip().split(">")
                try:
                    name = parts[1]
                except IndexError:
                    continue
                if name:
                    out.write(name)
                    out.write("\n")
def GenerateBitscoreFeatureStep2(filename):
    """Create Feature0_Matrix.txt: one row per query protein in *filename*,
    [name, 0.0, 0.0, ...] with one zero column per database gene name
    (count taken from Database_Gene_Name.txt written by Step1).
    """
    final_pair = []
    # Fix: use a context manager for the input file and narrow the bare
    # except to the IndexError raised by non-header lines.
    with open("Database_Gene_Name.txt") as f:
        num = len(f.readlines())
    with open(filename) as src:
        for line in src:
            parts = line.strip().split(">")
            try:
                name = parts[1]
            except IndexError:
                continue
            if name:
                final_pair.append([name] + [0.0] * num)
    StorFile(final_pair, "Feature0_Matrix.txt")
def GenerateBitscoreFeatureStep3(filename, Remainingfilename, Diamond_Path):
    """Build a DIAMOND database from *Remainingfilename*, then blastp the
    query proteins in *filename* against it, writing the hit table to
    ArgVFNeg_diamond_Database.txt."""
    makedb_cmd = " ".join([Diamond_Path, "makedb --in", Remainingfilename,
                           "--db Database_GENE.dmnd"])
    blastp_cmd = " ".join([Diamond_Path, "blastp --db Database_GENE.dmnd --query",
                           filename,
                           "--out ArgVFNeg_diamond_Database.txt --more-sensitive"])
    os.system(makedb_cmd)
    os.system(blastp_cmd)
    # NOTE: a filtering pass that derived ArgVFNeg_diamond_Database2.txt
    # (dropping self-hits) used to live here and is currently disabled;
    # Step4 still reads that file.
def GenerateBitscoreFeatureStep4():
    # Fill the zero matrix from Step2 with DIAMOND bit scores, writing
    # Bit_Score_Feature_Matrix.txt (comma-separated, one row per query,
    # no trailing newline after the last row).
    bit_score_feature_file = open("Bit_Score_Feature_Matrix.txt", 'w')
    with open("Feature0_Matrix.txt") as f:
        all_line = f.readlines()
    all_num = len(all_line)
    with open("Database_Gene_Name.txt") as f3:
        all_line3 = f3.readlines()
    # Map: database gene name -> list of its row indices (first one used).
    Database_Gene_Name_dict=defaultdict(list)
    for i in range(len(all_line3)):
        seq=all_line3[i].strip()
        Database_Gene_Name_dict[seq].append(i)
    with open("ArgVFNeg_diamond_Database2.txt") as y:
        match_lines = y.readlines()
    # Map: query name -> flat [subject, bitscore, subject, bitscore, ...]
    # (columns 0, 1 and last of the DIAMOND tab-separated hit lines).
    ArgVFNeg_diamond_Database_dict = defaultdict(list)
    for i in range(len(match_lines)):
        seq = match_lines[i].strip().split("\t")
        ArgVFNeg_diamond_Database_dict[seq[0]].append(seq[1])
        ArgVFNeg_diamond_Database_dict[seq[0]].append(seq[-1])
    countnum = 0
    for i in open("Feature0_Matrix.txt"):
        print("countnum", countnum)  # progress logging
        countnum += 1
        i = i.strip().split(",")
        gene_name = i[0]
        list1=ArgVFNeg_diamond_Database_dict[gene_name]
        # Walk the (subject, score) pairs; the subject's position in the
        # database name list selects the column to overwrite.
        # NOTE(review): i[posi] indexes the row directly, but i[0] is the
        # gene name, so a hit on database gene 0 would overwrite the name —
        # looks like it should be i[posi+1]; confirm against the data.
        for j in range(0,len(list1),2):
            posi=Database_Gene_Name_dict[list1[j]][0]
            i[posi]=list1[j+1]
        for m in range(len(i) - 1):
            bit_score_feature_file.writelines(i[m])
            bit_score_feature_file.write(",")
        bit_score_feature_file.writelines(i[-1])
        if countnum < all_num:
            bit_score_feature_file.write("\n")
def GenerateBitscoreFeatureStep5():
    """Row-wise min-max normalize Bit_Score_Feature_Matrix.txt into
    Normalized_Bit_Score_Feature_Matrix.txt.

    Each output row keeps the gene name in column 0; the remaining values
    become (v - min) / (max - min) for that row. Rows whose scores are all
    equal are copied through unchanged.

    Fixes vs. the original:
    - min/max builtins are no longer shadowed; true min()/max() replace the
      sentinel initializers (10000.0 / -1.0), which gave wrong results for
      rows whose scores all exceeded 10000.
    - rows with all scores equal but nonzero no longer crash with
      ZeroDivisionError (only the all-zero case was guarded before).
    - both files are closed via context managers.
    """
    with open("Bit_Score_Feature_Matrix.txt") as src:
        lines = src.readlines()
    total = len(lines)
    with open("Normalized_Bit_Score_Feature_Matrix.txt", 'w') as out:
        for row_num, raw in enumerate(lines, start=1):
            print(row_num)  # progress logging, as before
            fields = raw.strip().split(",")
            scores = [float(v) for v in fields[1:]]
            lo = min(scores)
            hi = max(scores)
            out.write(fields[0])
            out.write(",")
            if lo == hi:
                # Degenerate row: copy the original strings through.
                out.write(",".join(fields[1:]))
            else:
                out.write(",".join(str((v - lo) / (hi - lo)) for v in scores))
            # No trailing newline after the last row.
            if row_num < total:
                out.write("\n")
def GenerateBitscoreFeature(filename, Remainingfilename, outputDir, Diamond_Path):
    """Run the full bit-score feature pipeline (Steps 1-5), then move every
    intermediate and result file into <outputDir>/Bitscore.
    """
    GenerateBitscoreFeatureStep1(Remainingfilename)
    GenerateBitscoreFeatureStep2(filename)
    GenerateBitscoreFeatureStep3(filename, Remainingfilename, Diamond_Path)
    GenerateBitscoreFeatureStep4()
    GenerateBitscoreFeatureStep5()
    # Fix: only outputDir itself was created before, so each
    # "mv <file> outputDir/Bitscore" renamed the file to a *file* named
    # Bitscore, every move clobbering the previous one. Create the
    # Bitscore directory (and any parents) first.
    bitscore_dir = os.path.join(outputDir, "Bitscore")
    os.makedirs(bitscore_dir, exist_ok=True)
    for produced in ("ArgVFNeg_diamond_Database.txt",
                     "ArgVFNeg_diamond_Database2.txt",
                     "Bit_Score_Feature_Matrix.txt",
                     "Database_GENE.dmnd",
                     "Database_Gene_Name.txt",
                     "Feature0_Matrix.txt",
                     "Normalized_Bit_Score_Feature_Matrix.txt"):
        os.system("mv " + produced + " " + bitscore_dir)
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the bit-score pipeline.
    arg_parser = argparse.ArgumentParser(usage="it's usage tip.", description="generate bitscore feature")
    arg_parser.add_argument("--file", required=True, help="protein sequence file in fasta format")
    arg_parser.add_argument("--db_file", required=True, help="protein sequence file in fasta format")
    arg_parser.add_argument("--diamond_path", help="the path of diamond program")
    arg_parser.add_argument("--outdir", help="the path of out dir")
    opts = arg_parser.parse_args()
    GenerateBitscoreFeature(opts.file, opts.db_file, opts.outdir, opts.diamond_path)
|
# -*- coding: utf-8 -*-
# Odoo module manifest: enables the customer / vendor toggles on partners.
{
    'name': "Activar Cliente / Proveedor en Socio",
    'summary': """
Activar si es Cliente / Proveedor en Socio""",
    'description': """
Activar si es Cliente / Proveedor en el módulo de Partner.
""",
    'author': "TH",
    'website': "http://www.cabalcon.com",
    'category': 'Tools',
    'version': '1.1',
    # Modules that must be installed for this one to work correctly.
    'depends': ['contacts'],
    # Data files always loaded at install/update.
    'data': [
        # 'security/ir.model.access.csv',
        'views/res_partner_views.xml',
    ],
}
|
# -- coding: utf-8 --
from sys import argv #调用 sys 模块的 argv 函数
script, filename = argv # 把两个值赋给 argv 运行的时候要把变量给 argv可以是文件
txt_file = open(filename)
print "Here's your file %r." % filename
print txt_file.read()
filename_again = raw_input ("Please type:\n")
the_file = open(filename_again) #打印txt 文件
a = the_file.read()
print a
the_file.close()
|
from Perceptron import *
import pandas as pd
import matplotlib.pyplot as plt

# Load the seeds dataset (no header row). np below presumably comes from
# Perceptron's star import — TODO confirm.
data_frame = pd.read_csv("seeds_dataset.txt", header=None)

# Select the Rosa (class 2) and Canadian (class 3) samples: rows 70-209.
# Column 7 holds the species class label.
y = data_frame.iloc[70:210, 7].values
# Label species "2" (Rosa) as -1, everything else (i.e. "3", Canadian) as 1.
y = np.where(y == 2, -1, 1)

# Extract two feature columns: 0 (area) and 6.
# NOTE(review): the original comment claimed column 4 / "length of kernel
# groove", but the code takes column 6 — confirm which was intended.
X = data_frame.iloc[70:210, [0, 6]].values

# Scatter-plot the two classes: the first 70 selected rows are Rosa,
# the next 70 are Canadian.
plt.scatter(X[:70, 0], X[:70, 1], color="red", marker="o", label="Rosa")
plt.scatter(X[70:140, 0], X[70:140, 1], color="blue", marker="x", label="Canadian")
plt.xlabel("area of kernel [mm*2]")
plt.ylabel("length of kernel groove [mm]")
plt.legend(loc="upper left")
plt.show()
# Generated by Django 2.0.1 on 2018-03-11 20:30
# Auto-generated migration: adds Genre and the ShowGenre join table linking
# Show and Genre, plus episode/season number fields on Episode. Avoid
# hand-editing; make schema changes in a new migration instead.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('shows', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='ShowGenre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shows.Genre')),
                ('show', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shows.Show')),
            ],
        ),
        migrations.AddField(
            model_name='episode',
            name='episode_nr',
            field=models.IntegerField(default=None),
        ),
        migrations.AddField(
            model_name='episode',
            name='season_nr',
            field=models.IntegerField(default=None),
        ),
    ]
|
#encoding: utf-8
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from feitec.projeto.models import *
from django.forms import ModelForm, TextInput, PasswordInput, HiddenInput
from django.shortcuts import render_to_response,get_object_or_404
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from forms import CadastroForm
from django.contrib.auth.models import User
from django.forms.models import inlineformset_factory
from django.contrib.localflavor.br.forms import BRCPFField, BRPhoneNumberField
from django.core.mail import send_mail, get_connection, EmailMessage
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from django.conf import settings
from cStringIO import StringIO
from datetime import datetime
import datetime
import os
#Pagina principal
@login_required
def principal(request):
    """Landing page.

    POST: redirect to the project-creation form with the posted teacher
    ('professor') and member ('integrante') row counts. GET: the manager
    account (username 'gerencia') sees all areas plus the approved-project
    count; other users see only the projects they created.
    """
    if request.method == "POST":
        numero = request.POST['professor']
        numero2 = request.POST['integrante']
        return HttpResponseRedirect(reverse('feitec.projeto.views.criar_projeto' ,args=[numero,numero2]))
    else:
        usuario = request.user
        nome = request.user.username
        gerente ='gerencia'
        if (nome==gerente):
            cont = Projeto.objects.filter(situacao="Aprovado").count()
            area = Area.objects.all()
            return render_to_response('principal.html',{'area':area,'usuario':usuario,'nome':nome,'cont':cont,},context_instance=RequestContext(request))
        else:
            projeto = Projeto.objects.filter(criador = usuario)
            return render_to_response('principal.html',{'projeto':projeto,'usuario':usuario,},context_instance=RequestContext(request))
#Cadastro de Usuario
def cadastrar(request):
    """User self-registration: validate and save CadastroForm on POST,
    otherwise render the empty form."""
    if request.POST:
        form = CadastroForm(request.POST)
        if form.is_valid():
            novo_usuario = form.save()
            return HttpResponseRedirect('/')
        else:
            return render_to_response('cadastrar.html', {'form': form}, context_instance=RequestContext(request))
    else:
        form = CadastroForm()
        return render_to_response('cadastrar.html', {'form': form}, context_instance=RequestContext(request))
def consultar_esqueci(request):
    """Forgotten-password entry page (GET only; POST falls through to 404)."""
    if request.method == "POST":
        return render_to_response('404.html')
    else:
        return render_to_response('esqueci_email.html', context_instance = RequestContext(request))
def resultado_esqueci(request):
    """Password reset (POST only): generate a random password for the account
    matching the posted e-mail, mail it, and save the new password hash."""
    if request.method == 'POST':
        try:
            email = request.POST['email']
            consulta = User.objects.get(email=email)
            mensagem = 'Foi enviado ao seu email sua nova senha:'
            # Alphabet omits easily-confused characters (0/O, 1/l/I).
            random = User.objects.make_random_password(length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789')
            consulta.set_password(random)
            subject = 'Feitec Nova Senha:'
            message = ('Sua nova senha é:' + random)
            from_email = ''  # sender address (left blank in the repo)
            connection = get_connection(username = '', password ='')  # SMTP credentials
            send_email = EmailMessage(subject, message , from_email, [consulta.email], connection = connection)
            send_email.content_subtype = "html"
            send_email.send()
            # Persist the new password only after the mail was sent.
            consulta.save()
        except ObjectDoesNotExist:
            consulta = ''
            mensagem = 'Este email não está cadastrado em nosso sistema!'
        return render_to_response('esqueci_email.html',{ 'mensagem' : mensagem,}, context_instance = RequestContext(request))
    else:
        return render_to_response('404.html')
#Cadastro de Projeto get
@login_required
def criar_projeto(request, numero , numero2):
    """Render the project-creation form (GET only) with *numero* extra
    member rows and *numero2* extra teacher rows.

    NOTE(review): principal() passes the 'professor' count as *numero*,
    but *numero* sizes the member (Integrante) formset here — the two
    counts look swapped; confirm against the templates.
    """
    usuario = request.user
    if request.method == "POST":
        return render_to_response('404.html')
    else:
        teste = int(numero)
        teste2 = int(numero2)
        IntegranteFormSet = inlineformset_factory(Projeto, Integrante, form=ProjetoModelForm , extra=teste)
        ProfessorFormSet = inlineformset_factory(Projeto, Professor, form=ProjetoModelForm , extra=teste2)
        # Pre-filled initial data for the project form.
        inscricao = {
            'criador':usuario,
            'area':1,
            'campus':1,
        }
        form = ProjetoModelForm(inscricao)
        formset = IntegranteFormSet()
        formset2 = ProfessorFormSet()
        return render_to_response('cadastrar_projeto.html', {'formset':formset,'formset2':formset2,'form': form,'usuario':usuario,'teste':teste,'teste2':teste2}, context_instance=RequestContext(request))
#Cadastro de Projeto post
@login_required
def cadastrar_projeto(request):
    """Persist a new project plus its member and teacher inline formsets
    (POST only; anything else gets the 404 page).

    Fix: the teacher formset (formset2) was saved without ever being
    validated; both formsets are now checked before any save.
    """
    IntegranteFormSet = inlineformset_factory(Projeto, Integrante, form=ProjetoModelForm)
    ProfessorFormSet = inlineformset_factory(Projeto, Professor, form=ProjetoModelForm)
    if request.method == 'POST':
        form = ProjetoModelForm(request.POST)
        if form.is_valid():
            projeto = form.save(commit=False)
            formset = IntegranteFormSet(request.POST, instance=projeto)
            formset2 = ProfessorFormSet(request.POST, instance=projeto)
            if formset.is_valid() and formset2.is_valid():
                c = form.save()
                d = formset.save()
                e = formset2.save()
                return HttpResponseRedirect('/')
            else:
                # NOTE(review): render_to_response expects a template name;
                # passing form.errors is the (dubious) pattern used across
                # this file — left as-is.
                return render_to_response(form.errors)
        else:
            return render_to_response('projeto_existe.html',context_instance=RequestContext(request))
    else:
        return render_to_response('404.html')
#Editar projeto
@login_required
def mostra_dados_projeto(request, codigo):
    """Edit an existing project with its member/teacher inline formsets.

    GET renders the pre-populated forms; POST validates and saves.

    Fix: both error branches referenced an undefined name ``form`` (the
    bound form variable here is ``f``), which raised NameError instead of
    surfacing the validation errors.
    """
    usuario = request.user
    projeto = get_object_or_404(Projeto, pk=codigo)
    IntegranteFormSet = inlineformset_factory(Projeto, Integrante,extra = 2)
    ProfessorFormSet = inlineformset_factory(Projeto, Professor,extra = 2)
    codigo = projeto.codProj
    if request.method == 'POST':
        f = ProjetoModelForm(request.POST, instance=projeto)
        if f.is_valid():
            project = f.save(commit=False)
            formset = IntegranteFormSet(request.POST, instance=project)
            formset2 = ProfessorFormSet(request.POST, instance=project)
            if formset.is_valid():
                c = f.save()
                d = formset.save()
                e = formset2.save()
                return HttpResponseRedirect('/')
            else:
                return render_to_response(f.errors)
        else:
            return render_to_response(f.errors)
    else:
        f = ProjetoModelForm(instance=projeto)
        formset = IntegranteFormSet(instance=projeto)
        formset2 = ProfessorFormSet(instance=projeto)
        return render_to_response('alterar_dados.html', {'formset':formset,'formset2':formset2,'form':f, 'codigo':codigo,'usuario':usuario,},context_instance=RequestContext(request))
@login_required
def criar_alterar(request,codigo):
    """Re-render the project edit form with a user-chosen number of extra
    member ('integrante') and teacher ('professor') rows (POST only)."""
    usuario = request.user
    if request.method == "POST":
        projeto = get_object_or_404(Projeto, pk=codigo)
        # These two factories with extra=2 are immediately overwritten a few
        # lines below — kept as in the original.
        IntegranteFormSet = inlineformset_factory(Projeto, Integrante,extra = 2)
        ProfessorFormSet = inlineformset_factory(Projeto, Professor,extra = 2)
        codigo = projeto.codProj
        integrante = request.POST['integrante']
        professor = request.POST['professor']
        teste = int(integrante)
        teste2 = int(professor)
        IntegranteFormSet = inlineformset_factory(Projeto, Integrante,extra = teste)
        ProfessorFormSet = inlineformset_factory(Projeto, Professor,extra = teste2)
        f = ProjetoModelForm(instance=projeto)
        formset = IntegranteFormSet(instance=projeto)
        formset2 = ProfessorFormSet(instance=projeto)
        return render_to_response('alterar_dados.html', {'formset':formset,'formset2':formset2,'form':f, 'codigo':codigo,'usuario':usuario},context_instance=RequestContext(request))
    else:
        return render_to_response('404.html')
@login_required
def numerar_participantes(request,codigo):
    """List a project's members and teachers (GET only)."""
    usuario = request.user
    if request.method == "GET":
        projeto = Projeto.objects.get(codProj = codigo)
        integrante = Integrante.objects.filter(projeto = codigo)
        professor = Professor.objects.filter(projeto = codigo)
        return render_to_response('numerar_participantes.html',{'codigo':codigo,'projeto':projeto,'integrante':integrante,'professor':professor,'usuario':usuario},context_instance=RequestContext(request))
    else:
        return render_to_response('404.html')
#Apagar projeto
@login_required
def apagar_projeto(request, codigo):
    """Delete the project with pk *codigo* and return to the home page.

    NOTE(review): deletion happens on any request method, with no
    confirmation or ownership check beyond login — confirm this is intended.
    """
    projeto = get_object_or_404(Projeto, pk=codigo)
    projeto.delete()
    return HttpResponseRedirect('/')
#Projeto sem poder editar
@login_required
def projeto(request, projeto_codProj):
    """Read-only project detail page (GET only).

    locals() is passed as the template context, so the local variable names
    (projeto, integrante, professor, usuario) are the context keys — do not
    rename them without updating the template.
    """
    usuario = request.user
    if request.method == "POST":
        return render_to_response('404.html')
    else:
        projeto = Projeto.objects.get(codProj = projeto_codProj)
        integrante = Integrante.objects.filter(projeto = projeto_codProj)
        professor = Professor.objects.filter(projeto = projeto_codProj)
        usuario = request.user
        return render_to_response('projeto.html',locals(),context_instance=RequestContext(request))
#Editar situacao de Integrante
@login_required
def editar_integrante(request, codigo):
    """Edit the participation status (situacao) of member *codigo*."""
    usuario = request.user
    inte = get_object_or_404(Integrante, pk=codigo)
    if request.method == 'POST':
        form = SituacaoIntModelForm(request.POST, instance=inte)
        if form.is_valid():
            c = form.save()
            return HttpResponseRedirect('/')
        else:
            # NOTE(review): render_to_response normally takes a template
            # name, not an errors dict — pattern repeated across this file.
            return render_to_response(form.errors)
    else:
        form = SituacaoIntModelForm(instance=inte)
        integrante = Integrante.objects.get(codIntegrante = codigo)
        return render_to_response('editar_integrante.html',{'form':form, 'integrante':integrante, 'codigo':codigo,'usuario':usuario,}, context_instance=RequestContext(request))
#Editar Sala e avaliar projeto
@login_required
def editar_gerencia(request, codigo):
    """Management view: assign a room to and evaluate project *codigo*."""
    usuario = request.user
    projeto = get_object_or_404(Projeto, pk=codigo)
    codigo = projeto.codProj
    if request.method == 'POST':
        form = ProjetoGerenciaForm(request.POST, instance=projeto)
        if form.is_valid():
            c = form.save()
            return HttpResponseRedirect('/')
        else:
            return render_to_response(form.errors)
    else:
        form = ProjetoGerenciaForm(instance=projeto)
        professor = Professor.objects.filter(projeto = codigo)
        integrante = Integrante.objects.filter(projeto = codigo)
        projeto = Projeto.objects.get(codProj = codigo)
        return render_to_response('editar_gerencia.html', {'form':form, 'codigo':codigo,'integrante':integrante,'projeto':projeto, 'professor':professor,'usuario':usuario},context_instance=RequestContext(request))
#Editar Sala apos avaliacao do projeto
@login_required
def editar_gerencia2(request, codigo):
    """Management view: edit the room assignment after the project has
    already been evaluated (uses ProjetoGerenciaModelForm)."""
    usuario = request.user
    projeto = get_object_or_404(Projeto, pk=codigo)
    codigo = projeto.codProj
    if request.method == 'POST':
        form = ProjetoGerenciaModelForm(request.POST, instance=projeto)
        if form.is_valid():
            c = form.save()
            return HttpResponseRedirect('/')
        else:
            return render_to_response(form.errors)
    else:
        form = ProjetoGerenciaModelForm(instance=projeto)
        integrante = Integrante.objects.filter(projeto = codigo)
        professor = Professor.objects.filter(projeto = codigo)
        projeto = Projeto.objects.get(codProj = codigo)
        return render_to_response('editar_gerencia2.html', {'form':form, 'codigo':codigo,'integrante':integrante,'projeto':projeto,'professor':professor,'usuario':usuario,},context_instance=RequestContext(request))
#Exibir todos participantes
@login_required
def todos_participantes(request):
    """List every registered member; locals() supplies the template context,
    so the names usuario/part are the context keys."""
    usuario = request.user
    part = Integrante.objects.all()
    return render_to_response('todos_participantes.html',locals(), context_instance=RequestContext(request))
#Exibir todos professores
@login_required
def todos_professores(request):
    """List every registered teacher; locals() supplies the template context,
    so the names usuario/prof are the context keys."""
    usuario = request.user
    prof = Professor.objects.all()
    return render_to_response('todos_professores.html',locals(), context_instance=RequestContext(request))
#Pagina principal dos certificados
@login_required
def principal_certificado(request):
    """Certificates landing page."""
    usuario = request.user
    return render_to_response('certificado.html',locals(), context_instance = RequestContext(request))
#Busca de participante
@login_required
def busca_participante(request):
    """Search members by case-insensitive name substring (POST), or render
    the empty search page (GET)."""
    usuario = request.user
    if request.method =='POST':
        nome = request.POST['nome']
        part = Integrante.objects.filter(nomeIntegrante__icontains=nome)
        return render_to_response('buscapart.html',{'part':part,'usuario':usuario,}, context_instance=RequestContext(request))
    else:
        return render_to_response('buscapart.html',{'usuario':usuario}, context_instance=RequestContext(request))
#Busca de professor
@login_required
def busca_professor(request):
    """Search teachers by case-insensitive name substring (POST), or render
    the empty search page (GET)."""
    usuario = request.user
    if request.method =='POST':
        nome = request.POST['nome']
        prof = Professor.objects.filter(nomeProfessor__icontains=nome)
        return render_to_response('busca_prof.html',{'prof':prof}, context_instance=RequestContext(request))
    else:
        return render_to_response('busca_prof.html', context_instance=RequestContext(request))
@login_required
def filtrar_area_gerencia(request,codigo):
    """Management listing filtered by thematic area *codigo* (GET only),
    with the approved-project count for that area."""
    usuario = request.user
    if request.method == "GET":
        projeto = Projeto.objects.filter(area = codigo)
        cont = Projeto.objects.filter(situacao="Aprovado", area=codigo).count()
        return render_to_response('gerencia_area.html',{'projeto':projeto,'cont':cont,'usuario':usuario},context_instance=RequestContext(request))
    else:
        return render_to_response('404.html')
#emitir o certificado do professor
@login_required
def emitir_prof(request, codigo):
    """Collect certificate data for teacher *codigo* and redirect to the
    PDF view on a valid POST.

    The CertificadoProfessor row is created on first access.
    """
    usuario = request.user
    x = get_object_or_404(Professor, pk=codigo)
    try:
        certificado_prof = CertificadoProfessor.objects.get(professor = codigo)
    except ObjectDoesNotExist:
        # First emission for this teacher: create the certificate record.
        certificado_prof = CertificadoProfessor.objects.create(professor = x)
    if request.method =='POST':
        f= DadosCertModelFormProfessor(request.POST,instance = certificado_prof)
        if f.is_valid():
            c = f.save()
            return HttpResponseRedirect(reverse('feitec.projeto.views.certificado_prof', args=[x.codProfessor, c.codCert]))
        else:
            # NOTE(review): missing `return` — an invalid POST falls
            # through and re-renders the data form below; confirm intent.
            render_to_response(f.errors)
    else:
        f= DadosCertModelFormProfessor(instance = certificado_prof)
    return render_to_response('dados_certificado_prof.html', {'c':x, 'f':f,'usuario':usuario}, context_instance=RequestContext(request))
#gerar pdf certificado do professor
@login_required
def certificado_prof(request,codigo, codCert):
    """Render teacher *codigo*'s certificate *codCert* as a downloadable
    landscape PDF (legacy Django: mimetype= keyword)."""
    response = HttpResponse (mimetype='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=certificado.pdf'
    buffer = StringIO()
    p = canvas.Canvas(buffer)
    d = get_object_or_404(CertificadoProfessor, pk=codCert)
    c = get_object_or_404(Professor, pk=codigo)
    e = get_object_or_404(Projeto, nomeProj=c.projeto)
    projeto = str (e.nomeProj)
    area = str (e.area)
    dia = str (d.data)
    mes = str (d.mes)
    ano = str (d.ano)
    # Rotate 90 degrees for landscape layout; all coordinates below are in
    # the rotated system (negative y runs down the page).
    p.translate(inch,inch)
    p.setFont("Helvetica", 14)
    p.rotate(90)
    # Signature images for the two directors, resolved under the project root.
    artur = os.path.join(settings.PROJECT_ROOT_PATH,"carlos.png")
    jeff = os.path.join(settings.PROJECT_ROOT_PATH,"jeff.png")
    p.drawImage(artur, 1*inch,-400, width=91,height=50)
    p.drawImage(jeff, 7*inch,-400, width=180,height=50)
    p.setFont("Helvetica-BoldOblique", 12)
    p.drawString(1*inch, -220, "Certificamos que " + c.nomeProfessor + " coordenou o projeto " + projeto + ", área temática " + area + " ")
    p.drawString(0.5*inch, -240, "apresentado na FEITEC 2011 – Feira de Ciência Tecnologia e Inovação do IFF – Campus Campos Centro,")
    p.drawString(0.5*inch, -260, " realizada na 19ª Semana do Saber Fazer Saber, nos dias 7,8 e 9 de setembro de 2011.")
    p.drawString(6*inch, -290, "Campos dos Goytacazes, " + dia + " de " + mes + " de " + ano)
    p.drawString(0.5*inch, -415, "Carlos Artur de Carvalho Arêas")
    p.setFont("Helvetica-Oblique", 12)
    p.drawString(0.5*inch, -430, "Diretor do Departamento de Desenvolvimento")
    p.drawString(0.5*inch, -445, "Institucional e Extensão Campus Campos-Centro")
    p.setFont("Helvetica-BoldOblique", 12)
    p.drawString(7*inch, -415, "Jefferson Manhães de Azevedo")
    p.setFont("Helvetica-Oblique", 12)
    p.drawString(7*inch, -430, "Diretor Geral do IFF")
    p.drawString(7*inch, -445, "Campus Campos-Centro")
    p.setFont("Helvetica-Oblique", 8)
    p.drawString(8*inch,-500, "O presente certificado foi registrado sob o n." + d.numeroCert)
    p.drawString(8*inch, -510, "No Livro n. " + d.livroCert + " em " + d.dataCert)
    p.showPage()
    p.save()
    # Hand the buffered PDF bytes to the HTTP response.
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
#emitir certificado do participante
@login_required
def emitir(request, codigo):
    """Collect certificate data for member *codigo* and redirect to the
    PDF view on a valid POST.

    The CertificadoIntegrante row is created on first access.
    """
    usuario = request.user
    x = get_object_or_404(Integrante, pk=codigo)
    try:
        certificado = CertificadoIntegrante.objects.get(integrante = codigo)
    except ObjectDoesNotExist:
        # First emission for this member: create the certificate record.
        certificado = CertificadoIntegrante.objects.create(integrante = x)
    if request.method =='POST':
        f= DadosCertModelFormIntegrante(request.POST,instance = certificado)
        if f.is_valid():
            c = f.save()
            return HttpResponseRedirect(reverse('feitec.projeto.views.certificado', args=[x.codIntegrante, c.codCert]))
        else:
            # NOTE(review): missing `return` — an invalid POST falls
            # through and re-renders the data form below; confirm intent.
            render_to_response(f.errors)
    else:
        f= DadosCertModelFormIntegrante(instance = certificado)
    return render_to_response('dadoscertificado.html', {'c':x, 'f':f,'usuario':usuario,}, context_instance=RequestContext(request))
#gerar pdf do certificado do participante
@login_required
def certificado(request,codigo, codCert):
    """Render member *codigo*'s certificate *codCert* as a downloadable
    landscape PDF (legacy Django: mimetype= keyword)."""
    response = HttpResponse (mimetype='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=certificado.pdf'
    buffer = StringIO()
    p = canvas.Canvas(buffer)
    d = get_object_or_404(CertificadoIntegrante, pk=codCert)
    c = get_object_or_404(Integrante, pk=codigo)
    e = get_object_or_404(Projeto, nomeProj=c.projeto)
    projeto = str (e.nomeProj)
    area = str (e.area)
    dia = str (d.data)
    mes = str (d.mes)
    ano = str (d.ano)
    # Rotate 90 degrees for landscape layout; negative y runs down the page.
    p.translate(inch,inch)
    p.setFont("Helvetica", 14)
    p.rotate(90)
    # Images resolved relative to the working directory (unlike
    # certificado_prof, which uses settings.PROJECT_ROOT_PATH); the
    # background image is currently unused.
    image = os.path.join("Certificado.png")
    artur = os.path.join("carlos.png")
    jeff = os.path.join("jeff.png")
    p.drawImage(artur, 1*inch,-400, width=91,height=50)
    p.drawImage(jeff, 7*inch,-400, width=180,height=50)
    p.setFont("Helvetica-BoldOblique", 12)
    p.drawString(1*inch, -220, "Certificamos que " + c.nomeIntegrante + " participou do projeto " + projeto + ", área temática " + area + " ")
    p.drawString(0.5*inch, -240, "apresentado na FEITEC 2011 – Feira de Ciência Tecnologia e Inovação do IFF – Campus Campos Centro,")
    p.drawString(0.5*inch, -260, " realizada na 19ª Semana do Saber Fazer Saber, nos dias 7,8 e 9 de setembro de 2011.")
    p.setFont("Helvetica-BoldOblique", 12)
    p.drawString(6*inch, -290, "Campos dos Goytacazes, " + dia + " de " + mes + " de " + ano)
    p.setFont("Helvetica-Oblique", 12)
    p.drawString(0.5*inch, -415, "Carlos Artur de Carvalho Arêas")
    p.drawString(0.5*inch, -430, "Diretor do Departamento de Desenvolvimento")
    p.drawString(0.5*inch, -445, "Institucional e Extensão Campus Campos-Centro")
    p.setFont("Helvetica-BoldOblique", 12)
    p.drawString(7*inch, -415, "Jefferson Manhães de Azevedo")
    p.drawString(7*inch, -430, "Diretor Geral do IFF")
    p.drawString(7*inch, -445, "Campus Campos-Centro")
    p.setFont("Helvetica-Oblique", 8)
    p.drawString(8*inch,-490, "O presente certificado foi registrado sob o n." + d.numeroCert)
    p.drawString(8*inch, -500, "No Livro n. " + d.livroCert + " em " + d.dataCert)
    p.showPage()
    p.save()
    # Hand the buffered PDF bytes to the HTTP response.
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
class ProjetoModelForm(ModelForm):
    """Public submission form for Projeto.

    Room/block assignment and the 'enviar' flag are excluded (managed by
    staff); creator and workflow status travel as hidden inputs.
    """
    class Meta:
        exclude = ('salaProj','blocoProj','enviar')
        model = Projeto
        widgets={
            'criador':HiddenInput(attrs={'size':50}),
            'situacao':HiddenInput(attrs={'size':50}),
            #'salaProj' : HiddenInput(),
            #'blocoProj' : HiddenInput(),
            #'enviar' : HiddenInput(),
        }
class ProfessorModelForm(ModelForm):
    """Plain ModelForm exposing every field of Professor."""
    class Meta:
        model = Professor
class IntegranteModelForm(ModelForm):
    """Registration form for a project member (Integrante).

    The member's workflow status is carried as a hidden input so the user
    cannot edit it directly.
    """
    class Meta:
        model = Integrante
        widgets={
            'situacaoIntegrante':HiddenInput(attrs={'size':50}),
        }
class DadosCertModelFormIntegrante(ModelForm):
    """Certificate-data form for a member; the owning Integrante is fixed
    by the view and therefore hidden."""
    class Meta:
        model = CertificadoIntegrante
        widgets = {
            'integrante':HiddenInput(),
        }
class DadosCertModelFormProfessor(ModelForm):
    """Certificate-data form for an advisor; the owning Professor is fixed
    by the view and therefore hidden."""
    class Meta:
        model = CertificadoProfessor
        widgets = {
            'professor':HiddenInput(),
        }
class ProjetoGerenciaForm(ModelForm):
    """Staff management form for Projeto.

    Only room/block assignment ('salaProj'/'blocoProj') is editable; every
    descriptive field is rendered as a hidden input, and 'enviar' is
    excluded entirely.
    """
    class Meta:
        # BUG FIX: the original had exclude = ('enviar'), which is just the
        # string 'enviar' (parentheses without a comma are grouping, not a
        # tuple). Django expects an iterable of field NAMES, so the
        # exclusion silently failed. A one-element tuple fixes it.
        exclude = ('enviar',)
        model = Projeto
        widgets = {
            'criador': HiddenInput(attrs={'size':50}),
            'situacao': HiddenInput(attrs={'size':50}),
            'area': HiddenInput(),
            'nomeProj' : HiddenInput(),
            'resumoProj' : HiddenInput(),
            'materialProj' : HiddenInput(),
            'campusProj' : HiddenInput(),
            'equipamentoProj' : HiddenInput(),
            #'enviar' : HiddenInput(),
        }
class ProjetoGerenciaModelForm(ModelForm):
    """Full-field management form for Projeto.

    Unlike ProjetoGerenciaForm, nothing is excluded: every field including
    'enviar' is present, but all descriptive fields are hidden inputs.
    """
    class Meta:
        model = Projeto
        widgets = {
            'criador': HiddenInput(attrs={'size':50}),
            'situacao': HiddenInput(attrs={'size':50}),
            'area': HiddenInput(),
            'nomeProj' : HiddenInput(),
            'resumoProj' : HiddenInput(),
            'materialProj' : HiddenInput(),
            'campusProj' : HiddenInput(),
            'equipamentoProj' : HiddenInput(),
            'enviar' : HiddenInput(),
        }
class SituacaoIntModelForm(ModelForm):
    """Status-change form for an Integrante.

    Every identifying field is hidden, leaving (by omission from the widget
    map) only the member's situation editable.
    """
    class Meta:
        model = Integrante
        widgets = {
            'codIntegrante' : HiddenInput(),
            'nomeIntegrante': HiddenInput(),
            'cpfIntegrante': HiddenInput(),
            'matriculaIntegrante' : HiddenInput(),
            'cursoIntegrante': HiddenInput(),
            'tamanhocamisaIntegrante': HiddenInput(),
            'projeto': HiddenInput(),
            'campusIntegrante':HiddenInput(),
        }
|
import json


def is_json(myjson):
    """Return True if *myjson* is a string containing valid JSON.

    BUG FIX: the original serialized the argument with json.dumps and then
    re-parsed its own output, so it answered "is this serializable?" rather
    than "is this valid JSON?" (any string passed), and it leaked an
    uncaught TypeError for unserializable objects. Parse the input instead.
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        # ValueError: malformed JSON text; TypeError: non-string input.
        return False
    return True
|
""" Auth Utils
"""
import cloudstorage as gcs
import endpoints
import json
import os
import webapp2
import logging
import re
from datetime import datetime
from datetime import date
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import urlfetch
from apiclient.discovery import build
from models import ActivityPost
from models import ActivityRecord
from models import Account
from models import activity_record as ar
# NON VALID ACCOUNT TYPES (FOR HARVESTING) : ['deleted', 'administrator']
# Account types whose activity is harvested/processed by this module.
VALID_ACCOUNT_TYPES = ['active', 'gde', 'marketing', 'productstategy', 'ux_ui']
# Module-wide GCS retry policy: first retry after 0.2s, exponential backoff
# (factor 2) capped at 5s per attempt and 15s total.
my_default_retry_params = gcs.RetryParams(initial_delay=0.2,
                                          max_delay=5.0,
                                          backoff_factor=2,
                                          max_retry_period=15)
gcs.set_default_retry_params(my_default_retry_params)
def get_so_api_key():
    """Return the Stack Overflow API key from secrets.json, or None.

    Reads ``secrets.json`` from the bucket named by the BUCKET_NAME env
    var (falling back to the app's default GCS bucket). Returns None when
    the file is missing or lacks the key.
    """
    bucket = '/' + os.environ.get('BUCKET_NAME',
                                  app_identity.get_default_gcs_bucket_name())
    try:
        secrets_file = gcs.open(bucket + '/' + 'secrets.json', 'r')
    except gcs.NotFoundError:
        logging.error('secrets.json not found in default bucket')
        return None
    # BUG FIX: the original never closed the GCS file handle; close it in
    # a finally block so the handle is released even if parsing fails.
    try:
        secrets = json.loads(secrets_file.read())
    finally:
        secrets_file.close()
    return secrets.get('so_api_key')
def get_admin_api_key():
    """Return the admin API key from secrets.json, or None.

    Reads ``secrets.json`` from the bucket named by the BUCKET_NAME env
    var (falling back to the app's default GCS bucket). Returns None when
    the file is missing or lacks the key.
    """
    bucket = '/' + os.environ.get('BUCKET_NAME',
                                  app_identity.get_default_gcs_bucket_name())
    try:
        secrets_file = gcs.open(bucket + '/' + 'secrets.json', 'r')
    except gcs.NotFoundError:
        logging.error('secrets.json not found in default bucket')
        return None
    # BUG FIX: the original never closed the GCS file handle; close it in
    # a finally block so the handle is released even if parsing fails.
    try:
        secrets = json.loads(secrets_file.read())
    finally:
        secrets_file.close()
    return secrets.get('admin_api_key')
def get_server_api_key():
    """Return the server API key from secrets.json, or None.

    Reads ``secrets.json`` from the bucket named by the BUCKET_NAME env
    var (falling back to the app's default GCS bucket). Returns None when
    the file is missing or lacks the key.
    """
    bucket = '/' + os.environ.get('BUCKET_NAME',
                                  app_identity.get_default_gcs_bucket_name())
    try:
        secrets_file = gcs.open(bucket + '/' + 'secrets.json', 'r')
    except gcs.NotFoundError:
        logging.error('secrets.json not found in default bucket')
        return None
    # BUG FIX: the original never closed the GCS file handle; close it in
    # a finally block so the handle is released even if parsing fails.
    try:
        secrets = json.loads(secrets_file.read())
    finally:
        secrets_file.close()
    return secrets.get('server_api_key')
def _getUserId():
    """A workaround implementation for getting userid.

    Validates the bearer token from the Authorization header against
    Google's tokeninfo endpoint and returns the ``user_id`` field
    ('' when unavailable). Retries up to 3 times with growing backoff.
    """
    # BUG FIX: the retry branch below calls time.sleep, but 'time' was
    # never imported at module level -- the backoff path raised NameError.
    import time

    auth = os.getenv('HTTP_AUTHORIZATION')
    bearer, token = auth.split()
    token_type = 'id_token'
    if 'OAUTH_USER_ID' in os.environ:
        # Dev/test environment: the token is an access token, not an ID token.
        token_type = 'access_token'
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
           % (token_type, token))
    user = {}
    wait = 1
    for i in range(3):
        resp = urlfetch.fetch(url)
        if resp.status_code == 200:
            user = json.loads(resp.content)
            break
        elif resp.status_code == 400 and 'invalid_token' in resp.content:
            # The id_token was rejected; retry treating it as an access token.
            url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
                   % ('access_token', token))
        else:
            # Transient failure: back off before the next attempt.
            time.sleep(wait)
            wait = wait + i
    return user.get('user_id', '')
def get_current_account():
    """Retrieve the Account entity associated with the current user.

    Resolution order: last recorded auth_email, then the account's default
    email, then the Google ID. On the latter two matches the auth_email is
    refreshed so the first lookup succeeds next time. Returns None when no
    account matches (or no user is signed in).
    """
    user = endpoints.get_current_user()
    if user is None:
        logging.info('get_current_user returned none')
        return None
    email = user.email().lower()
    logging.info(email)

    # 1) Latest recorded auth_email.
    matches = Account.query(Account.auth_email == email).fetch(1)
    if matches:
        logging.info('auth_email returns')
        return matches[0]

    # 2) The account's default email; remember it as auth_email.
    matches = Account.query(Account.email == email).fetch(1)
    if matches:
        account = matches[0]
        account.auth_email = email
        account.put()
        logging.info('default email returns')
        return account

    # 3) The user's Google ID; remember the email as auth_email.
    matches = Account.query(Account.gplus_id == _getUserId()).fetch(1)
    if matches:
        account = matches[0]
        account.auth_email = email
        account.put()
        logging.info('Google ID returns')
        return account

    logging.info('None returns')
    return None
def check_auth(gplus_id, api_key):
    """Return True when the caller may modify data for *gplus_id*.

    Accepts either the admin API key (maintenance scripts) or an
    authenticated account that is the user itself or an administrator.
    """
    # Maintenance scripts authenticate with the admin API key.
    if api_key is not None and api_key == get_admin_api_key():
        return True

    # Otherwise require an authenticated account.
    account = get_current_account()
    logging.info(account)
    if account is None:
        return False

    # Users can change their own data.
    if account.gplus_id == gplus_id:
        logging.info('its a user')
        return True

    # Administrators can change everything.
    if account.type == 'administrator':
        logging.info('its an administrator')
        return True

    # We could do further checks here, depending on account.type, e.g.
    # "disabled" Experts are not allowed.
    # Only allow active Experts to enter data.
    return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.