#!/usr/bin/env python3
import argparse
import logging
from pathlib import Path
import sys
from typing import Iterable
from typing import Union
import numpy as np
from espnet.utils.cli_utils import get_commandline_args
def aggregate_stats_dirs(
input_dir: Iterable[Union[str, Path]],
output_dir: Union[str, Path],
log_level: str,
skip_sum_stats: bool,
):
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) (levelname)s: %(message)s",
)
input_dirs = [Path(p) for p in input_dir]
output_dir = Path(output_dir)
for mode in ["train", "valid"]:
with (input_dirs[0] / mode / "batch_keys").open("r", encoding="utf-8") as f:
batch_keys = [line.strip() for line in f if line.strip() != ""]
with (input_dirs[0] / mode / "stats_keys").open("r", encoding="utf-8") as f:
stats_keys = [line.strip() for line in f if line.strip() != ""]
(output_dir / mode).mkdir(parents=True, exist_ok=True)
for key in batch_keys:
with (output_dir / mode / f"{key}_shape").open(
"w", encoding="utf-8"
) as fout:
for idir in input_dirs:
with (idir / mode / f"{key}_shape").open(
"r", encoding="utf-8"
) as fin:
                        # Read to the end so the lines can be sorted by key,
                        # because the order can change when num_workers >= 1
lines = fin.readlines()
lines = sorted(lines, key=lambda x: x.split()[0])
for line in lines:
fout.write(line)
for key in stats_keys:
if not skip_sum_stats:
sum_stats = None
for idir in input_dirs:
stats = np.load(idir / mode / f"{key}_stats.npz")
if sum_stats is None:
sum_stats = dict(**stats)
else:
for k in stats:
sum_stats[k] += stats[k]
np.savez(output_dir / mode / f"{key}_stats.npz", **sum_stats)
# if --write_collected_feats=true
p = Path(mode) / "collect_feats" / f"{key}.scp"
scp = input_dirs[0] / p
if scp.exists():
(output_dir / p).parent.mkdir(parents=True, exist_ok=True)
with (output_dir / p).open("w", encoding="utf-8") as fout:
for idir in input_dirs:
with (idir / p).open("r", encoding="utf-8") as fin:
for line in fin:
fout.write(line)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Aggregate statistics directories into one directory",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument(
"--skip_sum_stats",
default=False,
action="store_true",
help="Skip computing the sum of statistics.",
)
parser.add_argument("--input_dir", action="append", help="Input directories")
parser.add_argument("--output_dir", required=True, help="Output directory")
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
aggregate_stats_dirs(**kwargs)
if __name__ == "__main__":
main()
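# Example invocation (hypothetical script and directory names; pass
# --input_dir once per statistics directory produced by a separate worker):
#   python aggregate_stats_dirs.py \
#       --input_dir exp/stats.1 --input_dir exp/stats.2 \
#       --output_dir exp/stats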
import argparse
import parmed as pmd
def merge_gro_files(prot_gro, lig_gro, cmplx_gro):
prot = pmd.load_file(prot_gro)
lig = pmd.load_file(lig_gro)
cmplx = prot + lig
cmplx.save(cmplx_gro)
def merge_top_files(prot_top, lig_top, cmplx_top):
    # read the ligand topology and split it into sections
    with open(lig_top, 'r') as f:
        lig_top_sections = f.read().split('\n[')
    # pull the [ atomtypes ] section out of the ligand topology
for n in range(len(lig_top_sections)):
if 'atomtypes' in lig_top_sections[n][:10]:
lig_atomtypes = lig_top_sections[n]
del lig_top_sections[n]
break
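    # for/else: the else branch runs only when the loop finds no
    # [ atomtypes ] section (i.e. it never hits break)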
else:
lig_atomtypes = None
lig_top_updated = '\n['.join(lig_top_sections)
# open protein topology
with open(prot_top, 'r') as f:
prot_top_combined = f.read()
if lig_atomtypes:
prot_top_sections = prot_top_combined.split('[ moleculetype ]\n')
prot_top_combined = (prot_top_sections[0] +
'; Include ligand atomtypes\n[' +
lig_atomtypes +
'\n[ moleculetype ]\n' +
prot_top_sections[1])
prot_top_sections = prot_top_combined.split('; Include water topology')
prot_top_combined = (prot_top_sections[0] +
'; Include ligand topology\n' +
lig_top_updated +
'\n; Include water topology' +
prot_top_sections[1])
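    # append the ligand molecule entry to the [ molecules ] section
    # (the ligand moleculetype is assumed to be named 'base')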
prot_top_combined += 'base 1\n'
# save complex topology
with open(cmplx_top, 'w') as f:
f.write(prot_top_combined)
def main():
parser = argparse.ArgumentParser(
description='Perform SMD runs for dynamic undocking')
parser.add_argument('--lig-top', help='Ligand TOP file.')
parser.add_argument('--prot-top', help='Protein TOP file.')
parser.add_argument('--lig-gro', help='Ligand GRO file.')
parser.add_argument('--prot-gro', help='Protein GRO file.')
parser.add_argument('--complex-top', help='Complex TOP file.')
parser.add_argument('--complex-gro', help='Complex GRO file.')
args = parser.parse_args()
merge_gro_files(args.prot_gro, args.lig_gro, args.complex_gro)
merge_top_files(args.prot_top, args.lig_top, args.complex_top)
if __name__ == "__main__":
main()
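# Example invocation (hypothetical script and file names):
#   python merge_duck_files.py --prot-top prot.top --lig-top lig.top \
#       --prot-gro prot.gro --lig-gro lig.gro \
#       --complex-top complex.top --complex-gro complex.gro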
dictionary = {"name": "Shahjalal", "ref": "Python", "sys": "Mac"}
for key, value in dictionary.items():
    print(key, " = ", value)
def get_sum_by_route(route_val, nums):
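    # Each bit of route_val encodes one step down the triangle:
    # 0 = move straight down (keep column j), 1 = move down-right (j += 1).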
    sum_val = nums[0][0]
    j = 0
    route = [sum_val]
    for i in range(1, len(nums)):
        if route_val % 2 > 0:
            j += 1
        sum_val += nums[i][j]
        route.append(nums[i][j])
        route_val >>= 1
    return route, sum_val
s = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
lines = s.splitlines()
nums = []
for line in lines:
line_list = [int(i) for i in line.split(' ')]
nums.append(line_list)
possible_route = 2 ** (len(nums) - 1)
print("Possible routs: ", possible_route)
max_sum = 0
for i in range(possible_route):
route, sum_val = get_sum_by_route(i, nums)
if sum_val > max_sum:
print("Max route updated", i)
print("Route: ", route)
max_sum = sum_val
print(max_sum)
from sort import algs
def test_bubblesort():
# 1) Test odd-sized vector + duplicate values
assert algs.bubblesort([1,2,4,0,1]) == [0,1,1,2,4]
# 2) Test even+duplicate values
assert algs.bubblesort([1,2,4,6,0,1]) == [0,1,1,2,4,6]
# 3) Test empty vector
assert algs.bubblesort([]) == []
# 4) Test single-element vectors
assert algs.bubblesort([1]) == [1]
# 5) Test single-value vectors
assert algs.bubblesort([1,1,1,1,1,1,1,1]) == [1,1,1,1,1,1,1,1]
# 6) Test vectors with negative values
assert algs.bubblesort([-2,-6,8,9,-4]) == [-6,-4,-2,8,9]
# 7) Test ordered and reverse-order lists of large size
    assert algs.bubblesort(list(range(1000))) == list(range(1000))
assert algs.bubblesort(list(reversed(range(1000)))) == list(range(1000))
# 8) Test vector of strings
assert algs.bubblesort(["will", "this", "work"]) == ["this", "will", "work"]
def test_quicksort():
# 1) Test odd-sized vector + duplicate values
assert algs.quicksort([1,2,4,0,1]) == [0,1,1,2,4]
# 2) Test even+duplicate values
assert algs.quicksort([1,2,4,6,0,1]) == [0,1,1,2,4,6]
# 3) Test empty vector
assert algs.quicksort([]) == []
# 4) Test single-element vectors
assert algs.quicksort([1]) == [1]
# 5) Test single-value vectors
assert algs.quicksort([1,1,1,1,1,1,1,1]) == [1,1,1,1,1,1,1,1]
# 6) Test vectors with negative values
assert algs.quicksort([-2,-6,8,9,-4]) == [-6,-4,-2,8,9]
# 7) Test ordered and reverse-order lists of large size
    assert algs.quicksort(list(range(1000))) == list(range(1000))
assert algs.quicksort(list(reversed(range(1000)))) == list(range(1000))
# 8) Test vector of strings
assert algs.quicksort(["will", "this", "work"]) == ["this", "will", "work"]
from service.resolver_base import ResolverBase
from service.rule_item_mutex import RuleItemMutex
# 6x6 anti-knight sudoku (no value may repeat a knight's move apart)
# mutual-exclusion rules have already been written to the DB
class Resolver1623(ResolverBase):
ANSWER_RANGE = ['1', '2', '3', '4', '5', '6']
    def get_answer_range(self) -> list:
return Resolver1623.ANSWER_RANGE
def calculate_rules(self):
super().calculate_rules()
self.question_data.rules_list = [
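            # row constraints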
            RuleItemMutex(self.question_data, '0,0;0,1;0,2;0,3;0,4;0,5'),
RuleItemMutex(self.question_data, '1,0;1,1;1,2;1,3;1,4;1,5'),
RuleItemMutex(self.question_data, '2,0;2,1;2,2;2,3;2,4;2,5'),
RuleItemMutex(self.question_data, '3,0;3,1;3,2;3,3;3,4;3,5'),
RuleItemMutex(self.question_data, '4,0;4,1;4,2;4,3;4,4;4,5'),
RuleItemMutex(self.question_data, '5,0;5,1;5,2;5,3;5,4;5,5'),
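            # column constraints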
RuleItemMutex(self.question_data, '0,0;1,0;2,0;3,0;4,0;5,0'),
RuleItemMutex(self.question_data, '0,1;1,1;2,1;3,1;4,1;5,1'),
RuleItemMutex(self.question_data, '0,2;1,2;2,2;3,2;4,2;5,2'),
RuleItemMutex(self.question_data, '0,3;1,3;2,3;3,3;4,3;5,3'),
RuleItemMutex(self.question_data, '0,4;1,4;2,4;3,4;4,4;5,4'),
RuleItemMutex(self.question_data, '0,5;1,5;2,5;3,5;4,5;5,5'),
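            # box constraints: each 3x2 block (assuming 'row,col' coordinates)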
RuleItemMutex(self.question_data, '0,0;1,0;2,0;0,1;1,1;2,1'),
RuleItemMutex(self.question_data, '3,0;4,0;5,0;3,1;4,1;5,1'),
RuleItemMutex(self.question_data, '0,2;1,2;2,2;0,3;1,3;2,3'),
RuleItemMutex(self.question_data, '3,2;4,2;5,2;3,3;4,3;5,3'),
RuleItemMutex(self.question_data, '0,4;1,4;2,4;0,5;1,5;2,5'),
RuleItemMutex(self.question_data, '3,4;4,4;5,4;3,5;4,5;5,5'),
            # knight moves with offset (+1, +2)
RuleItemMutex(self.question_data, '0,0;1,2'),
RuleItemMutex(self.question_data, '0,1;1,3'),
RuleItemMutex(self.question_data, '0,2;1,4'),
RuleItemMutex(self.question_data, '0,3;1,5'),
RuleItemMutex(self.question_data, '1,0;2,2'),
RuleItemMutex(self.question_data, '1,1;2,3'),
RuleItemMutex(self.question_data, '1,2;2,4'),
RuleItemMutex(self.question_data, '1,3;2,5'),
RuleItemMutex(self.question_data, '2,0;3,2'),
RuleItemMutex(self.question_data, '2,1;3,3'),
RuleItemMutex(self.question_data, '2,2;3,4'),
RuleItemMutex(self.question_data, '2,3;3,5'),
RuleItemMutex(self.question_data, '3,0;4,2'),
RuleItemMutex(self.question_data, '3,1;4,3'),
RuleItemMutex(self.question_data, '3,2;4,4'),
RuleItemMutex(self.question_data, '3,3;4,5'),
RuleItemMutex(self.question_data, '4,0;5,2'),
RuleItemMutex(self.question_data, '4,1;5,3'),
RuleItemMutex(self.question_data, '4,2;5,4'),
RuleItemMutex(self.question_data, '4,3;5,5'),
            # knight moves with offset (+2, +1)
            RuleItemMutex(self.question_data, '0,0;2,1'),
            RuleItemMutex(self.question_data, '1,0;3,1'),
            RuleItemMutex(self.question_data, '2,0;4,1'),
            RuleItemMutex(self.question_data, '3,0;5,1'),
RuleItemMutex(self.question_data, '0,1;2,2'),
RuleItemMutex(self.question_data, '1,1;3,2'),
RuleItemMutex(self.question_data, '2,1;4,2'),
RuleItemMutex(self.question_data, '3,1;5,2'),
RuleItemMutex(self.question_data, '0,2;2,3'),
RuleItemMutex(self.question_data, '1,2;3,3'),
RuleItemMutex(self.question_data, '2,2;4,3'),
RuleItemMutex(self.question_data, '3,2;5,3'),
RuleItemMutex(self.question_data, '0,3;2,4'),
RuleItemMutex(self.question_data, '1,3;3,4'),
RuleItemMutex(self.question_data, '2,3;4,4'),
RuleItemMutex(self.question_data, '3,3;5,4'),
RuleItemMutex(self.question_data, '0,4;2,5'),
RuleItemMutex(self.question_data, '1,4;3,5'),
RuleItemMutex(self.question_data, '2,4;4,5'),
RuleItemMutex(self.question_data, '3,4;5,5'),
            # knight moves with offset (+2, -1)
RuleItemMutex(self.question_data, '0,1;2,0'),
RuleItemMutex(self.question_data, '1,1;3,0'),
RuleItemMutex(self.question_data, '2,1;4,0'),
RuleItemMutex(self.question_data, '3,1;5,0'),
RuleItemMutex(self.question_data, '0,2;2,1'),
RuleItemMutex(self.question_data, '1,2;3,1'),
RuleItemMutex(self.question_data, '2,2;4,1'),
RuleItemMutex(self.question_data, '3,2;5,1'),
RuleItemMutex(self.question_data, '0,3;2,2'),
RuleItemMutex(self.question_data, '1,3;3,2'),
RuleItemMutex(self.question_data, '2,3;4,2'),
RuleItemMutex(self.question_data, '3,3;5,2'),
RuleItemMutex(self.question_data, '0,4;2,3'),
RuleItemMutex(self.question_data, '1,4;3,3'),
RuleItemMutex(self.question_data, '2,4;4,3'),
RuleItemMutex(self.question_data, '3,4;5,3'),
RuleItemMutex(self.question_data, '0,5;2,4'),
RuleItemMutex(self.question_data, '1,5;3,4'),
RuleItemMutex(self.question_data, '2,5;4,4'),
RuleItemMutex(self.question_data, '3,5;5,4'),
            # knight moves with offset (+1, -2)
RuleItemMutex(self.question_data, '0,2;1,0'),
RuleItemMutex(self.question_data, '1,2;2,0'),
RuleItemMutex(self.question_data, '2,2;3,0'),
RuleItemMutex(self.question_data, '3,2;4,0'),
RuleItemMutex(self.question_data, '4,2;5,0'),
RuleItemMutex(self.question_data, '0,3;1,1'),
RuleItemMutex(self.question_data, '1,3;2,1'),
RuleItemMutex(self.question_data, '2,3;3,1'),
RuleItemMutex(self.question_data, '3,3;4,1'),
RuleItemMutex(self.question_data, '4,3;5,1'),
RuleItemMutex(self.question_data, '0,4;1,2'),
RuleItemMutex(self.question_data, '1,4;2,2'),
RuleItemMutex(self.question_data, '2,4;3,2'),
RuleItemMutex(self.question_data, '3,4;4,2'),
RuleItemMutex(self.question_data, '4,4;5,2'),
RuleItemMutex(self.question_data, '0,5;1,3'),
RuleItemMutex(self.question_data, '1,5;2,3'),
RuleItemMutex(self.question_data, '2,5;3,3'),
RuleItemMutex(self.question_data, '3,5;4,3'),
RuleItemMutex(self.question_data, '4,5;5,3'),
]
def calculate_editable_original_data(self):
super().calculate_editable_original_data()
for y_index in range(len(self.question_data.editable_original_data)):
for x_index in range(len(self.question_data.editable_original_data[y_index])):
if self.question_data.editable_original_data[y_index][x_index] == '':
self.question_data.editable_original_data[y_index][x_index] = '#'
from specusticc.data_preprocessing.preprocessed_data import PreprocessedData
from specusticc.model_testing.prediction_results import PredictionResults
class Tester:
def __init__(self, model, model_name: str, data: PreprocessedData):
self._model = model
self._data: PreprocessedData = data
self._model_name = model_name
self.prediction_results: PredictionResults = PredictionResults()
def test(self):
train_set = self._data.train_set
input_data = train_set.get_input(self._model_name)
output_data = train_set.get_output()
self.prediction_results.train_output = self._model.predict(input_data)
print("Evaluate on train data")
self._model.evaluate(input_data, output_data, batch_size=128)
test_sets = self._data.test_sets
self.prediction_results.test_output = []
for test_set in test_sets:
input_data = test_set.get_input(self._model_name)
output_data = test_set.get_output()
prediction = self._model.predict(input_data)
self.prediction_results.test_output.append(prediction)
print("Evaluate on test data")
self._model.evaluate(input_data, output_data, batch_size=128)
def get_test_results(self) -> PredictionResults:
return self.prediction_results
from .encodeClass import encoderClass
from .decodeClass import decoderClass
import os
import uuid
from typing import Generator
from flask import current_app
from unittest import TestCase
from contextlib import contextmanager
from alembic import command
from sqlalchemy import create_engine
from {{ cookiecutter.app_name }} import app
from {{ cookiecutter.app_name }}.extensions import db
DATABASE_URI_FORMATTER = 'postgresql://{username}:{password}@postgres:5432/{database}'
@contextmanager
def provision_database(config: dict) -> Generator[None, None, None]:
database_name = str(uuid.uuid4()).replace('-', '_')
postgres_database_uri = DATABASE_URI_FORMATTER.format(username='postgres', password=os.environ['PGPASSWORD'], database='postgres')
app_database_uri = DATABASE_URI_FORMATTER.format(username='{{ cookiecutter.app_name }}', password=os.environ['APP_PASSWORD'], database=database_name)
migrate_database_uri = DATABASE_URI_FORMATTER.format(username='migrator', password=os.environ['MIGRATOR_PASSWORD'], database=database_name)
engine = create_engine(postgres_database_uri, isolation_level='AUTOCOMMIT')
connection = engine.connect()
connection.execute(f'create database "{database_name}" with owner migrator template template0 encoding "UTF-8"')
config.update({
'SQLALCHEMY_DATABASE_URI': app_database_uri,
'SQLALCHEMY_DATABASE_MIGRATION_URI': migrate_database_uri,
})
context = app.create_app().test_request_context()
context.push()
    migrate_config = current_app.extensions['migrate'].migrate.get_config(directory=None, x_arg=None)
    command.upgrade(migrate_config, revision='head', sql=False, tag=None)
yield
# make sure all held connections are destroyed before dropping the database
db.session.remove()
db.engine.dispose()
context.pop()
connection.execute(f'drop database "{database_name}"')
connection.close()
class IntegrationTestCase(TestCase):
# override in test cases for custom test configuration
custom_test_config = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = None
def run(self, result=None):
# initialize global test configuration here...
global_test_config = {}
global_test_config.update(self.custom_test_config or {})
with provision_database(global_test_config):
self.client = current_app.test_client()
super().run(result)
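# A minimal usage sketch (hypothetical endpoint and config):
#
#   class HealthCheckTestCase(IntegrationTestCase):
#       custom_test_config = {'TESTING': True}
#
#       def test_health(self):
#           response = self.client.get('/health')
#           self.assertEqual(response.status_code, 200)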
# #https://docs.pytest.org/en/reorganize-docs/new-docs/user/assert_statements.html
# # Assertions are the condition or boolean expression which are always supposed to be true
# import pytest
# def vowels():
# return set('aeiou')
# @pytest.mark.skip
# def test_vowels():
# result = vowels()
# expected = set('aeiou')
# print ("this test has run")
# assert result == expected
#
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: December 29, 2020
#
"""It is utilities for Gaussian process regression and
Student-:math:`t` process regression."""
import numpy as np
from bayeso.utils import utils_common
from bayeso import constants
@utils_common.validate_types
def get_prior_mu(prior_mu: constants.TYPING_UNION_CALLABLE_NONE, X: np.ndarray) -> np.ndarray:
"""
It computes the prior mean function values over inputs X.
:param prior_mu: prior mean function or None.
:type prior_mu: function or NoneType
:param X: inputs for prior mean function. Shape: (n, d) or (n, m, d).
:type X: numpy.ndarray
:returns: zero array, or array of prior mean function values. Shape: (n, 1).
:rtype: numpy.ndarray
:raises: AssertionError
"""
assert isinstance(X, np.ndarray)
assert callable(prior_mu) or prior_mu is None
assert len(X.shape) == 2 or len(X.shape) == 3
if prior_mu is None:
prior_mu_X = np.zeros((X.shape[0], 1))
else:
prior_mu_X = prior_mu(X)
assert len(prior_mu_X.shape) == 2
assert X.shape[0] == prior_mu_X.shape[0]
return prior_mu_X
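# A minimal usage sketch (derived from the checks above; no prior mean given):
#   X = np.ones((4, 2))
#   get_prior_mu(None, X)  # -> np.zeros((4, 1))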
@utils_common.validate_types
def validate_common_args(X_train: np.ndarray, Y_train: np.ndarray,
str_cov: str, prior_mu: constants.TYPING_UNION_CALLABLE_NONE,
debug: bool,
X_test: constants.TYPING_UNION_ARRAY_NONE=None,
) -> constants.TYPE_NONE:
"""
It validates the common arguments for various functions.
:param X_train: inputs. Shape: (n, d) or (n, m, d).
:type X_train: numpy.ndarray
:param Y_train: outputs. Shape: (n, 1).
:type Y_train: numpy.ndarray
:param str_cov: the name of covariance function.
:type str_cov: str.
:param prior_mu: None, or prior mean function.
:type prior_mu: NoneType, or function
:param debug: flag for printing log messages.
:type debug: bool.
:param X_test: inputs or None. Shape: (l, d) or (l, m, d).
:type X_test: numpy.ndarray, or NoneType, optional
:returns: None.
:rtype: NoneType
:raises: AssertionError
"""
assert isinstance(X_train, np.ndarray)
assert isinstance(Y_train, np.ndarray)
assert isinstance(str_cov, str)
assert callable(prior_mu) or prior_mu is None
assert isinstance(debug, bool)
assert len(X_train.shape) == 2 or len(X_train.shape) == 3
assert len(Y_train.shape) == 2
assert X_train.shape[0] == Y_train.shape[0]
assert isinstance(X_test, (np.ndarray, type(None)))
if X_test is not None:
if len(X_train.shape) == 2:
assert X_train.shape[1] == X_test.shape[1]
else:
assert X_train.shape[2] == X_test.shape[2]
import rclpy
import psutil
from rclpy.node import Node
from std_msgs.msg import Float32
class RpiMon(Node):
def __init__(self):
super().__init__('rpi_mon')
self.ramPublisher = self.create_publisher(Float32, 'freeram', 1)
timer_period = 2.0 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
def timer_callback(self):
msg = Float32()
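        # psutil.virtual_memory()[2] is the percentage of RAM in use,
        # so publish the percentage that is still free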
msg.data = 100.0-psutil.virtual_memory()[2]
self.ramPublisher.publish(msg)
def main(args=None):
print('Hi from rpi_mon.')
rclpy.init(args=args)
rpi_mon = RpiMon()
rclpy.spin(rpi_mon)
rpi_mon.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
from libspn.inference.type import InferenceType
from libspn.graph.op.base_sum import BaseSum
import libspn.utils as utils
@utils.register_serializable
class Sum(BaseSum):
"""A node representing a single sum in an SPN.
Args:
*values (input_like): Inputs providing input values to this node.
See :meth:`~libspn.Input.as_input` for possible values.
weights (input_like): Input providing weights node to this sum node.
See :meth:`~libspn.Input.as_input` for possible values. If set
to ``None``, the input is disconnected.
latent_indicators (input_like): Input providing IndicatorLeaf of an explicit latent variable
associated with this sum node. See :meth:`~libspn.Input.as_input`
for possible values. If set to ``None``, the input is disconnected.
name (str): Name of the node.
Attributes:
inference_type(InferenceType): Flag indicating the preferred inference
type for this node that will be used
during value calculation and learning.
Can be changed at any time and will be
used during the next inference/learning
op generation.
"""
def __init__(self, *values, weights=None, latent_indicators=None,
inference_type=InferenceType.MARGINAL,
sample_prob=None, name="Sum"):
super().__init__(
*values, num_sums=1, weights=weights, latent_indicators=latent_indicators,
inference_type=inference_type, sample_prob=sample_prob, name=name)
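# A minimal usage sketch (assuming `x0` and `x1` are previously constructed
# value-providing nodes in the SPN graph):
#   s = Sum(x0, x1, name="SumExample")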
##############################################
##############################################
###### Predict the Bear ######################
# Flask app that uses a model trained with the Fast.ai v2 library
# following an example in the upcoming book "Deep Learning for Coders
# with fastai and PyTorch: AI Applications Without a PhD" by
# Jeremy Howard and Sylvain Gugger.
##############################################
# Project put together by Javier Ideami
# Email: ideami@ideami.com
# Web: ideami.com
##############################################
import numpy as np
from flask import Flask, request, render_template
import pickle
from resources.utils import *
from fastai2.vision.widgets import *
from fastai2.imports import *
import os
cwd = os.getcwd()
path = Path()
Path().ls(file_exts='.pkl')
application = Flask(__name__)
model = load_learner(path/'model/export.pkl')
#Defining the home page for the web service
@application.route('/')
def home():
return render_template('index.html')
#Writing api for inference using the loaded model
@application.route('/predict',methods=['POST'])
#Predict method that uses the trained model to predict the kind of bear in the picture we uploaded
def predict():
#labels = ['grizzly','black','teddy']
file = request.files['file']
#Store the uploaded images in a temporary folder
if file:
filename = file.filename
file.save(os.path.join("resources/tmp", filename))
to_predict = "resources/tmp/"+filename
#Getting the prediction from the model
prediction=model.predict(to_predict)
#Render the result in the html template
return render_template('index.html', prediction_text='Your Prediction : {} '.format(prediction[0]))
if __name__ == "__main__":
#run the application
application.run(host='0.0.0.0')
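# Example request (hypothetical image file; Flask serves on port 5000 by default):
#   curl -F "file=@bear.jpg" http://localhost:5000/predict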
import numpy as np
from nexpy.gui.datadialogs import NXDialog, GridParameters
from nexpy.gui.utils import report_error
from nexusformat.nexus import NXfield, NXdata, NeXusError
from nexusformat.nexus.tree import centers
def show_dialog():
try:
dialog = ConvertDialog()
dialog.show()
except NeXusError as error:
report_error("Converting to (Q,E)", error)
class ConvertDialog(NXDialog):
def __init__(self, parent=None):
super(ConvertDialog, self).__init__(parent)
self.select_entry()
self.parameters = GridParameters()
self.parameters.add('Ei', self.entry['instrument/monochromator/energy'],
'Incident Energy')
self.parameters.add('dQ', self.round(np.sqrt(self.Ei/2)/50), 'Q Step')
self.parameters.add('dE', self.round(self.Ei/50), 'Energy Step')
self.set_layout(self.entry_layout,
self.parameters.grid(),
self.action_buttons(('Plot', self.plot_data),
('Save', self.save_data)),
self.close_buttons())
self.setWindowTitle('Converting to (Q,E)')
@property
def Ei(self):
return self.parameters['Ei'].value
@property
def dQ(self):
return self.parameters['dQ'].value
@property
def dE(self):
return self.parameters['dE'].value
def read_parameters(self):
self.L1 = - self.entry['sample/distance']
self.L2 = np.mean(self.entry['instrument/detector/distance'])
self.m1 = self.entry['monitor1']
self.t_m1 = self.m1.moment()
self.d_m1 = self.entry['monitor1/distance']
def convert_tof(self, tof):
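        # Standard neutron-scattering constants: E(meV) = 2.0721 * k^2 for k
        # in 1/Angstrom (2.0721 ~ hbar^2 / 2 m_n), and 1588.254 us is the
        # neutron time of flight per metre at k = 1 1/Angstrom.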
ki = np.sqrt(self.Ei / 2.0721)
ts = self.t_m1 + 1588.254 * (self.L1 - self.d_m1) / ki
kf = 1588.254 * self.L2 / (tof - ts)
eps = self.Ei - 2.0721*kf**2
return eps
def convert_QE(self):
"""Convert S(phi,eps) to S(Q,eps)"""
self.read_parameters()
Ei = self.Ei
dQ = self.dQ
dE = self.dE
signal = self.entry['data'].nxsignal
pol = centers(self.entry['data/polar_angle'], signal.shape[0])
tof = centers(self.entry['data/time_of_flight'], signal.shape[1])
en = self.convert_tof(tof)
idx_max = min(np.where(np.abs(en-0.75*Ei)<0.1)[0])
en = en[:idx_max]
data = signal.nxdata[:,:idx_max]
if self.entry['data'].nxerrors:
errors = self.entry['data'].nxerrors.nxdata[:]
Q = np.zeros((len(pol), len(en)))
E = np.zeros((len(pol), len(en)))
for i in range(0,len(pol)):
p = pol[i]
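            # Q^2 = ki^2 + kf^2 - 2*ki*kf*cos(phi), rewritten via E = 2.0721*k^2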
Q[i,:] = np.array(np.sqrt((2*Ei - en - 2*np.sqrt(Ei*(Ei-en))
* np.cos(p*np.pi/180.0))/2.0721))
E[i,:] = np.array(en)
s = Q.shape
Qin = Q.reshape(s[0]*s[1])
Ein = E.reshape(s[0]*s[1])
datain = data.reshape(s[0]*s[1])
if self.entry['data'].nxerrors:
errorsin = errors.reshape(s[0]*s[1])
qmin = Q.min()
qmax = Q.max()
emin = E.min()
emax = E.max()
NQ = int((qmax-qmin)/dQ) + 1
NE = int((emax-emin)/dE) + 1
Qb = np.linspace(qmin, qmax, NQ)
Eb = np.linspace(emin, emax, NE)
        # histogram and normalize
norm, nbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb))
hist, hbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb), weights=datain)
if self.entry['data'].nxerrors:
histe, hbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb), weights=errorsin*errorsin)
histe = histe**0.5
err = histe/norm
I = NXfield(hist/norm, name='S(Q,E)')
Qb = NXfield(Qb[:-1]+dQ/2., name='Q')
Eb = NXfield(Eb[:-1]+dE/2., name='E')
result = NXdata(I, (Eb, Qb))
        if self.entry['data'].nxerrors:
result.errors = NXfield(err)
return result
def round(self, x, prec=2, base=.05):
return round(base * round(float(x)/base), prec)
def plot_data(self):
self.convert_QE().plot()
def save_data(self):
self.entry['sqe'] = self.convert_QE()
from riemann.tx import tx_builder
from riemann import simple, script
from riemann import utils as rutils
from riemann.encoding import addresses
from workshop import crypto
from workshop.transactions import spend_utxo
from riemann import tx
'''
This is a hash timelock contract. It locks BTC until a timeout, or until a
specific secret is revealed.
HTLCs are used in cross-chain swaps, and are the core primitive for updating
lightning channels. Because of this, they can also be used to build cool things
like submarine (lightning-to-mainnet) atomic swaps.
Basically, an HTLC has 2 paths: execute and refund. The execute path checks a
secret against a pre-committed digest, and validates the executor's signature.
The refund path checks a timeout, and validates the funder's signature.
This script must be parameterized with a 32 byte hash, a timeout, and both
parties' pubkeyhashes.
# WARNING: This is an example. Do not use it in production.
'''
htlc_script = \
'OP_IF ' \
'OP_SHA256 {secret_hash} OP_EQUALVERIFY ' \
'OP_DUP OP_HASH160 {pkh0} ' \
'OP_ELSE ' \
'{timeout} OP_CHECKLOCKTIMEVERIFY OP_DROP ' \
'OP_DUP OP_HASH160 {pkh1} ' \
'OP_ENDIF ' \
'OP_EQUALVERIFY ' \
'OP_CHECKSIG'
def build_htlc_script(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> str:
'''
Parameterizes the HTLC script with the arguments.
'''
if len(secret_hash) != 32:
raise ValueError('Expected a 32-byte digest. '
f'Got {len(secret_hash)} bytes')
if len(redeemer_pkh) != 20:
raise ValueError('Expected a 20-byte redeemer pubkeyhash. '
f'Got {len(redeemer_pkh)} bytes')
if len(funder_pkh) != 20:
raise ValueError('Expected a 20-byte funder pubkeyhash. '
f'Got {len(redeemer_pkh)} bytes')
    return htlc_script.format(
        secret_hash=secret_hash.hex(),
        pkh0=redeemer_pkh.hex(),
        timeout=rutils.i2le(timeout).hex(),
        pkh1=funder_pkh.hex())
def htlc_address(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> str:
'''Parameterizes the script, and returns the corresponding address'''
s = build_htlc_script(secret_hash, redeemer_pkh, timeout, funder_pkh)
return addresses.make_p2wsh_address(s)
def p2htlc_output(
value: int,
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> tx.TxOut:
'''Parameterizes the script, and creates an output paying that address'''
address = htlc_address(secret_hash, redeemer_pkh, timeout, funder_pkh)
return simple.output(value, address)
def htlc_refund_witness(
htlc_script: str,
signature: bytes,
pubkey: bytes
) -> tx.InputWitness:
'''
Given a signature, creates a witness for the refund path of the HTLC
The b'\x00' corresponds to OP_FALSE
'''
serialized = script.serialize(htlc_script)
return tx_builder.make_witness([signature, pubkey, b'\x00', serialized])
def htlc_execute_witness(
htlc_script: str,
signature: bytes,
pubkey: bytes,
secret: bytes
) -> tx.InputWitness:
'''
Given a signature and the secret, makes a witness for the execute path of
the HTLC.
The b'\x01' corresponds to OP_TRUE
'''
serialized = script.serialize(htlc_script)
return tx_builder.make_witness(
[signature, pubkey, secret, b'\x01', serialized]
)
def spend_htlc_transaction(
tx_id: str,
index: int,
value: int,
address: str,
timeout: int = 0
) -> tx.Tx:
'''
Creates an unsigned txn that sends funds from an HTLC to a specified
address.
    Note that this step requires knowledge only of the timeout. An exercise tx
can safely leave this at 0.
'''
tx_in = spend_utxo(tx_id, index)
tx_out = simple.output(value, address)
return simple.unsigned_witness_tx( # type: ignore
tx_ins=[tx_in],
tx_outs=[tx_out],
locktime=timeout)
def signed_refund_htlc_transaction(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes,
tx_id: str,
index: int,
prevout_value: int,
address: str,
privkey: bytes,
fee: int = 0
) -> tx.Tx:
'''
Builds an entire Refund HTLC spend from scratch.
'''
# build the unsigned version of the transaction
t = spend_htlc_transaction(
tx_id,
index,
prevout_value - fee,
address,
timeout)
# Prep the witness program
s = build_htlc_script(secret_hash, redeemer_pkh, timeout, funder_pkh)
serialized_script = script.serialize(s)
script_len = len(serialized_script)
prepended_script = tx.VarInt(script_len).to_bytes() + serialized_script
# calculate sighash using the witness program
sighash = t.sighash_all(
index=index,
script=prepended_script,
prevout_value=rutils.i2le_padded(prevout_value, 8))
# sign it and make the witness
signature = crypto.sign_digest(sighash, privkey)
witness = htlc_refund_witness(s, signature, crypto.priv_to_pub(privkey))
# insert the witness into the tx
return t.copy(tx_witnesses=[witness])
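# A minimal usage sketch (hypothetical values; the secret and key material
# below are placeholders, not real credentials):
#   secret = b'\x32' * 32
#   secret_hash = rutils.sha256(secret)
#   tx_out = p2htlc_output(
#       value=100_000,
#       secret_hash=secret_hash,
#       redeemer_pkh=b'\x11' * 20,
#       timeout=595_000,
#       funder_pkh=b'\x22' * 20)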
#!/usr/bin/env python
# Part of sniffMyPackets framework.
# GeoIP Lookup modules to cut down on code changes.
import pygeoip
from canari.config import config
def lookup_geo(ip):
try:
# homelat = config['geoip/homelat'].strip('\'')
# homelng = config['geoip/homelng'].strip('\'')
db = config['geoip/db'].strip('\'')
try:
gi = pygeoip.GeoIP(db)
except Exception as e:
return str(e)
rec = gi.record_by_addr(ip)
if rec is not None:
return rec
except Exception as e:
return str(e)
# python3 code
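# Each '>' walker eventually passes every '<' walker that starts ahead of
# it, and each pass contributes two salutes (the b * 2 below). The nested
# scan is O(n^2); a single right-to-left pass could do the same in O(n).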
def count(i,s):
ans=0
for j in range(i,len(s)):
if(s[j]=="<"):
ans+=1
return ans
def higher(s):
res=0
for i in range(len(s)):
if(s[i]==">"):
b=count(i,s)
res=res+(b*2)
return res
def solution(s):
# Your code here
result=higher(s)
return result
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FleetArgs', 'Fleet']
@pulumi.input_type
class FleetArgs:
def __init__(__self__, *,
compute_capacity: pulumi.Input['FleetComputeCapacityArgs'],
instance_type: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input['FleetDomainJoinInfoArgs']] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input['FleetVpcConfigArgs']] = None):
"""
The set of arguments for constructing a Fleet resource.
:param pulumi.Input['FleetComputeCapacityArgs'] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input['FleetDomainJoinInfoArgs'] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input['FleetVpcConfigArgs'] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
pulumi.set(__self__, "compute_capacity", compute_capacity)
pulumi.set(__self__, "instance_type", instance_type)
if description is not None:
pulumi.set(__self__, "description", description)
if disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "disconnect_timeout_in_seconds", disconnect_timeout_in_seconds)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if domain_join_info is not None:
pulumi.set(__self__, "domain_join_info", domain_join_info)
if enable_default_internet_access is not None:
pulumi.set(__self__, "enable_default_internet_access", enable_default_internet_access)
if fleet_type is not None:
pulumi.set(__self__, "fleet_type", fleet_type)
if iam_role_arn is not None:
pulumi.set(__self__, "iam_role_arn", iam_role_arn)
if idle_disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "idle_disconnect_timeout_in_seconds", idle_disconnect_timeout_in_seconds)
if image_arn is not None:
pulumi.set(__self__, "image_arn", image_arn)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if max_user_duration_in_seconds is not None:
pulumi.set(__self__, "max_user_duration_in_seconds", max_user_duration_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if stream_view is not None:
pulumi.set(__self__, "stream_view", stream_view)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> pulumi.Input['FleetComputeCapacityArgs']:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@compute_capacity.setter
def compute_capacity(self, value: pulumi.Input['FleetComputeCapacityArgs']):
pulumi.set(self, "compute_capacity", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> pulumi.Input[str]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@disconnect_timeout_in_seconds.setter
def disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> Optional[pulumi.Input['FleetDomainJoinInfoArgs']]:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@domain_join_info.setter
def domain_join_info(self, value: Optional[pulumi.Input['FleetDomainJoinInfoArgs']]):
pulumi.set(self, "domain_join_info", value)
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@enable_default_internet_access.setter
def enable_default_internet_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_internet_access", value)
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> Optional[pulumi.Input[str]]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@fleet_type.setter
def fleet_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fleet_type", value)
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@iam_role_arn.setter
def iam_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_role_arn", value)
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@idle_disconnect_timeout_in_seconds.setter
def idle_disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@image_arn.setter
def image_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_arn", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@max_user_duration_in_seconds.setter
def max_user_duration_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_user_duration_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> Optional[pulumi.Input[str]]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@stream_view.setter
def stream_view(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_view", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> Optional[pulumi.Input['FleetVpcConfigArgs']]:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
@vpc_config.setter
def vpc_config(self, value: Optional[pulumi.Input['FleetVpcConfigArgs']]):
pulumi.set(self, "vpc_config", value)
@pulumi.input_type
class _FleetState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
compute_capacity: Optional[pulumi.Input['FleetComputeCapacityArgs']] = None,
created_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input['FleetDomainJoinInfoArgs']] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input['FleetVpcConfigArgs']] = None):
"""
Input properties used for looking up and filtering Fleet resources.
:param pulumi.Input[str] arn: ARN of the appstream fleet.
:param pulumi.Input['FleetComputeCapacityArgs'] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] created_time: Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input['FleetDomainJoinInfoArgs'] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] state: State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input['FleetVpcConfigArgs'] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if compute_capacity is not None:
pulumi.set(__self__, "compute_capacity", compute_capacity)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if description is not None:
pulumi.set(__self__, "description", description)
if disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "disconnect_timeout_in_seconds", disconnect_timeout_in_seconds)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if domain_join_info is not None:
pulumi.set(__self__, "domain_join_info", domain_join_info)
if enable_default_internet_access is not None:
pulumi.set(__self__, "enable_default_internet_access", enable_default_internet_access)
if fleet_type is not None:
pulumi.set(__self__, "fleet_type", fleet_type)
if iam_role_arn is not None:
pulumi.set(__self__, "iam_role_arn", iam_role_arn)
if idle_disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "idle_disconnect_timeout_in_seconds", idle_disconnect_timeout_in_seconds)
if image_arn is not None:
pulumi.set(__self__, "image_arn", image_arn)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if max_user_duration_in_seconds is not None:
pulumi.set(__self__, "max_user_duration_in_seconds", max_user_duration_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if state is not None:
pulumi.set(__self__, "state", state)
if stream_view is not None:
pulumi.set(__self__, "stream_view", stream_view)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the appstream fleet.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> Optional[pulumi.Input['FleetComputeCapacityArgs']]:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@compute_capacity.setter
def compute_capacity(self, value: Optional[pulumi.Input['FleetComputeCapacityArgs']]):
pulumi.set(self, "compute_capacity", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@disconnect_timeout_in_seconds.setter
def disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> Optional[pulumi.Input['FleetDomainJoinInfoArgs']]:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@domain_join_info.setter
def domain_join_info(self, value: Optional[pulumi.Input['FleetDomainJoinInfoArgs']]):
pulumi.set(self, "domain_join_info", value)
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@enable_default_internet_access.setter
def enable_default_internet_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_internet_access", value)
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> Optional[pulumi.Input[str]]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@fleet_type.setter
def fleet_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fleet_type", value)
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@iam_role_arn.setter
def iam_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_role_arn", value)
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@idle_disconnect_timeout_in_seconds.setter
def idle_disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@image_arn.setter
def image_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_arn", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@max_user_duration_in_seconds.setter
def max_user_duration_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_user_duration_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> Optional[pulumi.Input[str]]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@stream_view.setter
def stream_view(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_view", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> Optional[pulumi.Input['FleetVpcConfigArgs']]:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
@vpc_config.setter
def vpc_config(self, value: Optional[pulumi.Input['FleetVpcConfigArgs']]):
pulumi.set(self, "vpc_config", value)
class Fleet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None,
__props__=None):
"""
Provides an AppStream fleet.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_fleet = aws.appstream.Fleet("testFleet",
compute_capacity=aws.appstream.FleetComputeCapacityArgs(
desired_instances=1,
),
description="test fleet",
display_name="test-fleet",
enable_default_internet_access=False,
fleet_type="ON_DEMAND",
idle_disconnect_timeout_in_seconds=60,
image_name="Amazon-AppStream2-Sample-Image-02-04-2019",
instance_type="stream.standard.large",
max_user_duration_in_seconds=600,
tags={
"TagName": "tag-value",
},
vpc_config=aws.appstream.FleetVpcConfigArgs(
subnet_ids=["subnet-06e9b13400c225127"],
))
```
## Import
`aws_appstream_fleet` can be imported using the id, e.g.,
```sh
$ pulumi import aws:appstream/fleet:Fleet example fleetNameExample
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] description: Description to display.
        :param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect, in seconds.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
        :param pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']] vpc_config: Configuration block for the VPC configuration for the fleet. See below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FleetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an AppStream fleet.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_fleet = aws.appstream.Fleet("testFleet",
compute_capacity=aws.appstream.FleetComputeCapacityArgs(
desired_instances=1,
),
description="test fleet",
display_name="test-fleet",
enable_default_internet_access=False,
fleet_type="ON_DEMAND",
idle_disconnect_timeout_in_seconds=60,
image_name="Amazon-AppStream2-Sample-Image-02-04-2019",
instance_type="stream.standard.large",
max_user_duration_in_seconds=600,
tags={
"TagName": "tag-value",
},
vpc_config=aws.appstream.FleetVpcConfigArgs(
subnet_ids=["subnet-06e9b13400c225127"],
))
```
## Import
`aws_appstream_fleet` can be imported using the id, e.g.,
```sh
$ pulumi import aws:appstream/fleet:Fleet example fleetNameExample
```
:param str resource_name: The name of the resource.
:param FleetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FleetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FleetArgs.__new__(FleetArgs)
if compute_capacity is None and not opts.urn:
raise TypeError("Missing required property 'compute_capacity'")
__props__.__dict__["compute_capacity"] = compute_capacity
__props__.__dict__["description"] = description
__props__.__dict__["disconnect_timeout_in_seconds"] = disconnect_timeout_in_seconds
__props__.__dict__["display_name"] = display_name
__props__.__dict__["domain_join_info"] = domain_join_info
__props__.__dict__["enable_default_internet_access"] = enable_default_internet_access
__props__.__dict__["fleet_type"] = fleet_type
__props__.__dict__["iam_role_arn"] = iam_role_arn
__props__.__dict__["idle_disconnect_timeout_in_seconds"] = idle_disconnect_timeout_in_seconds
__props__.__dict__["image_arn"] = image_arn
__props__.__dict__["image_name"] = image_name
if instance_type is None and not opts.urn:
raise TypeError("Missing required property 'instance_type'")
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["max_user_duration_in_seconds"] = max_user_duration_in_seconds
__props__.__dict__["name"] = name
__props__.__dict__["stream_view"] = stream_view
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc_config"] = vpc_config
__props__.__dict__["arn"] = None
__props__.__dict__["created_time"] = None
__props__.__dict__["state"] = None
__props__.__dict__["tags_all"] = None
super(Fleet, __self__).__init__(
'aws:appstream/fleet:Fleet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
created_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None) -> 'Fleet':
"""
Get an existing Fleet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: ARN of the appstream fleet.
:param pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] created_time: Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
:param pulumi.Input[str] description: Description to display.
        :param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect, in seconds.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] state: State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
        :param pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']] vpc_config: Configuration block for the VPC configuration for the fleet. See below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FleetState.__new__(_FleetState)
__props__.__dict__["arn"] = arn
__props__.__dict__["compute_capacity"] = compute_capacity
__props__.__dict__["created_time"] = created_time
__props__.__dict__["description"] = description
__props__.__dict__["disconnect_timeout_in_seconds"] = disconnect_timeout_in_seconds
__props__.__dict__["display_name"] = display_name
__props__.__dict__["domain_join_info"] = domain_join_info
__props__.__dict__["enable_default_internet_access"] = enable_default_internet_access
__props__.__dict__["fleet_type"] = fleet_type
__props__.__dict__["iam_role_arn"] = iam_role_arn
__props__.__dict__["idle_disconnect_timeout_in_seconds"] = idle_disconnect_timeout_in_seconds
__props__.__dict__["image_arn"] = image_arn
__props__.__dict__["image_name"] = image_name
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["max_user_duration_in_seconds"] = max_user_duration_in_seconds
__props__.__dict__["name"] = name
__props__.__dict__["state"] = state
__props__.__dict__["stream_view"] = stream_view
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["vpc_config"] = vpc_config
return Fleet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the appstream fleet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> pulumi.Output['outputs.FleetComputeCapacity']:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> pulumi.Output[str]:
"""
Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> pulumi.Output[int]:
"""
        Amount of time that a streaming session remains active after users disconnect, in seconds.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> pulumi.Output['outputs.FleetDomainJoinInfo']:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> pulumi.Output[bool]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> pulumi.Output[str]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> pulumi.Output[str]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> pulumi.Output[str]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@property
@pulumi.getter(name="imageName")
def image_name(self) -> pulumi.Output[str]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> pulumi.Output[str]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> pulumi.Output[int]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> pulumi.Output[str]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> pulumi.Output['outputs.FleetVpcConfig']:
"""
        Configuration block for the VPC configuration for the fleet. See below.
"""
return pulumi.get(self, "vpc_config")
| nilq/baby-python | python |
if x == 'none':
if False:
print('None')
elif x is None:
print('oh')
elif x == 12:
print('oh')
else:
print(123)
if foo:
foo()
elif bar:
bar()
else:
if baz:
baz()
elif garply:
garply()
else:
qux()
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Source: http://www.runoob.com/python/python-exercise-example70.html
if __name__ == "__main__":
# s = input("please input a string:\n")
s = "Hello World"
print("the string has %d characters." % len(s))
| nilq/baby-python | python |
"""
Orchestrator module
"""
import logging
import os
import re
import shutil
import traceback
from functools import wraps
from glob import glob
from io import open
import six
from halo import Halo
from tabulate import tabulate
from toscaparser.common.exception import ValidationError
from yaml.scanner import ScannerError
from termcolor import colored
from . import docker_interface, helper, protocol_helper
from .graph.nodes import Container, Software, Volume
from .graph.protocol import (CONTAINER_STATE_CREATED, CONTAINER_STATE_DELETED,
CONTAINER_STATE_RUNNING, SOFTWARE_STATE_ZOTTED,
STATE_RUNNING, VOLUME_STATE_CREATED,
VOLUME_STATE_DELETED)
from .helper import Logger
from .managers.container_manager import ContainerManager
from .managers.software_manager import SoftwareManager
from .managers.volume_manager import VolumeManager
from .storage import Memory
from .tosca_parser import get_tosca_template
try:
from os import scandir
except ImportError:
from scandir import scandir
class Orchestrator:
def update_memory(f):
"""decorator that update memory before execute function"""
@wraps(f)
def decorated_function(*args, **kwargs):
status, faulty = args[0]._update_state()
Logger.println('(update memory: {})'.format(
'ok' if status else 'fixed {}'.format(', '.join(faulty))))
return f(*args, **kwargs)
return decorated_function
def __init__(self,
log_handler=logging.NullHandler(),
quiet=True,
tmp_dir='/tmp/tosker',
                 data_dir='/tmp/tosker'):  # TODO: use /usr/lib/tosker instead
Logger.set(log_handler, quiet)
self._log = Logger.get(__name__)
self._tmp_dir = tmp_dir
# Setup Storage system (folder and class)
self._data_dir = data_dir
try:
os.makedirs(data_dir)
except os.error:
pass
Memory.set_db(data_dir)
@update_memory
def orchestrate(self, file_path, plan, inputs=None):
"""
Start the orchestration using the management protocols.
        plan must be a list of tuples (component, full_operation).
"""
# Parse TOSCA file
tpl = self._parse_tosca(file_path, inputs)
if tpl is None:
return False
# Check plan format
if not self._check_plan_format(tpl, plan):
self._log.debug(plan)
Logger.print_error('Plan format not correct')
return False
# Create tmp directory for the template
self._create_tmp_dir(tpl)
# Load components state
if not self._load_component_state(tpl):
            Logger.print_error('Cannot load components state, '
                               'try to use "tosker prune" to hard reset.')
return False
self._log.debug('State: %s', ' '.join(
(c['name'] + '.' + c['state'] for c in Memory.get_comps(tpl.name))))
try:
# Check plan
self._print_loading_start('Check deployment plan... ')
for component, full_operation in plan:
try:
protocol_helper.can_execute(full_operation, component)
component.protocol.execute_operation(full_operation)
except ValueError as e:
self._print_cross('Error on {}.{}: {}'
''.format(component.name, full_operation, e))
return False
self._load_component_state(tpl)
self._print_tick()
# Create Network
# TODO: do not create network if already there
self._print_loading_start('Create network... ')
docker_interface.create_network(tpl.name)
self._print_tick()
# Execute plan
for component, full_operation in plan:
protocol = component.protocol
self._log.debug('Component %s is in state %s',
component.name, component.protocol.current_state)
self._print_loading_start('Execute op "{}" on "{}"... '
''.format(full_operation, component.name))
transition = protocol.next_transition(full_operation)
self._log.debug('transition: i={} o={}'.format(
transition.interface, transition.operation))
if isinstance(component, Container):
ContainerManager.exec_operation(
component, transition.operation)
elif isinstance(component, Volume):
VolumeManager.exec_operation(
component, transition.operation)
elif isinstance(component, Software):
SoftwareManager.exec_operation(component, transition.interface,
transition.operation)
state = protocol.execute_operation(full_operation)
# remove the component if it is in the initial state
if state == protocol.initial_state:
Memory.remove(component)
else:
Memory.update_state(component, state.name)
self._print_tick()
self._print_outputs(tpl)
except Exception as e:
self._log.debug('Exception type: %s', type(e))
self._log.debug(traceback.format_exc())
self._print_cross(e)
return False
return True
@update_memory
def ls_components(self, app=None, filters={}):
comps = Memory.get_comps(app, filters)
def get_state(state):
return colored(state, ('green' if state == STATE_RUNNING else None))
def format_row(comp):
return [comp['app_name'],
comp['name'],
comp['type'],
get_state(comp['state']),
'{}.{}'.format(comp['app_name'], comp['name'])]
table = [format_row(c) for c in comps]
table_str = tabulate(table, headers=['Application', 'Component',
'Type', 'State', 'Full name'])
Logger.println(table_str)
def log(self, component, operation):
# TODO: add logs also for Docker container
app, name = helper.split(component, '.')
if app is None:
Logger.print_error('First argument must be a component full name '
'(i.e my_app.my_component)')
return
if '.' not in operation:
operation = 'Standard.{}'.format(operation)
self._log.debug('app: %s, name: %s, operation: %s',
app, name, operation)
log_file_name = '{}/{}/*/{}/{}.log'.format(self._tmp_dir,
app, name, operation)
log_file = glob(log_file_name)
if len(log_file) != 1:
Logger.print_error('Component or operation log not found')
return
with open(log_file[0], 'r', encoding='utf-8', errors='ignore') as f:
for line in f.readlines():
line = colored(line, 'green') if line.startswith(
'+ ') else line
Logger.print_(line)
def prune(self):
self._print_loading_start('Remove containers.. ')
con = docker_interface.get_containers(all=True)
for c in (c for c in con if c['Names'][0].startswith('/tosker')):
self._log.debug(c['Names'][0])
docker_interface.delete_container(c['Id'], force=True)
self._print_tick()
self._print_loading_start('Remove volumes.. ')
vol = docker_interface.get_volumes()
for v in (v for v in vol if v['Name'].startswith('tosker')):
self._log.debug(v['Name'])
docker_interface.delete_volume(v['Name'])
self._print_tick()
# TODO: remove also networks
self._print_loading_start('Remove tosker data.. ')
shutil.rmtree(self._tmp_dir)
self._print_tick()
def parse_operations(self, operations):
"""
Transform a ["component:interface.operation"..] in
[("component","interface.operation")..]
"""
return [helper.split(op.strip(), ':') for op in operations]
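    # Example (component names are illustrative):
    #   parse_operations(['db:Standard.create', 'api:Standard.start'])
    #   -> [('db', 'Standard.create'), ('api', 'Standard.start')]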
def read_plan_file(self, file):
"""Parse the operation from a general plan file (.csv, .plan, other)"""
with open(file, 'r') as fstream:
_, ext = os.path.splitext(file)
if '.csv' == ext:
return self._read_csv(fstream)
elif '.plan' == ext:
return self._read_plan(fstream)
else:
Logger.print_error('Plan file format not supported.')
                return None
def _read_csv(self, stream):
"""
Get a file stream of a .csv file and return a list
        of tuples (component, interface.operation).
"""
return [(l[0], '{}.{}'.format(l[1], l[2]))
for l in (l.strip().split(',')
for l in stream.readlines())]
def _read_plan(self, stream):
"""
        Get a file stream of a .plan file and return a list
        of tuples (component, interface.operation).
"""
return self.parse_operations(
[l for l in (l.strip() for l in stream.readlines())
if l and not l.startswith('#')])
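    # A minimal .plan file sketch (names are illustrative); blank lines and
    # '#'-comment lines are skipped, one "component:interface.operation" per line:
    #   # bring up the database first
    #   db:Standard.create
    #   db:Standard.start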
def _parse_tosca(self, file_path, inputs):
'''
Parse TOSCA file
'''
try:
return get_tosca_template(file_path, inputs)
except ScannerError as e:
Logger.print_error('YAML parse error\n {}'.format(e))
return None
except ValidationError as e:
Logger.print_error('TOSCA validation error\n {}'.format(e))
return None
except ValueError as e:
Logger.print_error('TosKer validation error\n {}'.format(e))
self._log.debug(colored(traceback.format_exc(), 'red'))
return None
except Exception as e:
Logger.print_error('Internal error\n {}'.format(e))
self._log.debug('Exception type: %s', type(e))
self._log.debug(colored(traceback.format_exc(), 'red'))
return None
def _create_tmp_dir(self, tpl):
'''
        Create temporary directory
'''
tpl.tmp_dir = os.path.join(self._tmp_dir, tpl.name)
try:
os.makedirs(tpl.tmp_dir)
except os.error as e:
self._log.info(e)
def _check_plan_format(self, tpl, operations):
"""
        operations: [("component", "interface.operation")..]
"""
for i, op in enumerate(operations):
if not (isinstance(op, tuple) and len(op) == 2):
Logger.print_error('Plan is not in the right format')
return False
comp_name, full_operation = op
            # Check that the component exists in the template
comp = tpl[comp_name]
if comp is None:
Logger.print_error(
'Component "{}" not found in template.'.format(comp_name))
return False
# check that the component has interface.operation
interface, operation = helper.split(full_operation, '.')
            if interface not in comp.interfaces or\
                    operation not in comp.interfaces[interface]:
                Logger.print_error('Component "{}" does not have the "{}" '
                                   'operation in the "{}" interface.'
                                   ''.format(comp_name, operation, interface))
return False
operations[i] = comp, full_operation
return True
def _load_component_state(self, tpl):
for comp in tpl.nodes:
state = Memory.get_comp_state(comp)
if state is not None:
state = comp.protocol.find_state(state)
if state is not None:
comp.protocol.current_state = state
else:
return False
else:
comp.protocol.reset()
return True
def _print_outputs(self, tpl):
if len(tpl.outputs) != 0:
Logger.println('\nOUTPUTS:')
for out in tpl.outputs:
self._log.debug('value: %s', out.value)
value = out.value if isinstance(out.value, six.string_types) \
else helper.get_attributes(out.value.args, tpl)
Logger.println(' - ' + out.name + ":", value)
def _update_state(self):
errors = set()
def manage_error(comp, state):
errors.add(comp['full_name'])
Memory.update_state(comp, state)
def manage_error_container(comp, state):
manage_error(comp, state)
path = os.path.join(self._tmp_dir, comp['app_name'], comp['name'])
try:
software = [(f.name, f.path) for f in scandir(path)
if f.is_dir()]
except FileNotFoundError as e:
software = []
self._log.debug('path %s found %s', path, software)
for s, s_path in software:
full_name = '{}.{}'.format(comp['app_name'], s)
                Memory.update_state(full_name, SOFTWARE_STATE_ZOTTED)
errors.add(full_name)
for container in Memory.get_comps(filters={'type': 'Container'}):
status = docker_interface.inspect_container(container['full_name'])
deleted, created, running = status is None,\
status is not None and not status['State']['Running'],\
status is not None and status['State']['Running']
if deleted and container['state'] != CONTAINER_STATE_DELETED:
manage_error_container(container, CONTAINER_STATE_DELETED)
elif created and container['state'] != CONTAINER_STATE_CREATED:
manage_error_container(container, CONTAINER_STATE_CREATED)
elif running and container['state'] != CONTAINER_STATE_RUNNING:
manage_error_container(container, CONTAINER_STATE_RUNNING)
for volume in Memory.get_comps(filters={'type': 'Volume'}):
status = docker_interface.inspect_volume(volume['full_name'])
if status is None:
manage_error(volume, VOLUME_STATE_DELETED)
return len(errors) == 0, errors
def _print_tick(self):
self._loading_thread.succeed(self._loading_thread.text + 'Done')
def _print_skip(self):
self._loading_thread.info(self._loading_thread.text + 'Skipped')
def _print_cross(self, error):
self._loading_thread.fail(self._loading_thread.text + '\n' +
colored(error, 'red'))
def _print_loading_start(self, msg):
self._loading_thread = Halo(text=msg, spinner='dots')
self._loading_thread.start()
| nilq/baby-python | python |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings.app_cfg import VALID_PIPE_MEDIA_EXTS
from vframe.utils.click_utils import generator
@click.command('')
@click.option('-i', '--input', 'opt_input', required=True,
help='Path to image or directory')
@click.option('-e', '--exts', 'opt_exts', default=VALID_PIPE_MEDIA_EXTS,
multiple=True, help='Extensions to glob for')
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True,
help='Recursive glob')
@click.option('--slice', 'opt_slice', type=(int, int), default=(-1, -1),
help="Slice list of inputs")
@click.option('--skip-frames', 'opt_skip_frames', is_flag=True,
help='Skip all frames, only iterate files')
@click.option('--check-exist', 'opt_check_exist',
is_flag=True, default=False,
help='Check files existence before processing')
@click.option('--randomize', 'opt_randomize', is_flag=True,
help='Randomize file list before slicing')
@click.option('--media-path', 'opt_new_filepath', type=str,
default='',
help='Override JSON filepath')
@generator
@click.pass_context
def cli(ctx, sink, opt_input, opt_recursive, opt_exts, opt_slice,
opt_skip_frames, opt_check_exist, opt_randomize, opt_new_filepath):
"""Open media for processing"""
from tqdm import tqdm
import dacite
from vframe.settings.app_cfg import LOG, SKIP_FRAME, READER, SKIP_FILE
from vframe.settings.app_cfg import USE_PREHASH, USE_DRAW_FRAME
from vframe.settings.app_cfg import MEDIA_FILTERS, SKIP_MEDIA_FILTERS
from vframe.models.media import MediaFileReader
from vframe.utils.sys_utils import SignalInterrupt
from vframe.utils.file_utils import get_ext
# ---------------------------------------------------------------------------
# init
sigint = SignalInterrupt()
init_obj = {
'filepath': opt_input,
'exts': tuple(opt_exts),
'slice_idxs': opt_slice,
'recursive': opt_recursive,
'use_prehash': ctx.obj.get(USE_PREHASH, False),
'use_draw_frame': ctx.obj.get(USE_DRAW_FRAME, False),
'media_filters': ctx.obj.get(MEDIA_FILTERS, []),
'skip_all_frames': opt_skip_frames,
'opt_check_exist': opt_check_exist,
'opt_randomize': opt_randomize,
'opt_new_filepath': opt_new_filepath,
}
# init media file reader
r = dacite.from_dict(data_class=MediaFileReader, data=init_obj)
ctx.obj[READER] = r
ctx.obj[SKIP_MEDIA_FILTERS] = get_ext(opt_input) == 'json'
# error checks
if not r.n_files:
LOG.info('No files to process.')
return
# process media
for m in tqdm(r.iter_files(), total=r.n_files, desc='Files', leave=False):
ctx.obj[SKIP_FILE] = False # reset
m.skip_all_frames = opt_skip_frames
if sigint.interrupted:
m.unload()
return
for ok in tqdm(m.iter_frames(), total=m.n_frames, desc=m.fn, disable=m.n_frames <= 1, leave=False):
ctx.obj[SKIP_FRAME] = (opt_skip_frames or m.skip_all_frames)
# TODO: cleanup
if ctx.obj.get(SKIP_FILE, False) or m._skip_file:
ctx.obj[SKIP_FILE] = True
m.set_skip_file()
# check for ctl-c, exit gracefully
if sigint.interrupted:
m.unload()
return
sink.send(m)
# print stats
  LOG.info(r.stats)
| nilq/baby-python | python |
import os
import torch
import numpy as np
import warnings
try:
from typing import Protocol
except ImportError: # noqa
# Python < 3.8
class Protocol:
pass
from .dsp.overlap_add import LambdaOverlapAdd
from .utils import get_device
class Separatable(Protocol):
"""Things that are separatable."""
def forward_wav(self, wav, **kwargs):
"""
Args:
wav (torch.Tensor): waveform tensor.
Shape: 1D, 2D or 3D tensor, time last.
**kwargs: Keyword arguments from `separate`.
Returns:
torch.Tensor: the estimated sources.
Shape: [batch, n_src, time] or [n_src, time] if the input `wav`
did not have a batch dim.
"""
...
@property
def sample_rate(self):
"""Operating sample rate of the model (float)."""
...
def separate(
model: Separatable, wav, output_dir=None, force_overwrite=False, resample=False, **kwargs
):
"""Infer separated sources from input waveforms.
Also supports filenames.
Args:
model (Separatable, for example asteroid.models.BaseModel): Model to use.
wav (Union[torch.Tensor, numpy.ndarray, str]): waveform array/tensor.
Shape: 1D, 2D or 3D tensor, time last.
output_dir (str): path to save all the wav files. If None,
estimated sources will be saved next to the original ones.
force_overwrite (bool): whether to overwrite existing files
(when separating from file).
resample (bool): Whether to resample input files with wrong sample rate
(when separating from file).
**kwargs: keyword arguments to be passed to `forward_wav`.
Returns:
Union[torch.Tensor, numpy.ndarray, None], the estimated sources.
(batch, n_src, time) or (n_src, time) w/o batch dim.
.. note::
`separate` calls `model.forward_wav` which calls `forward` by default.
    For models whose `forward` doesn't have waveform tensors as input/output,
overwrite their `forward_wav` method to separate from waveform to waveform.
"""
if isinstance(wav, str):
file_separate(
model,
wav,
output_dir=output_dir,
force_overwrite=force_overwrite,
resample=resample,
**kwargs,
)
elif isinstance(wav, np.ndarray):
return numpy_separate(model, wav, **kwargs)
elif isinstance(wav, torch.Tensor):
return torch_separate(model, wav, **kwargs)
else:
raise ValueError(
f"Only support filenames, numpy arrays and torch tensors, received {type(wav)}"
)
@torch.no_grad()
def torch_separate(model: Separatable, wav: torch.Tensor, **kwargs) -> torch.Tensor:
"""Core logic of `separate`."""
# Handle device placement
input_device = get_device(wav, default="cpu")
model_device = get_device(model, default="cpu")
wav = wav.to(model_device)
# Forward
separate_func = getattr(model, "forward_wav", model)
out_wavs = separate_func(wav, **kwargs)
# FIXME: for now this is the best we can do.
out_wavs *= wav.abs().sum() / (out_wavs.abs().sum())
# Back to input device (and numpy if necessary)
out_wavs = out_wavs.to(input_device)
return out_wavs
def numpy_separate(model: Separatable, wav: np.ndarray, **kwargs) -> np.ndarray:
"""Numpy interface to `separate`."""
wav = torch.from_numpy(wav)
out_wavs = torch_separate(model, wav, **kwargs)
out_wavs = out_wavs.data.numpy()
return out_wavs
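# A minimal usage sketch (the model object is illustrative): any Separatable
# model can be driven with a time-last array; see `separate` for the full API.
#   import numpy as np
#   mixture = np.random.randn(16000).astype("float32")  # 1D, time last
#   est_sources = numpy_separate(model, mixture)  # estimated sources, time last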
def file_separate(
model: Separatable,
filename: str,
output_dir=None,
force_overwrite=False,
resample=False,
**kwargs,
) -> None:
"""Filename interface to `separate`."""
import soundfile as sf
if not hasattr(model, "sample_rate"):
if isinstance(model, LambdaOverlapAdd):
model = model.nnet
raise TypeError(
f"This function requires your model ({type(model).__name__}) to have a "
"'sample_rate' attribute. See `BaseModel.sample_rate` for details."
)
# SoundFile wav shape: [time, n_chan]
wav, fs = sf.read(filename, dtype="float32", always_2d=True)
if wav.shape[-1] > 1:
warnings.warn(
f"Received multichannel signal with {wav.shape[-1]} signals, "
f"using the first channel only."
)
# FIXME: support only single-channel files for now.
if fs != model.sample_rate:
if resample:
from librosa import resample
wav = resample(wav[:, 0], orig_sr=fs, target_sr=model.sample_rate)[:, None]
else:
raise RuntimeError(
f"Received a signal with a sampling rate of {fs}Hz for a model "
f"of {model.sample_rate}Hz. You can pass `resample=True` to resample automatically."
)
# Pass wav as [batch, n_chan, time]; here: [1, 1, time]
wav = wav[:, 0][None, None]
(to_save,) = numpy_separate(model, wav, **kwargs)
# Save wav files to filename_est1.wav etc...
for src_idx, est_src in enumerate(to_save):
base = ".".join(filename.split(".")[:-1])
save_name = base + "_est{}.".format(src_idx + 1) + filename.split(".")[-1]
if output_dir is not None:
save_name = os.path.join(output_dir, save_name.split("/")[-1])
if os.path.isfile(save_name) and not force_overwrite:
warnings.warn(
f"File {save_name} already exists, pass `force_overwrite=True` to overwrite it",
UserWarning,
)
return
if fs != model.sample_rate:
from librosa import resample
est_src = resample(est_src, orig_sr=model.sample_rate, target_sr=fs)
sf.write(save_name, est_src, fs)
| nilq/baby-python | python |
import pickle
import brewer2mpl
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from absl import app, flags
from matplotlib.patches import Patch  # used for the custom legend entries below
from utils import *
FLAGS = flags.FLAGS
flags.DEFINE_string('base_dir', '', 'Path to the base dir where the logs are')
flags.DEFINE_bool('small_paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('stretched', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('slide_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('poster_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_string('file_format', 'png', 'File type of the output plot.')
flags.DEFINE_string('file_name', 'prediction-runtime-horizon',
'Name of the file to output to.')
flags.DEFINE_list('horizons', '10,20,30,40,50',
'Comma separated list of horizons.')
flags.DEFINE_integer('hz', '10', 'Frequency of predictions.')
def main(argv):
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors[3:]
hatches = ["////", "****"]
if FLAGS.paper_mode:
plt.figure(figsize=(3.33, 2.22))
set_paper_rcs()
elif FLAGS.small_paper_mode:
plt.figure(figsize=(2.4, 1.66))
set_paper_rcs()
elif FLAGS.stretched:
plt.figure(figsize=(3, 1.4))
set_paper_rcs()
elif FLAGS.slide_mode:
plt.figure(figsize=(8, 6))
set_slide_rcs()
elif FLAGS.poster_mode:
plt.figure(figsize=(12, 9))
set_poster_rcs()
else:
plt.figure()
set_rcs()
ax = plt.gca()
models = ["mfp", "r2p2"]
legend_elements = []
dfs = []
for i, model in enumerate(models):
for h in FLAGS.horizons:
file_name = '{}/{}_timely_horizon_{}.pkl'.format(
FLAGS.base_dir, model, h)
f = open(file_name, 'rb')
num_secs = int(int(h) * 1.0 / FLAGS.hz)
data = pickle.load(f)
df = pd.DataFrame({
'model': [model] * len(data),
'horizon': [num_secs] * len(data),
'runtime': data
})
dfs.append(df)
if model == 'mfp':
label = 'MFP'
elif model == 'r2p2':
label = 'R2P2-MA'
else:
label = model
legend_elements.append(
Patch(facecolor=colors[i],
alpha=0.6,
hatch=hatches[i],
label=label))
data = pd.concat(dfs)
ax = sns.boxplot(x='horizon',
y='runtime',
hue='model',
data=data,
palette=colors,
width=0.7,
saturation=1,
whis=(5, 95),
showfliers=False)
for i, box in enumerate(ax.artists):
box.set_hatch(hatches[i % len(models)])
adjust_box_widths(plt.gcf(), 0.8)
plt.legend(handles=legend_elements,
framealpha=0,
handlelength=1.5,
handletextpad=0.1)
plt.xlabel('Prediction horizon [s]')
plt.ylabel('Runtime [ms]')
plt.savefig("{}.{}".format(FLAGS.file_name, FLAGS.file_format),
format=FLAGS.file_format,
bbox_inches='tight')
if __name__ == '__main__':
app.run(main)
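# Hypothetical invocation (script name and paths are illustrative):
#   python plot_runtime_horizon.py --base_dir=logs --horizons=10,20,30,40,50 \
#       --hz=10 --paper_mode --file_format=pdf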
| nilq/baby-python | python |
# coding: utf-8
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Blurring-a-part-of-an-image-in-Python" data-toc-modified-id="Blurring-a-part-of-an-image-in-Python-1"><span class="toc-item-num">1 </span>Blurring a part of an image in Python</a></div><div class="lev2 toc-item"><a href="#Blur-all-the-image" data-toc-modified-id="Blur-all-the-image-11"><span class="toc-item-num">1.1 </span>Blur all the image</a></div><div class="lev2 toc-item"><a href="#Blur-only-an-area-of-the-image" data-toc-modified-id="Blur-only-an-area-of-the-image-12"><span class="toc-item-num">1.2 </span>Blur only an area of the image</a></div><div class="lev2 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-13"><span class="toc-item-num">1.3 </span>Conclusion</a></div>
# # Blurring a part of an image in Python
#
# This very short notebook shows how to open an image (eg a PNG image), and nicely blur a part of it.
# In[1]:
import numpy as np
import skimage
# In[2]:
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-v -m -a "Lilian Besson (Naereen)" -p numpy,skimage -g')
# ## Blur all the image
# Let's import one of the example image, and blur all of it using [`skimage.filters.gaussian`](http://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.gaussian).
# In[9]:
from skimage import data, io, filters
image = data.astronaut()
# In[10]:
def imshow(image):
io.imshow(image)
io.show()
# In[11]:
imshow(image)
# In[5]:
from skimage.filters import gaussian
# In[12]:
filtered_img = gaussian(image, sigma=1, multichannel=True)
imshow(filtered_img)
# In[13]:
filtered_img = gaussian(image, sigma=2, multichannel=True)
imshow(filtered_img)
# ## Blur only an area of the image
# In[17]:
image.shape
# In[71]:
def blur(image, x0, x1, y0, y1, sigma=1, imshowall=False):
x0, x1 = min(x0, x1), max(x0, x1)
y0, y1 = min(y0, y1), max(y0, y1)
im = image.copy()
sub_im = im[x0:x1,y0:y1].copy()
if imshowall: imshow(sub_im)
blur_sub_im = gaussian(sub_im, sigma=sigma)
if imshowall: imshow(blur_sub_im)
blur_sub_im = np.round(255 * blur_sub_im)
im[x0:x1,y0:y1] = blur_sub_im
return im
# In[72]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=1)
imshow(filtered_img)
# In[76]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=5)
imshow(filtered_img)
# In[73]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=10)
imshow(filtered_img)
# In[74]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=20)
imshow(filtered_img)
# ## Conclusion
#
# That's it.
| nilq/baby-python | python |
import json
import logging
import requests
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import status
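# Hypothetical settings.py wiring (module path and URL are illustrative, not
# taken from this project):
#   AUTHENTICATION_BACKENDS = [
#       'accounts.backends.ExternalUmbrellaServiceAuthenticationBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   ]
#   BACKEND_UO_HASH = 'https://umbrella.example.org/api/check-eaa-hash'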
class ExternalUmbrellaServiceAuthenticationBackend:
logger = logging.getLogger(__name__)
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
:param user_id:
:return:
"""
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def authenticate(self, request, uid=None, eaa_hash=None):
self.logger.debug('Attempting to authenticate via umbrella')
try:
if None in (uid, eaa_hash):
self.logger.warning('Tried to authenticate user with missing fields, rejecting')
return None
post_data = {'eaa_hash': eaa_hash}
headers = {'Content-type': 'application/json'}
response = requests.post(settings.BACKEND_UO_HASH, data=json.dumps(post_data), headers=headers)
if response.status_code == status.HTTP_200_OK:
self.logger.info('Authenticated %s', uid)
try:
user = User.objects.get(username=uid)
return user
                except User.DoesNotExist:
self.logger.info('Creating %s user in django database, as it is not yet present', uid)
# User will have unusable password, it is authenticated externally
user = User.objects.create_user(uid, '')
user.save()
return user
return None
except Exception as e:
            self.logger.debug(e)
            return None
| nilq/baby-python | python |
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
from nnabla.testing import assert_allclose
ctxs = list_context('TopNError')
def ref_top_n_error(x, l, axis, n):
orig_x = x.copy()
x = np.rollaxis(x, axis, x.ndim).reshape(-1, x.shape[axis])
ll = np.rollaxis(l, axis, x.ndim).flatten()
y = []
for x_, ll_ in zip(x, ll):
threshold = x_[ll_]
count = 0
for x__ in x_:
if x__ >= threshold:
count += 1
y.append(1 if count > n else 0)
return np.array(y).reshape(l.shape)
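# Reference semantics, restated: counting ties, a sample is a top-n error (1)
# when more than n scores are >= the true label's score, i.e. the true label
# ranks outside the top n predictions.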
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2, -1, -2, -3])
@pytest.mark.parametrize("n", [3, 5])
def test_top_n_error_forward(seed, axis, n, ctx, func_name):
ishape = [5, 6, 7]
rng = np.random.RandomState(seed)
l_shape = list(ishape)
l_shape[axis] = 1
n_class = ishape[axis]
inputs = [
rng.rand(5, 6, 7).astype(np.float32) * 0.9 + 0.05,
        rng.randint(0, n_class, size=l_shape).astype(int)]  # np.int was removed from NumPy
ref = ref_top_n_error(inputs[0], inputs[1], axis, n)
x = nn.Variable(ishape)
l = nn.Variable(l_shape)
y = F.top_n_error(x, l, axis, n)
x.d = inputs[0]
l.d = inputs[1]
y.forward()
res = y.d
atol_f = 1e-6
assert_allclose(ref, res, atol=atol_f)
| nilq/baby-python | python |
from tclCommands.TclCommand import TclCommandSignaled
import collections
class TclCommandMirror(TclCommandSignaled):
"""
Tcl shell command to mirror an object.
"""
# array of all command aliases, to be able use
# old names for backward compatibility (add_poly, add_polygon)
aliases = ['mirror']
description = '%s %s' % ("--", "Will mirror the geometry of a named object. Does not create a new object.")
# Dictionary of types from Tcl command, needs to be ordered.
# For positional arguments
arg_names = collections.OrderedDict([
('name', str)
])
# Dictionary of types from Tcl command, needs to be ordered.
# For options like -optionname value
option_types = collections.OrderedDict([
('axis', str),
('box', str),
('origin', str)
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = ['name']
# structured help for current command, args needs to be ordered
help = {
'main': "Will mirror the geometry of a named object. Does not create a new object.",
'args': collections.OrderedDict([
('name', 'Name of the object (Gerber, Geometry or Excellon) to be mirrored. Required.'),
('axis', 'Mirror axis parallel to the X or Y axis.'),
            ('box', 'Name of the object which acts as a box (a cutout, for example).'),
            ('origin', 'Reference point. It is used only if the box is not used. Format (x,y).\n'
'Comma will separate the X and Y coordinates.\n'
'WARNING: no spaces are allowed. If uncertain enclose the two values inside parenthesis.\n'
'See the example.')
]),
'examples': ['mirror obj_name -box box_geo -axis X -origin 3.2,4.7']
}
def execute(self, args, unnamed_args):
"""
Execute this TCL shell command
:param args: array of known named arguments and options
:param unnamed_args: array of other values which were passed into command
without -somename and we do not have them in known arg_names
:return: None or exception
"""
name = args['name']
# Get source object.
try:
obj = self.app.collection.get_by_name(str(name))
except Exception:
return "Could not retrieve object: %s" % name
if obj is None:
return "Object not found: %s" % name
if obj.kind != 'gerber' and obj.kind != 'geometry' and obj.kind != 'excellon':
return "ERROR: Only Gerber, Excellon and Geometry objects can be mirrored."
# Axis
if 'axis' in args:
try:
axis = args['axis'].upper()
except KeyError:
axis = 'Y'
else:
axis = 'Y'
# Box
if 'box' in args:
try:
box = self.app.collection.get_by_name(args['box'])
except Exception:
return "Could not retrieve object: %s" % args['box']
if box is None:
return "Object box not found: %s" % args['box']
try:
xmin, ymin, xmax, ymax = box.bounds()
px = 0.5 * (xmin + xmax)
py = 0.5 * (ymin + ymax)
obj.mirror(axis, [px, py])
obj.plot()
return
except Exception as e:
return "Operation failed: %s" % str(e)
# Origin
if 'origin' in args:
try:
origin_val = eval(args['origin'])
x = float(origin_val[0])
y = float(origin_val[1])
except KeyError:
x, y = (0, 0)
except ValueError:
return "Invalid distance: %s" % str(args['origin'])
try:
obj.mirror(axis, [x, y])
except Exception as e:
return "Operation failed: %s" % str(e)
| nilq/baby-python | python |
import torch.nn as nn
from qanet.encoder_block import EncoderBlock
class ModelEncoder(nn.Module):
def __init__(self, n_blocks=7, n_conv=2, kernel_size=7, padding=3,
hidden_size=128, conv_type='depthwise_separable', n_heads=8, context_length=400):
super(ModelEncoder, self).__init__()
self.n_conv = n_conv
self.n_blocks = n_blocks
self.total_layers = (n_conv + 2) * n_blocks
self.stacked_encoderBlocks = nn.ModuleList([EncoderBlock(n_conv=n_conv,
kernel_size=kernel_size,
padding=padding,
n_filters=hidden_size,
conv_type=conv_type,
n_heads=n_heads) for i in range(n_blocks)])
def forward(self, x, mask):
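        # The same weight-shared stack of encoder blocks is applied three times
        # to produce M0, M1 and M2 as in QANet; the running layer index and
        # total depth passed to each block are presumably used for
        # depth-scaled layer dropout.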
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M0 = x
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M1 = x
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M2 = x
return M0, M1, M2
| nilq/baby-python | python |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def helper(self,root):
if not root:
return (0,0)
# get res from left
left=self.helper(root.left)
# get res from right
right=self.helper(root.right)
        # each helper call returns (best without robbing the node, best robbing it)
        # root not robbed: each child contributes its own best, robbed or not
max_norob=max(left[0],left[1])+max(right[0],right[1])
        # root robbed: left and right children must not be robbed
max_rob=root.val+left[0]+right[0]
return (max_norob,max_rob)
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
"""
1. root stolen
2. root not stolen
"""
res=self.helper(root)
return max(res[0],res[1])
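# Quick sanity check (assumes the TreeNode class sketched in the header):
#   root = TreeNode(3); root.left = TreeNode(2); root.right = TreeNode(3)
#   root.left.right = TreeNode(3); root.right.right = TreeNode(1)
#   Solution().rob(root)  # -> 7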
| nilq/baby-python | python |
#%load_ext autoreload
#%autoreload 2
from pathlib import Path
from pprint import pformat
from hloc import extract_features, match_features, localize_inloc, visualization
dataset = Path('datasets/inloc/') # change this if your dataset is somewhere else
pairs = Path('pairs/inloc/')
loc_pairs = pairs / 'pairs-query-netvlad40.txt' # top 40 retrieved by NetVLAD
outputs = Path('outputs/inloc/') # where everything will be saved
results = outputs / 'InLoc_hloc_superpoint+superglue_netvlad40.txt' # the result file
# list the standard configurations available
print(f'Configs for feature extractors:\n{pformat(extract_features.confs)}')
print(f'Configs for feature matchers:\n{pformat(match_features.confs)}')
# pick one of the configurations for extraction and matching
# you can also simply write your own here!
feature_conf = extract_features.confs['superpoint_inloc']
matcher_conf = match_features.confs['superglue']
feature_path = extract_features.main(feature_conf, dataset, outputs)
match_path = match_features.main(matcher_conf, loc_pairs, feature_conf['output'], outputs)
localize_inloc.main(
dataset, loc_pairs, feature_path, match_path, results,
skip_matches=20) # skip database images with too few matches
save_path = "outputs/inloc/visualize.png"
visualization.save_visualize_loc_images(save_path, results, dataset, n=1, top_k_db=1, seed=2)
print("done") | nilq/baby-python | python |
from aws_cdk import (
aws_batch as _batch,
aws_ec2 as _ec2,
aws_iam as _iam,
core,
)
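# Hypothetical use inside a CDK Stack (argument values are illustrative):
#   batch_env = BatchENV(self, "BatchENV", CurrentVPC=vpc, TargetS3=bucket,
#                        UserName="alice")
#   queue = batch_env.getComputeQueue("ComputeQueue")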
class BatchENV(core.Construct):
def getComputeQueue(self,queue_name):
return self.job_queue[queue_name]
def __init__(self, scope: core.Construct, id: str,CurrentVPC="default",TargetS3="default",UserName="default",**kwargs):
super().__init__(scope, id, **kwargs)
self.job_queue = {}
# batch service role
self.batch_service_role = _iam.Role(self,'BatchServiceRole',
assumed_by=_iam.ServicePrincipal('batch.amazonaws.com'),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSBatchServiceRole')
]
)
# ec2 role with policy that allow to get object from s3 bucket for batch computing
self.batch_compute_role = _iam.Role(self, 'BatchComputeRole',
assumed_by=_iam.CompositePrincipal(
_iam.ServicePrincipal('ec2.amazonaws.com'),
_iam.ServicePrincipal('ecs.amazonaws.com')
),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2RoleforSSM'),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role"),
_iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchLogsFullAccess")
]
)
TargetS3.grant_read_write(self.batch_compute_role)
self.batch_compute_instance_profile = _iam.CfnInstanceProfile(
self,
'BatchInstanceProfile' + UserName,
instance_profile_name='BatchInstanceProfile-' + UserName,
roles=[self.batch_compute_role.role_name]
)
self.ComputeENV = _batch.ComputeEnvironment(self, "ComputeENV",
service_role=self.batch_service_role,
compute_resources={
"vpc": CurrentVPC,
"instance_types":[
_ec2.InstanceType("c5"),
_ec2.InstanceType("m5")
],
"maxv_cpus":128,
"minv_cpus":0,
"type":_batch.ComputeResourceType.SPOT,
"allocation_strategy":_batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
"instance_role":self.batch_compute_instance_profile.instance_profile_name
}
)
self.ComputeQueue = _batch.JobQueue(self,"ComputeQueue",
priority=1,
compute_environments=[
_batch.JobQueueComputeEnvironment(
compute_environment=self.ComputeENV,
order=1
)
]
)
self.job_queue["ComputeQueue"] = self.ComputeQueue | nilq/baby-python | python |
from vyper import basebot
from vyper.web import interface
import os
class PluginBot(basebot.BaseBot):
def __init__(self, token, debug=False, start_loop=False, loop_time=.05, ping=True, list_plugins=False, web_app=None, name=None):
if not os.path.exists('plugins'):
os.mkdir('plugins')
with open('plugins/__init__.py', 'w') as ini:
ini.write("""import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=__name__+'.'):
__import__(modname)""")
import plugins
Ping.enabled = ping
self.functions = {
'message': self.message,
'edited_message': self.edited_message,
'channel_post': self.channel_post,
'edited_channel_post': self.edited_channel_post,
'inline_query': self.inline_query,
'chosen_inline_result': self.chosen_inline_result,
'callback_query': self.callback_query,
'shipping_query': self.shipping_query,
'pre_checkout_query': self.pre_checkout_query
}
self.configure(token, functions=self.functions, debug=debug)
self.plugins = list(self._get_plugins())
if list_plugins:
for plugin in self.plugins:
print(plugin)
self.web_app = web_app
if start_loop:
self.start_loop(loop_time)
def _get_plugins(self):
for plugin in Plugin.__subclasses__():
if plugin.enabled:
plugin.bot = self
yield plugin()
def test_plugins(self, msg):
if 'text' in msg:
for plugin in list(self.plugins):
plugin.message(msg)
class Plugin:
bot = None
enabled = True
def __repr__(self):
return "Plugin: {0}".format(self.__class__.__name__)
def message(self, msg):
pass
class Ping(Plugin):
def message(self, msg):
if msg['text'] == '/ping':
self.bot.sendMessage(msg['chat']['id'], 'PONG!')
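
# A hedged example (not from the original project): any further Plugin
# subclass with enabled=True is picked up automatically by _get_plugins().
# The command name and reply below are illustrative.
class Echo(Plugin):
    def message(self, msg):
        if msg['text'].startswith('/echo '):
            self.bot.sendMessage(msg['chat']['id'], msg['text'][len('/echo '):])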
| nilq/baby-python | python |
import timeit
from copy import deepcopy
import time
import cProfile
import pstats
import numpy as np
from sympy import sin, symbols, Matrix, Symbol, exp, solve, Eq, pi, Piecewise, Function, ones
from CompartmentalSystems.smooth_model_run import SmoothModelRun
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
def smr_1d(nc):
#one-dimensional
C = Symbol('C')
state_vector = [C]
time_symbol = Symbol('t')
input_fluxes = {}
output_fluxes = {0: C}
internal_fluxes = {}
srm = SmoothReservoirModel(state_vector, time_symbol, input_fluxes, output_fluxes, internal_fluxes)
start_values = np.array([5])
times = np.linspace(0,1,6)
smr = SmoothModelRun(srm, {}, start_values, times)
smr.build_state_transition_operator_cache(nc)
return deepcopy(smr)
def smr_2d(nc):
# two-dimensional
C_0, C_1 = symbols('C_0 C_1')
state_vector = [C_0, C_1]
time_symbol = Symbol('t')
input_fluxes = {}
output_fluxes = {0: C_0, 1: C_1}
internal_fluxes = {}
srm = SmoothReservoirModel(state_vector, time_symbol, input_fluxes, output_fluxes, internal_fluxes)
start_values = np.array([5, 3])
times = np.linspace(0,1,100)
smr = SmoothModelRun(srm, {}, start_values, times)
smr.build_state_transition_operator_cache(nc)
return deepcopy(smr)
def age_densities(smr):  # formerly age_densities_1D(smr)
start_age_densities = lambda a: np.exp(-a)*smr.start_values
p=smr.pool_age_densities_func(start_age_densities)
p1_sv = smr._age_densities_1_single_value(start_age_densities)
# negative ages will be cut off automatically
ages = np.linspace(-1,1,3)
res=p(ages)
# main
reps=10
def funcmaker(f,*args):
    def f_without_args():
        return f(*args)
    return f_without_args
for smr_func in [smr_1d,smr_2d]:
print('#####################################')
for nc in [10,100,1000]:#,10000]:
smr=smr_func(nc)
res=timeit.timeit(
#funcmaker(age_densities_1_single_value_2D,smr)
funcmaker(age_densities,smr)
,number=10
)
print('res',res)
#with cProfile.Profile() as pr:
# test_age_densities_1_single_value()
#
#st=pstats.Stats(pr)
#st.sort_stats('time')
#st.print_stats()
| nilq/baby-python | python |
from django.contrib import admin
from .models import User, Agent
class UserAdmin(admin.ModelAdmin):
list_display = ['username', 'is_agent', 'is_superuser']
admin.site.register(User, UserAdmin)
admin.site.register(Agent)
| nilq/baby-python | python |
'''
'''
def main():
info('Pump Microbone After Jan diode analysis')
close(description="Jan Inlet")
close(description= 'Microbone to Minibone')
open(description= 'Microbone to Turbo')
open(description= 'Microbone to Getter NP-10H')
open(description= 'Microbone to Getter NP-10C')
open(description= 'Microbone to CO2 Laser')
#open(description= 'CO2 Laser to Jan')
open(description= 'Microbone to Inlet Pipette')
sleep(1)
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# script by Ruchir Chawdhry
# released under MIT License
# github.com/RuchirChawdhry/Python
# ruchirchawdhry.com
# linkedin.com/in/RuchirChawdhry
from subprocess import run
from prettytable import PrettyTable
# PS: This only works on macOS & Linux. It will not work on Windows
# unless you install GNU coreutils:
# http://gnuwin32.sourceforge.net/packages/coreutils.htm
def folder_size(path):
size = run(["du", "-sk", path], capture_output=True, encoding="utf-8")
return size
def megabytes(size):
mb = int(size) / 1024
return round(mb, 2)
def gigabytes(size):
gb = (int(size) / 1024) / 1024
return round(gb, 2)
def table_print(data):
t = PrettyTable()
mb = megabytes(data[0])
gb = gigabytes(data[0])
t.field_names = ["Folder/Directory", "KB", "MB", "GB"]
t.add_row([data[1], data[0], mb, gb])
print(t)
if __name__ == "__main__":
try:
s = folder_size(input("PATH TO FOLDER/DIR: "))
s = str(s.stdout).split("\t")
table_print(s)
except ValueError:
print("Please enter a valid PATH without quotes or any other characters")
| nilq/baby-python | python |
import logging
import logging.handlers
from counterblock.lib import config
def set_up(verbose):
global MAX_LOG_SIZE
    MAX_LOG_SIZE = config.LOG_SIZE_KB * 1024  # max log size in bytes before rotation, taken from config
global MAX_LOG_COUNT
MAX_LOG_COUNT = config.LOG_NUM_FILES
# Initialize logging (to file and console)
logger = logging.getLogger() #get root logger
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
#Color logging on console for warnings and errors
logging.addLevelName(logging.WARNING, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
#Console logging
console = logging.StreamHandler()
console.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(module)s: %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
#File logging (rotated)
fileh = logging.handlers.RotatingFileHandler(config.LOG, maxBytes=MAX_LOG_SIZE, backupCount=MAX_LOG_COUNT)
fileh.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(module)s:%(message)s', '%Y-%m-%d-T%H:%M:%S%z')
fileh.setFormatter(formatter)
logger.addHandler(fileh)
#socketio logging (don't show on console in normal operation)
socketio_log = logging.getLogger('socketio')
socketio_log.setLevel(logging.DEBUG if verbose else logging.WARNING)
socketio_log.propagate = False
#Transaction log
tx_logger = logging.getLogger("transaction_log") #get transaction logger
tx_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
tx_fileh = logging.handlers.RotatingFileHandler(config.TX_LOG, maxBytes=MAX_LOG_SIZE, backupCount=MAX_LOG_COUNT)
tx_fileh.setLevel(logging.DEBUG if verbose else logging.INFO)
tx_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(module)s:%(message)s', '%Y-%m-%d-T%H:%M:%S%z')
tx_fileh.setFormatter(tx_formatter)
tx_logger.addHandler(tx_fileh)
tx_logger.propagate = False
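
if __name__ == "__main__":
    # Smoke-test sketch (an assumption, not part of the original module): it
    # presumes counterblock's config.LOG and config.TX_LOG point at writable
    # paths. Both loggers should then write to the console and rotated files.
    set_up(verbose=True)
    logging.getLogger(__name__).info("root logger configured")
    logging.getLogger("transaction_log").info("transaction logger configured")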
| nilq/baby-python | python |
from getpass import getpass
from pprint import pprint
from datetime import datetime
from sqlalchemy import create_engine
from pydango import state
from pydango.switchlang import switch
from pydango import (
primary_func,
secondary_func
)
from pydango.primary_func import chunks
from pydango.primary_func import (
create_sqlite_session,
random_number_generator,
)
from pydango.tables import (
Account,
Category,
Movie,
Payment,
Ticket,
Theater,
theater_schedule,
)
from sqlalchemy.sql import (
update,
and_,
)
# Unfortunately I could not find a way to avoid creating a
# second connection to the sqlite DB here
engine = create_engine('sqlite:///sqlite3.db')
engine, session = create_sqlite_session(engine=engine)
def run():
print('****************** Hello Cinephile ******************')
print()
show_commands()
while True:
action = primary_func.get_action()
with switch(action) as s:
s.case('c', create_account)
s.case('l', log_into_account)
s.case('o', logout)
s.case('s', list_movies)
s.case('n', browse_by_location)
s.case('t', browse_by_category)
s.case('r', purchase_ticket)
s.case('v', view_ticket)
s.case('m', lambda: 'change_mode')
s.case(['x', 'bye', 'exit', 'exit()'], secondary_func.exit_app)
s.default(secondary_func.unknown_command)
if action:
print()
if s.result == 'change_mode':
return
def show_commands():
print('What action would you like to take: ')
print('[C]reate an account')
print('[L]ogin to your account')
print('Log[O]ut of your account')
print('[R]eserve a movie ticket')
print('[V]iew your movie ticket')
print('[S]ee list of available movies')
print('Search for [N]earby theaters')
print('Search by ca[T]egory')
print('[M]ain menu')
print('e[X]it app')
print('[?] Help (this info)')
print()
def create_account():
print("****************** REGISTER ******************")
print()
print("Please provide the following information\n")
email = input("Email (required): ").strip().lower()
credit_card = input("Credit-card number (required, i.e. 4444333399993333): ").strip()
credit_card = int(credit_card)
password = getpass().strip()
zip_code = input("Zip-code (required): ").strip()
zip_code = int(zip_code)
first_name = input("What is your first name? ").strip()
last_name = input("What is your last name? ").strip()
old_account = session.query(Account).filter_by(email=email).first()
if old_account:
secondary_func.error_msg(f"ERROR: Account with email {email} already exists.")
return
account = Account(
email=email,
credit_card=credit_card,
password=password,
zip_code=zip_code,
first_name=first_name,
last_name=last_name
# exclude theater_owner attribute
)
session.add(account)
# Flush
my_account = session.query(Account).filter_by(email=email).first()
session.commit()
state.active_account = account
secondary_func.success_msg(f"\nCreated new account with id {state.active_account.id}")
def log_into_account():
print("****************** LOGIN ******************")
email = input("Email: ").strip()
password = getpass().strip()
account = session.query(Account).filter_by(email=email).first()
if not account:
secondary_func.error_msg(f"Could not find account with email ({email})")
return
elif account.password != password:
secondary_func.error_msg(f"Password does not match")
return
state.active_account = account
secondary_func.success_msg(f"\nYou are now logged in.")
# To help with testing in the Python shell
return state.active_account
def logout():
if state.active_account is None:
print("You are already logged-out.")
return
state.active_account = None
print("You are logged-out.")
def list_movies():
print("****************** BROWSE FOR MOVIES ******************")
print()
# Grab all Movie objects
movies = session.query(Movie).filter_by(active=True).all()
movies_list = [
i.__dict__.copy()
for i in movies
]
# movie __dict__ attribute contains _sa_instance_state which isn't useful
# popped = [i.pop('_sa_instance_state') for i in movies_list]
# create a movie_chunks generator out of movie_list
# to generate 3 items at a time
movie_chunks = chunks(movies_list, 5)
while True:
chunked = next(movie_chunks, None)
if chunked == None:
print("The End")
break
for i in chunked:
print(f"""\nTitle: {i['title']} | Rating: {i['rating']}
Description: {i['description']}""")
more = input("\n--More--<ENTER>\n")
if not more == "":
break
def browse_by_location():
print("****************** BROWSE FOR MOVIES BY LOCATION ******************")
print()
zip_code = input("Enter your zipcode: ").strip()
zip_code = int(zip_code)
theaters = session.query(Theater).filter_by(zip_code=zip_code).all()
if not theaters:
print("There are no theaters in that zip_code.")
by_city = input("Would you like to search by city (Yes or <ENTER to quit>)? ").strip()
if by_city == "":
return
city = input("Enter your city of residence: ").strip()
theaters = session.query(Theater).filter_by(city=city).all()
if not theaters:
print("Sorry, but there are no open theaters in your city.")
return
for i, theater in enumerate(theaters, 1):
movies = theater.movies
print(f"""\n{i}. {theater.name} at {theater.address} {theater.zip_code}
Open: {theater.open_time.strftime('%H:%M:%S')} | Close: {theater.close_time.strftime('%H:%M:%S')}
Prices: {theater.ticket_price}
""")
print(f"\n{theater.name}'s Movies:\n")
if movies:
for movie in movies:
movie = session.query(Movie).filter_by(id=movie.movie_id).first()
print(f"Title: {movie.title} | Rating: {movie.rating}\n")
else:
print("No movies playing currently due to COVID.")
print("Please check back when we get a government that cares about its people.")
def browse_by_category():
print("****************** BROWSE FOR MOVIES BY CATEGORY ******************")
print()
categories = session.query(Category).all()
categories_dict = {
'1': 'Drama',
'2': 'Action',
'3': 'Horror',
'4': 'Scifi',
'5': 'Romance',
'6': 'Comedy'
}
print("Movie categories: \n")
for i, category in enumerate(categories, 1):
print(f"{i}. {category.category_name}")
print()
category = input("Which category are you interested in (Enter a number): ").strip()
category = session.query(Category).filter_by(category_name=categories_dict[category]).first()
movies = category.movies
print(f"Movies for category: {category.category_name}\n")
for i, movie in enumerate(movies, 1):
print(i, movie.title)
def purchase_ticket():
print("****************** PURCHASE TICKETS ******************")
print()
if not state.active_account:
print("You must be logged in to purchase a ticket.")
return
# Get account credentials that were created on registration
account = state.active_account
# Grab the theater_schedule objects
schedules = session.query(theater_schedule).all()
print("\nMOVIE THEATER SCHEDULES\n")
# List all available movies and theaters and times
# with index loop so they can input a number representing an object
# that will later get mapped to elements of tuples appended to a list
index = 0
for i in schedules:
theater = session.query(Theater).filter_by(id=i.theater_id).first()
movie = session.query(Movie).filter_by(id=i.movie_id).first()
index += 1
print(f"""{index}: {theater.name} {theater.address}, Prices: {theater.ticket_price}
{movie.title}, Schedules: {i.time}, Seats: {i.seats_available}\n""")
ticket_number = input("\nEnter ticket number: ").strip()
ticket_number = int(ticket_number) - 1
quantity = input("How many tickets would you like to purchase: ").strip()
quantity = int(quantity)
category = input("Which category of tickets (i.e. Adult/Child): ").strip()
theaters_list = []
    # Create a tuple of the required information to purchase a ticket
# along with an index so the user can select a tuple
for i, x in enumerate(schedules, 1):
theater = session.query(Theater).filter_by(id=x.theater_id).first()
movie = session.query(Movie).filter_by(id=x.movie_id).first()
payment_id = random_number_generator()
payment_id = int(payment_id)
tup = (i, theater.id, movie.id, x.time, payment_id, account.id)
theaters_list.append(tup)
my_ticket = theaters_list[ticket_number]
# I need to figure out the price for the category chosen for
# this particular theater outside of the loop because we don't want to do this for every theater
my_theater = session.query(Theater).filter_by(id=my_ticket[1]).first()
my_movie = session.query(Movie).filter_by(id=my_ticket[2]).first()
ticket_price = float(my_theater.ticket_price[category])
total = ticket_price * quantity
ticket = Ticket(
theater_id=my_ticket[1],
movie_id=my_ticket[2],
time=my_ticket[3],
payment_id=my_ticket[4],
account_id=my_ticket[5],
quantity=quantity,
total=total
)
payment = Payment(
id=my_ticket[4],
credit_card=account.credit_card,
paid=True
)
session.add(ticket)
session.add(payment)
session.commit()
# I think there's gotta be a better way to do this, but what it's supposed to do
# is update the value of seats_available in theater_schedule
    # every time someone purchases a ticket
my_theater_schedule = session.query(theater_schedule).filter_by(
theater_id=my_ticket[1],
movie_id=my_ticket[2],
time=my_ticket[3]
).first()
new_seats_available = my_theater_schedule.seats_available - quantity
engine.execute(update(theater_schedule).where(and_(theater_schedule.c.theater_id==my_ticket[1],
theater_schedule.c.movie_id==my_ticket[2],
theater_schedule.c.time==my_ticket[3])).values(seats_available=new_seats_available))
ticket_receipt = session.query(Ticket).filter_by(id=ticket.id).first()
print("\nYour receipt: \n")
print(f"""Movie: {my_movie.title} | Location: {my_theater.name} at {my_theater.address}
Time: {ticket_receipt.time} | Quantity: {ticket_receipt.quantity} tickets
Total Price: ${total} \n
Payment Id: {payment.id} | Date of Purchase: {ticket_receipt.created.date()}""")
print("\nEnjoy your movie!\n")
def view_ticket():
print("****************** VIEW MY CURRENT TICKETS ******************")
print()
if not state.active_account:
print("You must be logged in to view a purchased ticket.")
return
# Grab account
account = state.active_account
# Get account-related tickets
tickets = session.query(Ticket).filter_by(account_id=account.id).all()
# If account has no tickets return
if not tickets:
return
# Return only valid tickets - tickets that were purchased today
today = datetime.today().date()
print("\nMy Tickets: \n")
for ticket in tickets:
if ticket.created.date() == today:
theater = session.query(Theater).filter_by(id=ticket.theater_id).first()
movie = session.query(Movie).filter_by(id=ticket.movie_id).first()
payment = session.query(Payment).filter_by(id=ticket.payment_id).first()
            if not payment.paid:
                status = 'Unpaid'
            else:
                status = 'Paid'
print(f"""
Movie: {movie.title} | Location: {theater.name} at {theater.address}
Time: {ticket.time} | Quantity: {ticket.quantity} tickets
Total Price: ${ticket.total} | Status: {status}\n
Payment Id: {ticket.payment_id} | Date of Purchase: {ticket.created.date()}\n
""")
| nilq/baby-python | python |
def _foo():
return "private" | nilq/baby-python | python |
from collections import defaultdict
from itertools import islice
from typing import Dict, List, Optional, Sequence
import torch
from tango.common.dataset_dict import DatasetDictBase
from tango.common.exceptions import ConfigurationError
from tango.common.lazy import Lazy
from tango.common.tqdm import Tqdm
from tango.format import Format, JsonFormat
from tango.step import Step
from .data import DataLoader
from .eval_callback import EvalCallback
from .model import Model
from .util import check_dataset, move_to_device, resolve_device, set_seed_all
@Step.register("torch::eval")
class TorchEvalStep(Step):
"""
A PyTorch evaluation loop that pairs well with :class:`TorchTrainStep`.
.. tip::
Registered as a :class:`~tango.step.Step` under the name "torch::eval".
.. important::
The evaluation loop will use a GPU automatically if one is available.
You can control which GPU it uses with the environment variable ``CUDA_VISIBLE_DEVICES``.
For example, set ``CUDA_VISIBLE_DEVICES=1`` to force ``TorchEvalStep`` to only use
the GPU with ID 1.
.. warning::
By default the metrics specified by the ``metric_names`` parameter
are aggregated by simply averaging across batches.
This behavior is usually correct for metrics like "loss" or "accuracy",
for example, but may not be correct for other metrics like "F1".
If this is not correct for your metric you will need to handle the aggregation
internally in your model or with an :class:`EvalCallback`
using the :meth:`EvalCallback.post_batch()` method.
Then set the parameter ``auto_aggregate_metrics`` to ``False``.
"""
DETERMINISTIC = True
CACHEABLE = True
FORMAT: Format = JsonFormat()
SKIP_ID_ARGUMENTS = {"log_every"}
def run( # type: ignore[override]
self,
model: Model,
dataset_dict: DatasetDictBase,
dataloader: Lazy[DataLoader],
test_split: str = "test",
seed: int = 42,
eval_steps: Optional[int] = None,
log_every: int = 1,
metric_names: Sequence[str] = ("loss",),
auto_aggregate_metrics: bool = True,
callbacks: Optional[List[Lazy[EvalCallback]]] = None,
) -> Dict[str, float]:
"""
Evaluate the ``model``.
:param model:
The model to evaluate. It should return a ``dict`` from its ``forward()`` method
that includes all of the metrics in ``metric_names`` .
:param dataset_dict:
Should contain the test data.
:param dataloader:
The data loader that generates test batches. The batches should be :class:`dict`
objects.
:param test_split:
The name of the data split used for evaluation in the ``dataset_dict``.
Default is "test".
:param seed:
Used to set the RNG states at the beginning of the evaluation loop.
:param eval_steps:
The number of steps to evaluate for. If not specified evaluation will
stop after a complete iteration through the ``dataloader``.
:param log_every:
Log every this many steps. Default is ``1``.
:param metric_names:
The names of the metrics to track and aggregate. Default is ``("loss",)``.
:param auto_aggregate_metrics:
If ``True`` (the default), the metrics will be averaged across batches.
This may not be the correct behavior for some metrics (such as F1),
            in which case you should set this to ``False`` and handle the aggregation
internally in your model or with an :class:`EvalCallback`
(using :meth:`EvalCallback.post_batch()`).
:param callbacks:
A list of :class:`EvalCallback`.
"""
set_seed_all(seed)
check_dataset(dataset_dict, test_split)
# Resolve device.
device = resolve_device()
# Prep model.
model = model.eval().to(device)
# Construct dataloader.
dataloader: DataLoader = dataloader.construct(dataset=dataset_dict[test_split])
steps: int
try:
dataloader_len = len(dataloader)
steps = dataloader_len if eval_steps is None else min(dataloader_len, eval_steps)
except TypeError:
if eval_steps is None:
raise ConfigurationError(
"You must set 'eval_steps' for streaming/iterable datasets"
)
else:
steps = eval_steps
# Initialize callbacks.
callbacks: List[EvalCallback] = [
callback.construct(
step_id=self.unique_id,
work_dir=self.work_dir,
model=model,
dataset_dict=dataset_dict,
dataloader=dataloader,
)
for callback in (callbacks or [])
]
for callback in callbacks:
callback.pre_eval_loop()
eval_batches = enumerate(islice(dataloader, steps))
running_metrics: Dict[str, float] = defaultdict(float)
aggregated_metrics: Dict[str, float] = {}
with Tqdm.tqdm(eval_batches, desc="Evaluating", total=steps) as batch_iter:
for step, batch in batch_iter:
should_log_this_step = step % log_every == 0 or step == steps - 1
for callback in callbacks:
callback.pre_batch(step, batch)
batch = move_to_device(batch, device)
with torch.inference_mode():
outputs = model(**batch)
for callback in callbacks:
callback.post_batch(step, outputs)
# Gather metrics we want to track.
batch_metrics = {
k: outputs[k].item() if isinstance(outputs[k], torch.Tensor) else outputs[k]
for k in metric_names
}
# Aggregate metrics.
if auto_aggregate_metrics:
for k in batch_metrics:
running_metrics[k] += batch_metrics[k]
aggregated_metrics[k] = running_metrics[k] / (step + 1)
else:
aggregated_metrics.update(batch_metrics)
# Update progress bar.
if should_log_this_step:
batch_iter.set_postfix(**aggregated_metrics)
# Clean up to help garbage collector. Hopefully this saves memory.
del batch
del outputs
del batch_metrics
for callback in callbacks:
callback.post_eval_loop(aggregated_metrics)
return aggregated_metrics
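
# Usage sketch (hypothetical wiring; in a real Tango pipeline this step is
# normally constructed from a config file rather than by hand):
#
#   step = TorchEvalStep()
#   metrics = step.run(
#       model=my_model,                # a Model whose forward() returns {"loss": ...}
#       dataset_dict=my_dataset_dict,  # must contain the "test" split
#       dataloader=my_lazy_dataloader, # a Lazy[DataLoader]
#       metric_names=("loss",),
#   )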
| nilq/baby-python | python |
import sproxel
from zipfile import ZipFile, ZIP_DEFLATED
import json
import os, sys
import imp
CUR_VERSION=1
def save_project(filename, proj):
# gather layers
layers=[]
for spr in proj.sprites:
for l in spr.layers:
if l not in layers: layers.append(l)
# prepare metadata
meta={}
meta['version']=CUR_VERSION
meta['layers']=[
dict(name=l.name, offset=l.offset, visible=l.visible,
palette = proj.palettes.index(l.palette) if l.palette!=None else -1)
for l in layers]
meta['sprites']=[
dict(name=s.name, layers=[layers.index(l) for l in s.layers], curLayer=s.curLayerIndex)
for s in proj.sprites]
meta['palettes']=[
dict(name=p.name, colors=p.colors)
for p in proj.palettes]
meta['mainPalette']=proj.palettes.index(proj.mainPalette)
# write zip file
with ZipFile(filename, 'w', ZIP_DEFLATED) as zf:
zf.writestr('metadata.json', json.dumps(meta, sort_keys=True, indent=2))
for i, l in enumerate(layers): zf.writestr('%04d.png' % i, l.toPNG())
return True
def load_project(filename):
prj=sproxel.Project()
with ZipFile(filename, 'r') as zf:
meta=json.loads(zf.read('metadata.json'))
# load palettes
palettes=[]
for mp in meta['palettes']:
p=sproxel.Palette()
p.name=mp['name']
p.colors=[tuple(c) for c in mp['colors']]
palettes.append(p)
prj.palettes=palettes
try:
prj.mainPalette=palettes[meta['mainPalette']]
except IndexError:
try:
prj.mainPalette=palettes[0]
except IndexError:
prj.mainPalette=sproxel.Palette()
# load layers
layers=[]
for i, ml in enumerate(meta['layers']):
l=sproxel.layer_from_png(zf.read('%04d.png' % i),
prj.palettes[ml['palette']] if ml['palette']>=0 else None)
l.name =ml['name' ]
l.offset =tuple(ml['offset'])
l.visible=ml['visible']
print 'layer', i, 'type', l.dataType
layers.append(l)
# load sprites
sprites=[]
for ms in meta['sprites']:
s=sproxel.Sprite()
s.name=ms['name']
for i, li in enumerate(ms['layers']):
l=layers[li]
s.insertLayerAbove(i, l)
s.curLayerIndex=ms['curLayer']
sprites.append(s)
prj.sprites=sprites
#print prj.sprites
return prj
def init_plugin_pathes():
sproxel.plugin_pathes=[os.path.abspath(p) for p in sproxel.plugin_pathes]
sys.path=sproxel.plugin_pathes+sys.path
def scan_plugin_module(name, fn):
mod=imp.load_source(name, fn)
try:
info=mod.plugin_info
    except AttributeError:
return
print ' plugin', name, fn
info['module']=name
info['path']=fn
sproxel.plugins_info[name]=info
sproxel.plugins[name]=mod
def scan_plugins():
sproxel.plugins_info=dict()
sproxel.plugins=dict()
for path in sproxel.plugin_pathes:
#print 'scanning', path
for name in os.listdir(path):
fn=os.path.join(path, name)
if os.path.isdir(fn):
fn=os.path.join(fn, '__init__.py')
if os.path.isfile(fn):
scan_plugin_module(name, fn)
else:
modname, ext = os.path.splitext(name)
if ext.lower()=='.py':
scan_plugin_module(modname, fn)
def register_plugins():
for mod in sproxel.plugins.itervalues():
if hasattr(mod, 'register'):
print 'registering plugin', mod.plugin_info['module']
try:
mod.register()
except:
sys.excepthook(*sys.exc_info())
print 'error registering plugin', mod.plugin_info['name']
def unregister_plugins():
for mod in sproxel.plugins.itervalues():
if hasattr(mod, 'unregister'):
print 'unregistering plugin', mod.plugin_info['module']
try:
mod.unregister()
except:
sys.excepthook(*sys.exc_info())
print 'error unregistering plugin', mod.plugin_info['name']
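
# Round-trip sketch (hypothetical file names): save_project and load_project
# are symmetric over the zip + metadata.json layout defined above.
#   prj = load_project('model.sproxel')
#   save_project('model_copy.sproxel', prj)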
| nilq/baby-python | python |
import uuid
from django.db import models
class Dice(models.Model):
sides = models.PositiveIntegerField()
class Roll(models.Model):
roll = models.PositiveIntegerField()
class DiceSequence(models.Model):
uuid = models.UUIDField(primary_key=False, default=uuid.uuid4, editable=True, unique=True)
seq_name = models.CharField(max_length=256)
created = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey('auth.User', related_name='dice_sequence', on_delete=models.CASCADE)
sequence = models.ManyToManyField(Dice)
class RollSequence(models.Model):
created = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey('auth.User', related_name='roll_sequence', on_delete=models.CASCADE)
roll_sequence = models.ManyToManyField(Roll)
dice_sequence = models.ForeignKey(DiceSequence, related_name='+', on_delete=models.PROTECT)
class Meta:
ordering = ('created',)
| nilq/baby-python | python |
class Solution(object):
def XXX(self, n):
"""
:type n: int
:rtype: str
"""
if not isinstance(n, int):
return ""
if n == 1:
return "1"
        pre_value = self.XXX(n-1)  # recursion
        # two-pointer approach
i = 0
res = ""
for j in range(len(pre_value)):
if pre_value[j] != pre_value[i]:
res += str(j-i) + pre_value[i]
i = j
res += str(len(pre_value)-i) + pre_value[i]
return res
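
# Example: the look-and-say chain is "1" -> "11" -> "21" -> "1211", so
# Solution().XXX(4) returns "1211".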
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from django.conf import settings
import requests
from sendsms.backends.base import BaseSmsBackend
TINIYO_API_URL = "https://api.tiniyo.com/v1/Account/SENDSMS_TINIYO_TOKEN_ID/Message"
TINIYO_TOKEN_ID = getattr(settings, "SENDSMS_TINIYO_TOKEN_ID", "")
TINIYO_TOKEN_SECRET = getattr(settings, "SENDSMS_TINIYO_TOKEN_SECRET", "")
class SmsBackend(BaseSmsBackend):
"""
Tiniyo gateway backend. (https://tiniyo.com)
Docs in https://tiniyo.com/docs/#/quickstart
Settings::
SENDSMS_BACKEND = 'sendsms.backends.tiniyo.SmsBackend'
SENDSMS_TINIYO_TOKEN_ID = 'xxx'
SENDSMS_TINIYO_TOKEN_SECRET = 'xxx'
Usage::
from sendsms import api
api.send_sms(
body='This is first sms to tiniyo', from_phone='TINIYO', to=['+13525051111']
)
"""
def send_messages(self, messages):
payload = []
for m in messages:
entry = {"src": m.from_phone, "dst": m.to, "text": m.body}
payload.append(entry)
api_url = TINIYO_API_URL.replace("SENDSMS_TINIYO_TOKEN_ID", TINIYO_TOKEN_ID)
response = requests.post(
api_url, json=payload, auth=(TINIYO_TOKEN_ID, TINIYO_TOKEN_SECRET)
)
if response.status_code != 200:
if self.fail_silently:
return False
raise Exception(
"Error: %d: %s"
% (response.status_code, response.content.decode("utf-8"))
)
return True
| nilq/baby-python | python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder
from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer
class LinformerTransformerEncoder(TransformerEncoder):
"""
Implementation for a Bi-directional Linformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
LinformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(self, args, dictionary, embed_tokens):
self.compress_layer = None
super().__init__(args, dictionary, embed_tokens)
def build_encoder_layer(self, args):
if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
compress_layer = nn.Linear(
self.args.max_positions,
self.args.max_positions // self.args.compressed,
)
            # initialize parameters for the compressed layer
nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
if self.args.freeze_compress == 1:
compress_layer.weight.requires_grad = False
self.compress_layer = compress_layer
return LinformerTransformerEncoderLayer(args, self.compress_layer)
| nilq/baby-python | python |
# file arrange, remove, rename
import os
import astropy.io.fits as fits
def oswalkfunc():
f=open('oswalk.list','w')
#workDIr = os.path.abspath(b'.')
for root, dirs, files in os.walk('.'): # os.walk(".", topdown = False):
# all files with path names
for name in files:
#print(os.path.join(root, name))
f.write(os.path.join(root, name)+'\n')
f.close()
with open('oswalk.list','r') as file_handle: lines = file_handle.read().splitlines()
print(len(lines),'files')
return lines
# lines = [line.strip() for line in file_handle]
def fnamechange(ii):
#for CCA250
i=ii.split('/')[-1]
head=fits.getheader(ii)
objname=head['OBJECT']
dateobs=head['DATE-OBS']
datestr=dateobs[:4]+dateobs[5:7]+dateobs[8:10]+'-'+dateobs[11:13]+dateobs[14:16]+dateobs[17:20]
filterstr=head['FILTER']
exptimestr=str(int(head['EXPTIME']))
newname='Calib-CCA250-'+objname+'-'+datestr+'-'+filterstr+'-'+exptimestr+'.fits'
print('cp '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+newname)
os.system('cp '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+newname)
def LSGTfilechange(ii):
# From Calib-LSGT-NGC3367-20180519-220208-g-BIN1-W-180-003.fits
# To Calib-LSGT-NGC3367-20180519-220208-g-180.fits
i=ii.split('/')[-1]
frag=i.split('-')
    assert frag[0]=='Calib'
# if frag[1]=='T52' : obs='LSGT'
# else : obs=frag[1]
finalname='Calib-LSGT'+'-'+frag[2]+'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[8]+'.fits'
os.system('mv '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+finalname)
def iTelfilechange(ii):
# From Calib-T21-ceouobs.changsu-NGC3367-20161130-042831-R-BIN1-E-180-003.fits
# To Calib-T21-NGC3367-20161130-042831-R-180.fits
i=ii.split('/')[-1]
frag=i.split('-')
    assert frag[0]=='Calib'
# if frag[1]=='T52' : obs='LSGT'
# else : obs=frag[1]
#finalname='Calib-'+ frag[1] +'-'+frag[2]+'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[8]+'.fits'
finalname='Calib-'+ frag[1] +'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[6]+'-'+frag[9]+'.fits'
os.system('mv '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+finalname)
def simplerename(ii,a,b):
'''
simplerename(filename, from, to)
'''
import os
#i=ii.split('/')[-1]
os.system('rename '+a+' '+b+' '+ii)
def oswalknamesep(i):
filename=i.split('/')[-1]
head='/'.join(i.split('/')[:-1])+'/'
return filename, head
###########################################################################
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
files=os.listdir('.')
dirs=[i for i in files if os.path.isdir(i)]
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in lines :
if ('Cal' in i and 'psf' in i) or ('merge.cat' in i) or ('Cal' in i and '.xml' in i) or ('Cal' in i and '.png' in i) or ('Cal' in i and '.cat' in i) or ('Cal' in i and 'seg' in i) or ('hdre' in i ) or ('reCal' in i ) or ('recCal' in i) or ('wr' in i and '.fit' in i) or ('gregister' in i) :
# if 'com.cat' in i :
print('remove', i)
os.remove(i)
## LSGT
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'cCalib' in i :
print('rename', i)
os.system('rename cCalib Calib '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'Calibrated' in i :
print('rename', i)
os.system('rename Calibrated Calib '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'T52-ceouobs.changsu' in i :
print('rename', i)
os.system('rename T52-ceouobs.changsu LSGT '+i)
if 'T52-ceouobs.joonho' in i :
print('rename', i)
os.system('rename T52-ceouobs.joonho LSGT '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if ('LSGT' in i) and ('BIN' in i) :
print('rename', i)
LSGTfilechange(i)
## CCA250
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'CCA250' in i and '.new' in i :
print('rename & remove', i)
fnamechange(i)
os.remove(i)
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'CCA250' in i:
os.system('rename NGC3367-18 NGC3367-2018 '+i)
os.system('rename NGC3367-17 NGC3367-2017 '+i)
os.system('rename Calibrated Calib '+i)
os.system('rename 0.0.fits 0.fits '+i)
os.system('rename 00.fits .fits '+i)
os.system('rename ..fits .fits '+i)
## CCA250 directory and files
os.chdir('CCA250')
os.system('rename 100-c 100c Calib*.fits')
os.system('mv *-m575-* m575/')
os.system('mv *-m625-* m625/')
os.system('mv *-m675-* m675/')
os.system('mv *-m725-* m725/')
os.system('mv *-m775-* m775/')
os.system('mv *-V-* V/')
os.system('mv *-R-* R/')
os.chdir('c')
os.system('rename 100-c 100c Calib*.fits')
os.system('mv *-100c-* ../100c')
os.chdir('..')
os.rmdir('c')
os.system('rename NGC3367-18 NGC3367-2018 Calib*.fits')
os.system('rename NGC3367-17 NGC3367-2017 Calib*.fits')
os.system('rename 0.0.fits 0.fits Calib*.fits')
os.system('rename 00.fits .fits Calib*.fits')
os.system('rename ..fits .fits Calib*.fits')
## itelescope T21
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'Calib-T21-ceou' in i:
print('file name :',i)
iTelfilechange(i)
## MAO SNUCAM
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'SNUCAM' in i :
if ('reaCal' in i) or ('reCal' in i) or ('aCalib' in i) or('Calib-MAIDANAK' in i):
print('remove',i)
os.remove(i)
## MCD30INCH
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'MCD30INCH' in i :
print(i)
if not 'Calib-MCD30INCH' in i:
print( 'rename ',i)
simplerename(i,'Cal-30inch','Calib-MCD30INCH')
'''
!rename Cal-30inch Calib-MCD30INCH Cal*.fits
!rename Calib-30inch Calib-MCD30INCH Cal*.fits
!rename Calib-MCD30inch Calib-MCD30INCH Cal*.fits
'''
## SOAO
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'SOAO' in i and 'SOAO_FLI' in i:
print ('rename',i)
simplerename(i,'SOAO_FLI','SOAO')
if 'SOAO' in i and 'SOAO_FLI4k' in i:
print ('rename',i)
simplerename(i,'SOAO_FLI4k','SOAO')
if 'SOAO' in i and 'SOAO4k' in i:
print ('rename',i)
simplerename(i,'SOAO4k','SOAO')
## DOAO
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
print(len(fitslist))
for i in fitslist:
if 'gregister' in i: os.remove(i)
lines= oswalkfunc()
lines.sort()
| nilq/baby-python | python |
from django.db import models
class TrackedModel(models.Model):
"""
a model which keeps track of creation and last updated time
"""
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
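
# Example (hypothetical model, not part of the original file): concrete
# subclasses inherit both timestamp fields.
# class Article(TrackedModel):
#     title = models.CharField(max_length=100)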
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
'''
Created by 15 cm on 11/22/15 3:20 PM
Copyright © 2015 15cm. All rights reserved.
'''
__author__ = '15cm'
import json
import urllib2
import multiprocessing
import numpy as np
from PIL import Image
import io
import os
CURPATH = os.path.split(os.path.realpath(__file__))[0]
DATAPATH = os.path.join(os.path.dirname(CURPATH),'dataset')
def download_stuff(stuff):
image_bytes = urllib2.urlopen(stuff.link).read()
data_stream = io.BytesIO(image_bytes)
pil_image = Image.open(data_stream)
try:
pil_image.load()
except IOError:
pass
w,h = pil_image.size
pil_image.thumbnail((w/3,h/3))
pil_image.save(os.path.join(DATAPATH,str(stuff.id)+'.jpg'),'jpeg')
class DataHandler:
class ImageData:
def __init__(self,id,link,label):
self.id = id
self.link = link
self.label = label
def __init__(self):
self.data = [] # [(link,label),...]
self.label_dict = {}
self.label_list = []
self.data_file = os.path.join(DATAPATH,'data.txt')
self.label_list_file = os.path.join(DATAPATH,'label_list.json')
def label_filter(self,s):
# valid_word_list = ['衣','裙','裤','长','大','短','单','套','衫','毛']
valid_word_list = ['裙','衣','裤']
valid_word_set = set((map(lambda x: x.decode('utf-8'),valid_word_list)))
res_str = ''
if not isinstance(s,unicode):
s = s.decode('utf-8')
for word in s:
if word in valid_word_set:
res_str += word
break
if not res_str:
res_str = '其他'.decode('utf-8')
return res_str.encode('utf-8')
def parse_data(self,json_file):
file = os.path.join(DATAPATH,json_file)
with open(file) as f:
json_content = json.load(f)
for item in json_content:
id=int(item['id'])
label = self.label_filter(item['sub_category'])
link = item['picture']
if not self.label_dict.has_key(label):
self.label_list.append(label)
self.label_dict[label] = len(self.label_list) - 1
self.data.append(self.ImageData(id, link, self.label_dict[label]))
def download(self,num = -1,id_geq = 0):
if num > 0:
data = [x for x in self.data if x.id < num and x.id > id_geq]
else:
data = [x for x in self.data if x.id > id_geq]
pool = multiprocessing.Pool(processes=5)
pool.map(download_stuff,data)
def save(self):
# data_matrix:
# id label
# ... ...
data_matrix = np.empty((len(self.data),2))
for i in range(len(self.data)):
data_matrix[i][0] = self.data[i].id
data_matrix[i][1] = self.data[i].label
np.savetxt(self.data_file,data_matrix)
with open(self.label_list_file,'w') as f:
json.dump(self.label_list,f)
def load(self):
self.data_matrix = np.loadtxt(self.data_file)
with open(self.label_list_file) as f:
self.label_list = json.load(f)
    def get_labels(self, id=-1):
if id >= 0:
return self.data_matrix[id][1]
else:
return self.data_matrix[:,1]
def tell_label(self,label):
return self.label_list[label]
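
# Usage sketch (py2; file names are assumptions): parse a crawl dump from the
# dataset directory, download thumbnails, then persist ids/labels for reuse.
#   handler = DataHandler()
#   handler.parse_data('items.json')
#   handler.download(num=100)
#   handler.save()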
| nilq/baby-python | python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .pygame_component import Pygame
from .pygame_surface import PygameSurface
from .blit_surface import BlitSurface
from .blocking_pygame_event_pump import BlockingPygameEventPump
from .color_fill import ColorFill
from .draw_on_resized import DrawOnResized
from .resize_event_on_videoresize import ResizeEventOnVideoresize
from .surface_draw_event import SurfaceDrawEvent
| nilq/baby-python | python |
#!/usr/bin/python
'''
(C) Copyright 2018-2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import os
import traceback
from apricot import TestWithServers
from daos_api import DaosPool, DaosServer, DaosApiError
class PoolSvc(TestWithServers):
"""
Tests svc argument while pool create.
:avocado: recursive
"""
def tearDown(self):
try:
if self.pool is not None and self.pool.attached:
self.pool.destroy(1)
finally:
super(PoolSvc, self).tearDown()
def test_poolsvc(self):
"""
Test svc arg during pool create.
:avocado: tags=pool,svc
"""
# parameters used in pool create
createmode = self.params.get("mode", '/run/createtests/createmode/*/')
createuid = os.geteuid()
creategid = os.getegid()
createsetid = self.params.get("setname", '/run/createtests/createset/')
createsize = self.params.get("size", '/run/createtests/createsize/')
createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')
expected_result = createsvc[1]
try:
# initialize a python pool object then create the underlying
# daos storage
self.pool = DaosPool(self.context)
self.pool.create(createmode, createuid, creategid,
createsize, createsetid, None, None, createsvc[0])
self.pool.connect(1 << 1)
# checking returned rank list for server more than 1
iterator = 0
while (
int(self.pool.svc.rl_ranks[iterator]) > 0 and
int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and
int(self.pool.svc.rl_ranks[iterator]) != 999999
):
iterator += 1
if iterator != createsvc[0]:
self.fail("Length of Returned Rank list is not equal to "
"the number of Pool Service members.\n")
rank_list = []
for iterator in range(createsvc[0]):
rank_list.append(int(self.pool.svc.rl_ranks[iterator]))
if len(rank_list) != len(set(rank_list)):
self.fail("Duplicate values in returned rank list")
self.pool.pool_query()
leader = self.pool.pool_info.pi_leader
if createsvc[0] == 3:
# kill pool leader and exclude it
self.pool.pool_svc_stop()
self.pool.exclude([leader])
# perform pool disconnect, try connect again and disconnect
self.pool.disconnect()
self.pool.connect(1 << 1)
self.pool.disconnect()
# kill another server which is not a leader and exclude it
server = DaosServer(self.context, self.server_group, leader - 1)
server.kill(1)
self.pool.exclude([leader - 1])
# perform pool connect
self.pool.connect(1 << 1)
if expected_result in ['FAIL']:
self.fail("Test was expected to fail but it passed.\n")
except DaosApiError as excep:
print(excep)
print(traceback.format_exc())
if expected_result == 'PASS':
self.fail("Test was expected to pass but it failed.\n")
| nilq/baby-python | python |
from UE4Parse.BinaryReader import BinaryStream
class FPathHashIndexEntry:
FileName: str
Location: int
def __init__(self, reader: BinaryStream):
self.FileName = reader.readFString()
self.Location = reader.readInt32()
| nilq/baby-python | python |
from typing import List
import cv2
from vision.domain.iCamera import ICamera
from vision.domain.iCameraFactory import ICameraFactory
from vision.infrastructure.cvCamera import CvCamera
from vision.infrastructure.cvVisionException import CameraDoesNotExistError
from vision.infrastructure.fileCamera import FileCamera
class CvCameraFactory(ICameraFactory):
def __init__(self, max_camera_count: int = 10) -> None:
self._max_camera_count = max_camera_count
self._cameras: List[int] = [1337]
self._find_all_camera()
def get_cameras(self) -> List[int]:
return self._cameras
def create_camera(self, index: int) -> ICamera:
if index not in self._cameras:
raise CameraDoesNotExistError(index)
if index == 1337:
return FileCamera('./vision/infrastructure/2.jpg')
return CvCamera(index)
def _find_all_camera(self) -> None:
index = 0
while index < self._max_camera_count:
cap = cv2.VideoCapture(index)
if cap.isOpened():
cap.release()
self._cameras.append(index)
index += 1
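
# Usage sketch: index 1337 is the built-in FileCamera stub; indices of real
# devices found by probing are appended after it.
#   factory = CvCameraFactory()
#   factory.get_cameras()              # e.g. [1337, 0]
#   camera = factory.create_camera(1337)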
| nilq/baby-python | python |
import contextlib
import logging
import six
import py.test
_LOGGING_CONFIGURED_STREAM = None
@py.test.fixture(scope="session")
def streamconfig():
global _LOGGING_CONFIGURED_STREAM
if not _LOGGING_CONFIGURED_STREAM:
_LOGGING_CONFIGURED_STREAM = six.StringIO()
logging.basicConfig(
stream=_LOGGING_CONFIGURED_STREAM, level=logging.INFO
)
@contextlib.contextmanager
def manager():
_LOGGING_CONFIGURED_STREAM.truncate(0) # reset stream
_LOGGING_CONFIGURED_STREAM.seek(0) # rewind stream
yield _LOGGING_CONFIGURED_STREAM
_LOGGING_CONFIGURED_STREAM.seek(0) # rewind stream
return manager
_MESSAGES = (
"Hello world",
"My hovercraft is full of eels",
"49.3",
)
@py.test.fixture(scope="function", params=_MESSAGES)
def message(request):
return request.param
_MODULE_NAMES = (
"tests.mountains",
"tests.music.instruments.cymbal",
"tests.music",
"tests.discombobulate",
"tests.music.instruments",
"tests.mountains.ventoux",
)
@py.test.fixture(scope="function", params=_MODULE_NAMES)
def module_name(request):
return request.param
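
def test_fixture_roundtrip(streamconfig, module_name, message):
    # Example test (an assumption, not part of the original suite): every
    # (module, message) pair is captured by the session-scoped stream while
    # inside the manager context.
    with streamconfig() as stream:
        logging.getLogger(module_name).info(message)
        assert message in stream.getvalue()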
| nilq/baby-python | python |
import re
# Solution
def part1(data, multiplier = 1):
pattern = r'\d+'
(player_count, marble_count) = re.findall(pattern, data)
(player_count, marble_count) = (int(player_count), int(marble_count) * multiplier)
players = [0] * player_count
marbles = DoubleLinkedList(0)
k = 0
for i in range(1, marble_count + 1):
if i % 23 == 0:
players[k] += (i + marbles.remove_node())
else:
marbles.add_node(i)
k = (k + 1) % player_count
return max(x for x in players)
def part2(data, multiplier):
    return part1(data, multiplier)
class DoubleLinkedList:
def __init__(self, initial_value):
initial_node = DoubleLinkedListNode(initial_value)
initial_node.prev = initial_node
initial_node.next = initial_node
self.current = initial_node
def add_node(self, node_value):
left = self.current.next
right = self.current.next.next
new_node = DoubleLinkedListNode(node_value, left, right)
left.next = new_node
right.prev = new_node
self.current = new_node
def remove_node(self):
for _ in range(7):
self.current = self.current.prev
val = self.current.value
self.current.prev.next = self.current.next
self.current.next.prev = self.current.prev
self.current = self.current.next
return val
class DoubleLinkedListNode:
def __init__(self, value, prev = None, next = None):
self.value = value
self.prev = prev
self.next = next
# Tests
def test(expected, actual):
assert expected == actual, 'Expected: %r, Actual: %r' % (expected, actual)
test(32, part1('9 players; last marble is worth 25 points'))
test(8317, part1('10 players; last marble is worth 1618 points'))
test(146373, part1('13 players; last marble is worth 7999 points'))
test(2764, part1('17 players; last marble is worth 1104 points'))
test(54718, part1('21 players; last marble is worth 6111 points'))
test(37305, part1('30 players; last marble is worth 5807 points'))
test(8317, part1('10 players; last marble is worth 1618 points'))
# Solve real puzzle
filename = 'data/day09.txt'
data = [line.rstrip('\n') for line in open(filename, 'r')][0]
print('Day 09, part 1: %r' % (part1(data)))
print('Day 09, part 2: %r' % (part2(data, 100)))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import hexchat
import re
__module_name__ = "DeadKeyFix"
__module_version__ = "2.2"
__module_description__ = "Fixes the Us-International deadkey issue"
prev = ''
def keypress_cb(word, word_eol, userdata):
global prev
specialChars = {
'65104': {
'a': u'à',
'o': u'ò',
'e': u'è',
'i': u'ì',
'u': u'ù',
'A': u'À',
'O': u'Ò',
'E': u'È',
'I': u'Ì',
'U': u'Ù'
},
'65105': {
'a': u'á',
'o': u'ó',
'e': u'é',
'i': u'í',
'u': u'ú',
'y': u'ý',
'c': u'ç',
'A': u'Á',
'O': u'Ó',
'E': u'É',
'I': u'Í',
'U': u'Ú',
'Y': u'Ý',
'C': u'Ç'
},
'65106': {
'a': u'â',
'o': u'ô',
'e': u'ê',
'i': u'î',
'u': u'û',
'A': u'Â',
'O': u'Ô',
'E': u'Ê',
'I': u'Î',
'U': u'Û'
},
'65107': {
'a': u'ã',
'o': u'õ',
'n': u'ñ',
'A': u'Ã',
'O': u'Õ',
'N': u'Ñ'
},
'65111': {
'a': u'ä',
'o': u'ö',
'e': u'ë',
'i': u'ï',
'u': u'ü',
'y': u'ÿ',
'A': u'Ä',
'O': u'Ö',
'E': u'Ë',
'I': u'Ï',
'U': u'Ü',
'Y': u'Ÿ'
}
}
accents = {
'65104': '`',
'65105': "'",
'65106': '^',
'65107': '~',
'65111': '"'
}
charset = hexchat.get_info('charset')
#When there is no current charset derived from server or channel it is set to IRC
#IRC is not a recognized encoding type so default to utf-8 in that case.
if(charset == "IRC"):
charset = "utf-8"
text = hexchat.get_info('inputbox')
loc = hexchat.get_prefs("state_cursor")
if prev in accents and word[2] in specialChars[prev]:
#insert an accented character
text = insert(specialChars[prev][word[2]],text,loc)
elif prev in accents and word[2] == ' ':
#insert a clean accent ( input was accent followed by space )
text = insert(accents[prev],text,loc)
elif prev in accents and word[0] in accents:
#Insert two accents ( input was accent followed by accent )
text = insert(accents[prev] + accents[word[0]],text,loc)
loc+=1
elif prev in accents and int(word[3]) != 0:
#insert an accent and a character (character and accent do not combine)
text = insert(accents[prev] + word[2],text,loc)
loc+=1
elif word[0] in accents:
#store an accent input
prev = word[0]
return
else:
#regular character input
if int(word[3]) != 0:
prev = ''
return
prev = ''
settex = u"settext " + text
hexchat.command( settex )
hexchat.command('setcursor {}'.format(loc+1))
return hexchat.EAT_HEXCHAT
def unload_cb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
def insert(char,text,loc):
return u"{}{}{}".format(text[:loc] , char , text[loc:])
hexchat.hook_print('Key Press', keypress_cb)
hexchat.hook_unload(unload_cb)
print(__module_name__, 'version', __module_version__, 'loaded.')
| nilq/baby-python | python |
import FWCore.ParameterSet.Config as cms
process = cms.Process("TREESPLITTER")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(0)
)
process.TreeSplitterModule = cms.EDAnalyzer(
"TreeSplitter",
InputFileName = cms.string("/afs/cern.ch/user/d/demattia/scratch0/MuScleFit/CMSSW_3_11_0/src/MuonAnalysis/MomentumScaleCalibration/test/StatisticalErrors/Tree_MCFall2010_INNtk_CRAFTRealistic_wGEN.root"),
OutputFileName = cms.string("SubSample.root"),
MaxEvents = cms.int32(MAXEVENTS),
SubSampleFirstEvent = cms.uint32(SUBSAMPLEFIRSTEVENT),
SubSampleMaxEvents = cms.uint32(SUBSAMPLEMAXEVENTS)
)
process.p1 = cms.Path(process.TreeSplitterModule)
| nilq/baby-python | python |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .ledger_close_value_signature import LedgerCloseValueSignature
from .stellar_value_type import StellarValueType
from ..exceptions import ValueError
__all__ = ["StellarValueExt"]
class StellarValueExt:
"""
XDR Source Code
----------------------------------------------------------------
union switch (StellarValueType v)
{
case STELLAR_VALUE_BASIC:
void;
case STELLAR_VALUE_SIGNED:
LedgerCloseValueSignature lcValueSignature;
}
----------------------------------------------------------------
"""
def __init__(
self, v: StellarValueType, lc_value_signature: LedgerCloseValueSignature = None,
) -> None:
self.v = v
self.lc_value_signature = lc_value_signature
def pack(self, packer: Packer) -> None:
self.v.pack(packer)
if self.v == StellarValueType.STELLAR_VALUE_BASIC:
return
if self.v == StellarValueType.STELLAR_VALUE_SIGNED:
if self.lc_value_signature is None:
raise ValueError("lc_value_signature should not be None.")
self.lc_value_signature.pack(packer)
return
raise ValueError("Invalid v.")
@classmethod
def unpack(cls, unpacker: Unpacker) -> "StellarValueExt":
v = StellarValueType.unpack(unpacker)
if v == StellarValueType.STELLAR_VALUE_BASIC:
return cls(v)
if v == StellarValueType.STELLAR_VALUE_SIGNED:
lc_value_signature = LedgerCloseValueSignature.unpack(unpacker)
if lc_value_signature is None:
raise ValueError("lc_value_signature should not be None.")
return cls(v, lc_value_signature=lc_value_signature)
raise ValueError("Invalid v.")
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "StellarValueExt":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "StellarValueExt":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.v == other.v and self.lc_value_signature == other.lc_value_signature
def __str__(self):
out = []
out.append(f"v={self.v}")
out.append(
f"lc_value_signature={self.lc_value_signature}"
) if self.lc_value_signature is not None else None
return f"<StellarValueExt {[', '.join(out)]}>"
| nilq/baby-python | python |
import setuptools
def get_requires(filename):
requirements = []
with open(filename) as req_file:
for line in req_file.read().splitlines():
if not line.strip().startswith("#"):
requirements.append(line)
return requirements
with open("Readme.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setuptools.setup(
name="recap",
version="1.0.0",
author="Croydon",
author_email="cr0ydon@outlook.com",
description="An example how a testing environment can look like in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Croydon/pt-recap",
packages=setuptools.find_packages(exclude=["tests"]),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
keywords=["testing", "requests", "calculations", "templates"],
install_requires=get_requires("requirements.txt"),
extras_require={
"test": get_requires("requirements_test.txt")
},
    package_data={
        '': ['*.md', 'data/*.tmpl']
    },
entry_points={
'console_scripts': [
'recap=recap.main:run',
],
},
)
| nilq/baby-python | python |
REGISTRY = {}
from .sc_agent import SCAgent
from .rnn_agent import RNNAgent
from .latent_ce_dis_rnn_agent import LatentCEDisRNNAgent
REGISTRY["rnn"] = RNNAgent
REGISTRY["latent_ce_dis_rnn"] = LatentCEDisRNNAgent
REGISTRY["sc"] = SCAgent
| nilq/baby-python | python |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the Apache License Version 2.0.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License Version 2.0 for more details.
# ============================================================================
import tensorflow as tf
from tensorflow.python.keras import layers as keras_layers
class FeedForwardNetwork(keras_layers.Layer):
def __init__(self, hidden_size, filter_size, relu_dropout):
super(FeedForwardNetwork, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.relu_dropout = relu_dropout
self.filter_dense_layer = keras_layers.Dense(
filter_size,
use_bias=True,
activation=tf.nn.relu,
name="filter_layer")
self.output_dense_layer = keras_layers.Dense(
hidden_size, use_bias=True, name="output_layer")
def call(self, x, training):
"""
Args:
x: A tensor with shape [batch_size, length, hidden_size]
training (boolean): whether in training mode or not.
Returns:
Output of the feedforward network.
tensor with shape [batch_size, length, hidden_size]
"""
# input_shape = tf.shape(x)
# batch_size, length = input_shape[0], input_shape[1]
output = self.filter_dense_layer(x)
if training:
output = tf.nn.dropout(output, rate=self.relu_dropout)
output = self.output_dense_layer(output)
return output
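
if __name__ == "__main__":
    # Smoke-test sketch (shapes are illustrative, not from the original repo):
    # the FFN must preserve the [batch, length, hidden] shape.
    ffn = FeedForwardNetwork(hidden_size=512, filter_size=2048, relu_dropout=0.1)
    x = tf.random.uniform([8, 16, 512])
    print(ffn(x, training=True).shape)  # expected: (8, 16, 512)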
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
#
# Copyright 2017 CPqD. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@author: Akira Miasato
Audio generation examples.
Generators should always yield bytestrings. Our ASR interface only supports
linear PCM with little-endian signed 16bit samples. Their length may be
variable, as long as they are smaller than the predefined maximum payload size
from the configured websocket connection, and the length of each bytestring
is a multiple of the sample size (i.e. is even in length).
"""
import soundfile as sf
import pyaudio
import time
class MicAudioSource:
"""
Simple microphone reader.
chunk_size is in samples, so the size in bytes of the sent packet is
sizeof(<sample_type>) * chunk_size. This value should be smaller than the
predefined maximum payload from the configured websocket connection.
:sample_rate: Sample rate for the captured audio
:sample_type: Sample type provided by pyaudio
:chunk_size: Size of the blocks of audio which will be sent (in samples)
:yields: bytestrings of size <chunk_size> * sizeof(<sample_type>)
Does not terminate. When used inside a SpeechRecognition instance, it
is halted when the recognition instance is cancelled or closed.
"""
def __init__(self, sample_rate=8000, sample_type=pyaudio.paInt16, chunk_size=4096):
self._audio = pyaudio.PyAudio()
self._sample_rate = sample_rate
self._sample_type = sample_type
self._chunk_size = chunk_size
def __enter__(self):
self._stream = self._audio.open(
format=self._sample_type,
channels=1,
rate=self._sample_rate,
input=True,
frames_per_buffer=self._chunk_size,
)
return self
def __exit__(self, etype, value, traceback):
self._stream.stop_stream()
self._stream.close()
def __iter__(self):
return self
def __next__(self):
        if not self._stream.is_active():  # is_active is a method; without () the check never fires
raise StopIteration
return self._stream.read(self._chunk_size)
def FileAudioSource(path, chunk_size=4096):
"""
Simple audio file reader. Should be compatible with all files supported
by 'soundfile' package.
chunk_size is in samples, so the size in bytes of the sent packet is
2*chunk_size, since we are sending 16-bit signed PCM samples. chunk_size*2
should be smaller than the predefined maximum payload from the configured
websocket connection.
:path: Path to the audio input (any format supported by soundfile package)
:chunk_size: Size of the blocks of audio which will be sent (in samples)
:yields: bytestrings of size <chunk_size> * 2
Terminates when the audio file provided has no more content
"""
    # Decode via soundfile as the docstring promises, yielding 16-bit
    # little-endian PCM; reading raw file bytes would also emit container
    # headers (e.g. WAV) and ignore the documented sample size.
    with sf.SoundFile(path) as f:
        block = f.read(chunk_size, dtype="int16")
        while len(block):
            # int16 samples, 2 bytes each -> payload of 2 * chunk_size bytes
            yield block.astype("<i2").tobytes()
            block = f.read(chunk_size, dtype="int16")
class BufferAudioSource:
"""
Very simple buffer source.
This generator has a "write" method which updates its internal buffer,
which is periodically consumed by the ASR instance in which it is inserted.
    :chunk_size: Size of the chunks consumed from the internal buffer (in bytes)
    :yields: bytestrings of size <chunk_size>
Terminates only if the "finish" method is called, in which case the
remaining buffer is sent regardless of its size.
"""
def __init__(self, chunk_size=4096):
self._buffer = b""
self._chunk_size = chunk_size
self._finished = False
def __iter__(self):
return self
def __next__(self):
while True:
if len(self._buffer) >= self._chunk_size:
r = self._buffer[: self._chunk_size]
self._buffer = self._buffer[self._chunk_size :]
return r
elif self._finished:
if self._buffer:
r = self._buffer
self._buffer = b""
return r
else:
raise StopIteration
time.sleep(0.05)
def write(self, byte_str):
"""
Writes to the buffer.
:byte_str: A byte string (char array). Currently only 16-bit signed
little-endian linear PCM is accepted.
"""
self._finished = False
self._buffer += byte_str
def finish(self):
"""
Signals the ASR instance that one's finished writing and is now waiting
for the recognition result.
"""
self._finished = True
| nilq/baby-python | python |
#
# @lc app=leetcode id=160 lang=python3
#
# [160] Intersection of Two Linked Lists
#
# https://leetcode.com/problems/intersection-of-two-linked-lists/description/
#
# algorithms
# Easy (39.05%)
# Likes: 3257
# Dislikes: 372
# Total Accepted: 438K
# Total Submissions: 1.1M
# Testcase Example: '8\n[4,1,8,4,5]\n[5,0,1,8,4,5]\n2\n3'
#
# Write a program to find the node at which the intersection of two singly
# linked lists begins.
#
# For example, the following two linked lists:
#
#
# begin to intersect at node c1.
#
#
#
# Example 1:
#
#
#
# Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA =
# 2, skipB = 3
# Output: Reference of the node with value = 8
# Input Explanation: The intersected node's value is 8 (note that this must not
# be 0 if the two lists intersect). From the head of A, it reads as
# [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes
# before the intersected node in A; There are 3 nodes before the intersected
# node in B.
#
#
#
# Example 2:
#
#
#
# Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3,
# skipB = 1
# Output: Reference of the node with value = 2
# Input Explanation: The intersected node's value is 2 (note that this must not
# be 0 if the two lists intersect). From the head of A, it reads as
# [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes
# before the intersected node in A; There are 1 node before the intersected
# node in B.
#
#
#
#
# Example 3:
#
#
#
# Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
# Output: null
# Input Explanation: From the head of A, it reads as [2,6,4]. From the head of
# B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must
# be 0, while skipA and skipB can be arbitrary values.
# Explanation: The two lists do not intersect, so return null.
#
#
#
#
# Notes:
#
#
# If the two linked lists have no intersection at all, return null.
# The linked lists must retain their original structure after the function
# returns.
# You may assume there are no cycles anywhere in the entire linked
# structure.
# Your code should preferably run in O(n) time and use only O(1) memory.
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
if headA is None or headB is None:
return None
a = headA
b = headB
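        # Two-pointer technique: when a pointer reaches the end of its list it
        # switches to the other list's head. Both pointers then traverse
        # lenA + lenB nodes, so they meet at the intersection node, or both
        # become None at the same step when the lists do not intersect.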
while a != b:
a = a.next if a else headB
b = b.next if b else headA
return a
# @lc code=end
| nilq/baby-python | python |
def dividend(ticker_info):
for ticker, value in ticker_info.items():
value_dividends = value["dividends"].to_frame().reset_index()
dividend_groupped = value_dividends.groupby(value_dividends["Date"].dt.year)['Dividends'].agg(['sum'])
dividend_groupped = dividend_groupped.rename(columns={'sum': ticker})
value["dividends_grouped"] = dividend_groupped
| nilq/baby-python | python |
import markdown
from flask import abort, flash, redirect, render_template, request
from flask_babel import gettext as _
from flask_login import current_user, login_required
from ..ext import db
from ..forms.base import DeleteForm
from ..models import Brew, TastingNote
from ..utils.pagination import get_page
from ..utils.views import next_redirect
from . import tasting_bp
from .forms import TastingNoteForm
from .permissions import AccessManager
from .utils import TastingUtils
@tasting_bp.route('/all', endpoint='all')
def all_tasting_notes():
page_size = 20
page = get_page(request)
kw = {}
if current_user.is_authenticated:
kw['extra_user'] = current_user
query = TastingUtils.notes(public_only=True, **kw)
query = query.order_by(db.desc(TastingNote.date))
pagination = query.paginate(page, page_size)
context = {
'public_only': True,
'pagination': pagination,
}
return render_template('tasting/list.html', **context)
@tasting_bp.route('/<int:brew_id>/add', methods=['GET', 'POST'], endpoint='add')
@login_required
def brew_add_tasting_note(brew_id):
brew = Brew.query.get_or_404(brew_id)
AccessManager.check_create(brew)
form = TastingNoteForm()
if form.validate_on_submit():
form.save(brew)
flash(_('tasting note for %(brew)s saved', brew=brew.name), category='success')
next_ = next_redirect('brew.details', brew_id=brew.id)
return redirect(next_)
ctx = {
'brew': brew,
'form': form,
}
return render_template('tasting/tasting_note.html', **ctx)
@tasting_bp.route('/<int:note_id>/delete', methods=['GET', 'POST'], endpoint='delete')
@login_required
def brew_delete_tasting_note(note_id):
note = TastingNote.query.get_or_404(note_id)
brew = note.brew
AccessManager(note, None).check()
form = DeleteForm()
if form.validate_on_submit() and form.delete_it.data:
db.session.delete(note)
db.session.commit()
flash(
_('tasting note for brew %(brew)s has been deleted', brew=brew.name),
category='success'
)
next_ = next_redirect('brew.details', brew_id=brew.id)
return redirect(next_)
ctx = {
'brew': brew,
'note': note,
'delete_form': form,
}
return render_template('tasting/tasting_note_delete.html', **ctx)
@tasting_bp.route('/ajaxtext', endpoint='loadtext')
def brew_load_tasting_note_text():
provided_id = request.args.get('id')
if not provided_id:
abort(400)
note_id = provided_id.rsplit('_', 1)[-1]
note = TastingNote.query.get_or_404(note_id)
return note.text
@tasting_bp.route('/ajaxupdate', methods=['POST'], endpoint='update')
@login_required
def brew_update_tasting_note():
note_id = request.form.get('pk')
if not note_id:
abort(400)
note = TastingNote.query.get_or_404(note_id)
AccessManager(note, None).check()
value = request.form.get('value', '').strip()
if value:
note.text = value
db.session.add(note)
db.session.commit()
return markdown.markdown(value)
return note.text_html
| nilq/baby-python | python |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from collections import OrderedDict
from teach.dataset.actions import (
Action_Audio,
Action_Basic,
Action_Keyboard,
Action_MapGoal,
Action_Motion,
Action_ObjectInteraction,
Action_ProgressCheck,
)
class Interaction:
def __init__(self, agent_id, action, is_object=False, status=None, time_start=None):
self.agent_id = agent_id
self.action = action
self.is_object = is_object
self.status = status
self.time_start = time_start
def to_dict(self):
_dict = OrderedDict()
if self.is_object:
_dict["object_id"] = self.agent_id
else:
_dict["agent_id"] = self.agent_id
_dict.update(self.action.to_dict())
if self.status is not None:
_dict["status"] = self.status
return _dict
@classmethod
def from_dict(cls, interaction_dict, action_type) -> "Interaction":
if "object_id" in interaction_dict:
is_object = True
agent_id = interaction_dict["object_id"]
else:
is_object = False
agent_id = interaction_dict["agent_id"]
if action_type == "Motion":
action = Action_Motion.from_dict(interaction_dict)
elif action_type == "MapGoal":
action = Action_MapGoal.from_dict(interaction_dict)
elif action_type == "ObjectInteraction":
action = Action_ObjectInteraction.from_dict(interaction_dict)
elif action_type == "ProgressCheck":
action = Action_ProgressCheck.from_dict(interaction_dict)
elif action_type == "Keyboard":
action = Action_Keyboard.from_dict(interaction_dict)
elif action_type == "Audio":
action = Action_Audio.from_dict(interaction_dict)
else:
action = Action_Basic.from_dict(interaction_dict)
status = interaction_dict.get("status")
time_start = interaction_dict.get("time_start")
return cls(agent_id=agent_id, action=action, is_object=is_object, status=status, time_start=time_start)
| nilq/baby-python | python |
import collections
import pathlib
import sys
import os
import json
def as_dict(par):
if not par:
return None
if isinstance(par, dict):
return par
else:
return dict(par._asdict())
def from_dict(par_dict):
if not par_dict:
return None
# par = collections.namedtuple('Params', par_dict.keys())(**par_dict)
par = collections.namedtuple('Params', par_dict.keys())
class IBLParams(par):
def set(self, field, value):
d = as_dict(self)
d[field] = value
return from_dict(d)
def as_dict(self):
return as_dict(self)
return IBLParams(**par_dict)
def getfile(str_params):
"""
Returns full path of the param file per system convention:
linux/mac: ~/.str_params, Windows: APPDATA folder
    :param str_params: string that identifies the param file
:return: string of full path
"""
if sys.platform == 'win32' or sys.platform == 'cygwin':
pfile = str(pathlib.PurePath(os.environ['APPDATA'], '.' + str_params))
else:
pfile = str(pathlib.PurePath(pathlib.Path.home(), '.' + str_params))
return pfile
def read(str_params, default=None):
"""
    Read and parse a JSON parameter file into a named tuple of parameters
:param str_params: path to text json file
:param default: default values for missing parameters
:return: named tuple containing parameters
"""
pfile = getfile(str_params)
if os.path.isfile(pfile):
with open(pfile) as fil:
par_dict = json.loads(fil.read())
else:
par_dict = as_dict(default)
# without default parameters
default = as_dict(default)
# TODO : behaviour for non existing file
# tat = params.read('rijafa', default={'toto': 'titi', 'tata': 1})
if not default or default.keys() == par_dict.keys():
return from_dict(par_dict)
# if default parameters bring in a new parameter
new_keys = set(default.keys()).difference(set(par_dict.keys()))
for nk in new_keys:
par_dict[nk] = default[nk]
# write the new parameter file with the extra param
write(str_params, par_dict)
return from_dict(par_dict)
def write(str_params, par):
"""
    Write a parameter file in JSON format
:param str_params: path to text json file
:param par: dictionary containing parameters values
:return: None
"""
pfile = getfile(str_params)
with open(pfile, 'w') as fil:
json.dump(as_dict(par), fil, sort_keys=False, indent=4)
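# Usage sketch: round-trip a parameter namespace through the per-user file
# (~/.myproj on Linux/macOS, %APPDATA%\.myproj on Windows; "myproj" is a
# made-up name):
#   par = read('myproj', default={'url': 'http://localhost', 'port': 8080})
#   par = par.set('port', 9090)
#   write('myproj', par)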
| nilq/baby-python | python |
import unittest
import ServiceGame
from model.Platform import platform
from model.Publishers import publisher
class TestServiceGame(unittest.TestCase):
def test_games_Wii(self):
wiigames = ServiceGame.platz(platform('Wii'))
self.assertEqual(15, len(wiigames))
def test_games_PC(self):
pc = ServiceGame.platz(platform('PC'))
self.assertEqual(1, len(pc))
def test_games_SquareSoft(self):
squaresoft = ServiceGame.plubz(publisher('SquareSoft'))
self.assertNotEqual(0, len(squaresoft))
def test_games_ElectronicArts(self):
electronicarts = ServiceGame.plubz(publisher('Electronic Arts'))
self.assertEqual(5, len(electronicarts))
def test_csv_is_create_platform(self):
ServiceGame.escolher('P1', platform('Wii'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(15, len(conteudo))
def test_csv_is_create_publisher(self):
ServiceGame.escolher('P2', publisher('Electronic Arts'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(5, len(conteudo))
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
from robot_server.service.errors import RobotServerError, \
CommonErrorDef, ErrorDef
class SystemException(RobotServerError):
"""Base of all system exceptions"""
pass
class SystemTimeAlreadySynchronized(SystemException):
"""
Cannot update system time because it is already being synchronized
via NTP or local RTC.
"""
def __init__(self, msg: str):
super().__init__(definition=CommonErrorDef.ACTION_FORBIDDEN,
reason=msg)
class SystemSetTimeException(SystemException):
"""Server process Failure"""
def __init__(self, msg: str, definition: ErrorDef = None):
if definition is None:
definition = CommonErrorDef.INTERNAL_SERVER_ERROR
super().__init__(definition=definition,
error=msg)
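# Usage sketch:
#   raise SystemTimeAlreadySynchronized('time is already synchronized via NTP')
#   raise SystemSetTimeException('settimeofday failed')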
| nilq/baby-python | python |
from flask import Flask, redirect, render_template, url_for
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo=PyMongo(app, uri="mongodb://localhost:27017/mars_app")
@app.route("/")
def index():
mars_info = mongo.db.mars_info.find_one()
return render_template("index.html", mars_info=mars_info)
#trigger scrape
@app.route("/scrape")
def scrape():
mars_info = mongo.db.mars_info
    # Assuming each scraper returns a dict, merge the results; previously each
    # assignment overwrote the one before it, so only the last scrape survived.
    mars_data = scrape_mars.mars_scrape_news()
    mars_data.update(scrape_mars.mars_scrape_image())
    mars_data.update(scrape_mars.mars_scrape_faq())
    mars_data.update(scrape_mars.mars_scrape_hemi())
mars_info.update({}, mars_data, upsert=True)
return "Scrape Complete!"
if __name__ == "__main__":
    app.run()
| nilq/baby-python | python |
import sys
with open(sys.argv[1]) as f:
data = f.read()
stack = []
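# Scan the input one character at a time, pushing onto a stack; whenever the
# top eight entries spell "<script>" case-insensitively, pop them off, so
# every such tag is stripped from the final output.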
for i in range(len(data)):
if i%1000000==0:
print("%.2f %%"%(i/len(data)*100))
    stack.append(data[i])
if (len(stack)>=8 and
stack[-8] in "<" and
stack[-7] in "Ss" and
stack[-6] in "Cc" and
stack[-5] in "Rr" and
stack[-4] in "Ii" and
stack[-3] in "Pp" and
stack[-2] in "Tt" and
stack[-1] in ">"):
for i in range(8):
stack.pop()
print("".join(stack)[:-1])
| nilq/baby-python | python |
#!/usr/bin/env python
import numpy as np
import sys
from readFiles import *
thisfh = sys.argv[1]
linkerfh = "part_Im.xyz"
#Read the linker file
lAtomList, lAtomCord = readxyz(linkerfh)
sAtomList, sAtomCord = readxyz(thisfh)
a,b,c,alpha,beta,gamma = readcifFile(thisfh[:-4] + ".cif")
cell_params = [a, b, c, alpha, beta, gamma]
#sAtomCord, sAtomList = reduceToUnitCell(sAtomCord,sAtomList,cell_params,-1,2)
sAtomList = replaceSiwZn(sAtomList)
#writexyzFile(sAtomCord,sAtomList,"testZn.xyz")
minDist = calcMinZnZnDist(sAtomCord,sAtomList)
sf = 6/minDist
a = a*sf; b = b*sf; c = c*sf;
sAtomCord = expandStructure(sAtomCord,sf)
#writexyzFile(sAtomCord,sAtomList,"testZnExpanded.xyz")
sAtomCord, sAtomList = putLinkerIn(sAtomList,lAtomList,sAtomCord,lAtomCord)
cell_params = [a, b, c, alpha, beta, gamma]
#writexyzFile(sAtomCord,sAtomList, thisfh[:-4] + "_ZIF.xyz",cell_params)
reducedCord,reducedList = reduceToUnitCell(sAtomCord,sAtomList,cell_params,.5,1.5)
writexyzFile(reducedCord,reducedList, thisfh[:-4] + "_ZIF_unitcell.xyz",cell_params)
| nilq/baby-python | python |
import copy
import os
import random
import kerastuner
import kerastuner.engine.hypermodel as hm_module
import tensorflow as tf
from autokeras.hypermodel import base
class AutoTuner(kerastuner.engine.multi_execution_tuner.MultiExecutionTuner):
"""A Tuner class based on KerasTuner for AutoKeras.
Different from KerasTuner's Tuner class. AutoTuner's not only tunes the
Hypermodel which can be directly built into a Keras model, but also the
preprocessors. Therefore, a HyperGraph stores the overall search space containing
both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the
PreprocessGraph and KerasGraph with the provided HyperParameters.
# Arguments
hyper_graph: HyperGraph. The HyperGraph to be tuned.
fit_on_val_data: Boolean. Use the training set and validation set for the
final fit of the best model.
**kwargs: The other args supported by KerasTuner.
"""
def __init__(self, hyper_graph, hypermodel, fit_on_val_data=False, **kwargs):
self.hyper_graph = hyper_graph
super().__init__(
hypermodel=hm_module.KerasHyperModel(hypermodel),
# TODO: Support resume of a previous run.
overwrite=True,
**kwargs)
self.preprocess_graph = None
self.best_hp = None
self.fit_on_val_data = fit_on_val_data
def run_trial(self, trial, **fit_kwargs):
"""Preprocess the x and y before calling the base run_trial."""
# Initialize new fit kwargs for the current trial.
new_fit_kwargs = copy.copy(fit_kwargs)
# Preprocess the dataset and set the shapes of the HyperNodes.
self.preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
trial.hyperparameters)
self.hypermodel = hm_module.KerasHyperModel(keras_graph)
self._prepare_run(self.preprocess_graph, new_fit_kwargs, True)
super().run_trial(trial, **new_fit_kwargs)
def _prepare_run(self, preprocess_graph, fit_kwargs, fit=False):
dataset, validation_data = preprocess_graph.preprocess(
dataset=fit_kwargs.get('x', None),
validation_data=fit_kwargs.get('validation_data', None),
fit=fit)
# Batching
batch_size = fit_kwargs.pop('batch_size', 32)
dataset = dataset.batch(batch_size)
validation_data = validation_data.batch(batch_size)
# Update the new fit kwargs values
fit_kwargs['x'] = dataset
fit_kwargs['validation_data'] = validation_data
fit_kwargs['y'] = None
def _get_save_path(self, trial, name):
filename = '{trial_id}-{name}'.format(trial_id=trial.trial_id, name=name)
return os.path.join(self.get_trial_dir(trial.trial_id), filename)
def on_trial_end(self, trial):
"""Save and clear the hypermodel and preprocess_graph."""
super().on_trial_end(trial)
self.preprocess_graph.save(self._get_save_path(trial, 'preprocess_graph'))
self.hypermodel.hypermodel.save(self._get_save_path(trial, 'keras_graph'))
self.preprocess_graph = None
self.hypermodel = None
def load_model(self, trial):
"""Load the model in a history trial.
# Arguments
trial: Trial. The trial to be loaded.
# Returns
Tuple of (PreprocessGraph, KerasGraph, tf.keras.Model).
"""
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
trial.hyperparameters)
preprocess_graph.reload(self._get_save_path(trial, 'preprocess_graph'))
keras_graph.reload(self._get_save_path(trial, 'keras_graph'))
self.hypermodel = hm_module.KerasHyperModel(keras_graph)
models = (preprocess_graph, keras_graph, super().load_model(trial))
self.hypermodel = None
return models
def get_best_model(self):
"""Load the best PreprocessGraph and Keras model.
It is mainly used by the predict and evaluate function of AutoModel.
# Returns
Tuple of (PreprocessGraph, tf.keras.Model).
"""
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
self.best_hp)
preprocess_graph.reload(self.best_preprocess_graph_path)
keras_graph.reload(self.best_keras_graph_path)
model = keras_graph.build(self.best_hp)
model.load_weights(self.best_model_path)
return preprocess_graph, model
def search(self, callbacks=None, **fit_kwargs):
"""Search for the best HyperParameters.
If there is not early-stopping in the callbacks, the early-stopping callback
is injected to accelerate the search process. At the end of the search, the
best model will be fully trained with the specified number of epochs.
"""
# Insert early-stopping for acceleration.
if not callbacks:
callbacks = []
new_callbacks = self._deepcopy_callbacks(callbacks)
if not any([isinstance(callback, tf.keras.callbacks.EarlyStopping)
for callback in callbacks]):
new_callbacks.append(tf.keras.callbacks.EarlyStopping(patience=10))
super().search(callbacks=new_callbacks, **fit_kwargs)
best_trial = self.oracle.get_best_trials(1)[0]
self.best_hp = best_trial.hyperparameters
preprocess_graph, keras_graph, model = self.get_best_models()[0]
preprocess_graph.save(self.best_preprocess_graph_path)
keras_graph.save(self.best_keras_graph_path)
# Fully train the best model with original callbacks.
if not any([isinstance(callback, tf.keras.callbacks.EarlyStopping)
for callback in callbacks]) or self.fit_on_val_data:
fit_kwargs['callbacks'] = self._deepcopy_callbacks(callbacks)
self._prepare_run(preprocess_graph, fit_kwargs)
if self.fit_on_val_data:
fit_kwargs['x'] = fit_kwargs['x'].concatenate(
fit_kwargs['validation_data'])
model = keras_graph.build(self.best_hp)
model.fit(**fit_kwargs)
model.save_weights(self.best_model_path)
@property
def best_preprocess_graph_path(self):
return os.path.join(self.project_dir, 'best_preprocess_graph')
@property
def best_keras_graph_path(self):
return os.path.join(self.project_dir, 'best_keras_graph')
@property
def best_model_path(self):
return os.path.join(self.project_dir, 'best_model')
class RandomSearch(AutoTuner, kerastuner.RandomSearch):
"""KerasTuner RandomSearch with preprocessing layer tuning."""
pass
class Hyperband(AutoTuner, kerastuner.Hyperband):
"""KerasTuner Hyperband with preprocessing layer tuning."""
pass
class BayesianOptimization(AutoTuner, kerastuner.BayesianOptimization):
"""KerasTuner BayesianOptimization with preprocessing layer tuning."""
pass
class GreedyOracle(kerastuner.Oracle):
"""An oracle combining random search and greedy algorithm.
It groups the HyperParameters into several categories, namely, HyperGraph,
Preprocessor, Architecture, and Optimization. The oracle tunes each group
separately using random search. In each trial, it use a greedy strategy to
generate new values for one of the categories of HyperParameters and use the best
trial so far for the rest of the HyperParameters values.
# Arguments
hyper_graph: HyperGraph. The hyper_graph model to be tuned.
seed: Int. Random seed.
"""
HYPER = 'HYPER'
PREPROCESS = 'PREPROCESS'
OPT = 'OPT'
ARCH = 'ARCH'
STAGES = [HYPER, PREPROCESS, OPT, ARCH]
@staticmethod
def next_stage(stage):
stages = GreedyOracle.STAGES
return stages[(stages.index(stage) + 1) % len(stages)]
def __init__(self, hyper_graph, seed=None, **kwargs):
super().__init__(**kwargs)
self.hyper_graph = hyper_graph
# Start from tuning the hyper block hps.
self._stage = GreedyOracle.HYPER
# Sets of HyperParameter names.
self._hp_names = {
GreedyOracle.HYPER: set(),
GreedyOracle.PREPROCESS: set(),
GreedyOracle.OPT: set(),
GreedyOracle.ARCH: set(),
}
# The quota used to tune each category of hps.
self._capacity = {
GreedyOracle.HYPER: 1,
GreedyOracle.PREPROCESS: 1,
GreedyOracle.OPT: 1,
GreedyOracle.ARCH: 4,
}
self._stage_trial_count = 0
        self.seed = seed or random.randint(1, 10000)  # randint requires ints; 1e4 is a float
# Incremented at every call to `populate_space`.
self._seed_state = self.seed
self._tried_so_far = set()
self._max_collisions = 5
def set_state(self, state):
super().set_state(state)
# TODO: self.hyper_graph.set_state(state['hyper_graph'])
# currently the state is not json serializable.
self._stage = state['stage']
self._capacity = state['capacity']
def get_state(self):
state = super().get_state()
state.update({
# TODO: 'hyper_graph': self.hyper_graph.get_state(),
# currently the state is not json serializable.
'stage': self._stage,
'capacity': self._capacity,
})
return state
def update_space(self, hyperparameters):
# Get the block names.
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
hyperparameters)
# Add the new Hyperparameters to different categories.
ref_names = {hp.name for hp in self.hyperparameters.space}
for hp in hyperparameters.space:
if hp.name not in ref_names:
hp_type = None
if any([hp.name.startswith(block.name)
for block in self.hyper_graph.blocks
if isinstance(block, base.HyperBlock)]):
hp_type = GreedyOracle.HYPER
elif any([hp.name.startswith(block.name)
for block in preprocess_graph.blocks]):
hp_type = GreedyOracle.PREPROCESS
elif any([hp.name.startswith(block.name)
for block in keras_graph.blocks]):
hp_type = GreedyOracle.ARCH
else:
hp_type = GreedyOracle.OPT
self._hp_names[hp_type].add(hp.name)
super().update_space(hyperparameters)
def _populate_space(self, trial_id):
for _ in range(len(GreedyOracle.STAGES)):
values = self._generate_stage_values()
# Reached max collisions.
if values is None:
# Try next stage.
self._stage = GreedyOracle.next_stage(self._stage)
self._stage_trial_count = 0
continue
# Values found.
self._stage_trial_count += 1
if self._stage_trial_count == self._capacity[self._stage]:
self._stage = GreedyOracle.next_stage(self._stage)
self._stage_trial_count = 0
return {'status': kerastuner.engine.trial.TrialStatus.RUNNING,
'values': values}
# All stages reached max collisions.
return {'status': kerastuner.engine.trial.TrialStatus.STOPPED,
'values': None}
def _generate_stage_values(self):
best_trials = self.get_best_trials()
if best_trials:
best_values = best_trials[0].hyperparameters.values
else:
best_values = self.hyperparameters.values
collisions = 0
while 1:
# Generate new values for the current stage.
values = {}
for p in self.hyperparameters.space:
if p.name in self._hp_names[self._stage]:
values[p.name] = p.random_sample(self._seed_state)
self._seed_state += 1
values = {**best_values, **values}
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash not in self._tried_so_far:
self._tried_so_far.add(values_hash)
break
collisions += 1
if collisions > self._max_collisions:
# Reached max collisions. No value to return.
return None
return values
class Greedy(AutoTuner):
def __init__(self,
hyper_graph,
hypermodel,
objective,
max_trials,
fit_on_val_data=False,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
**kwargs):
self.seed = seed
oracle = GreedyOracle(
hyper_graph=hyper_graph,
objective=objective,
max_trials=max_trials,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries)
hp = oracle.get_space()
preprocess_graph, keras_graph = hyper_graph.build_graphs(hp)
oracle.update_space(hp)
super().__init__(
hyper_graph=hyper_graph,
fit_on_val_data=fit_on_val_data,
oracle=oracle,
hypermodel=hypermodel,
**kwargs)
TUNER_CLASSES = {
'bayesian': BayesianOptimization,
'random': RandomSearch,
'hyperband': Hyperband,
'greedy': Greedy,
'image_classifier': Greedy,
'image_regressor': Greedy,
'text_classifier': Greedy,
'text_regressor': Greedy,
'structured_data_classifier': Greedy,
'structured_data_regressor': Greedy,
}
def get_tuner_class(tuner):
if isinstance(tuner, str) and tuner in TUNER_CLASSES:
return TUNER_CLASSES.get(tuner)
else:
raise ValueError('The value {tuner} passed for argument tuner is invalid, '
'expected one of "greedy", "random", "hyperband", '
'"bayesian".'.format(tuner=tuner))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Lacework ContractInfo API wrapper.
"""
from laceworksdk.api.base_endpoint import BaseEndpoint
class ContractInfoAPI(BaseEndpoint):
def __init__(self, session):
"""
Initializes the ContractInfoAPI object.
:param session: An instance of the HttpSession class
:return ContractInfoAPI object.
"""
super().__init__(session, "ContractInfo")
def get(self,
**request_params):
"""
A method to get ContractInfo objects.
:param request_params: Additional request parameters.
(provides support for parameters that may be added in the future)
:return response json
"""
params = self.build_dict_from_items(
request_params
)
response = self._session.get(self.build_url(), params=params)
return response.json()
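# Hypothetical usage sketch (the HttpSession wiring is defined elsewhere in
# laceworksdk; only the endpoint wrapper lives in this module):
#   api = ContractInfoAPI(session)
#   info = api.get()   # future query parameters can be passed as kwargs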
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Translated source for Order.
##
# Source file: Order.java
# Target file: Order.py
#
# Original file copyright original author(s).
# This file copyright Troy Melhase, troy@gci.net.
#
# WARNING: all changes to this file will be lost.
from ib.lib import Double, Integer
from ib.ext.Util import Util
class Order(object):
""" generated source for Order
"""
CUSTOMER = 0
FIRM = 1
OPT_UNKNOWN = '?'
OPT_BROKER_DEALER = 'b'
OPT_CUSTOMER = 'c'
OPT_FIRM = 'f'
OPT_ISEMM = 'm'
OPT_FARMM = 'n'
OPT_SPECIALIST = 'y'
AUCTION_MATCH = 1
AUCTION_IMPROVEMENT = 2
AUCTION_TRANSPARENT = 3
EMPTY_STR = ""
m_orderId = 0
m_clientId = 0
m_permId = 0
m_action = ""
m_totalQuantity = 0
m_orderType = ""
m_lmtPrice = float()
m_auxPrice = float()
m_tif = ""
m_ocaGroup = ""
m_ocaType = 0
m_orderRef = ""
m_transmit = bool()
m_parentId = 0
m_blockOrder = bool()
m_sweepToFill = bool()
m_displaySize = 0
m_triggerMethod = 0
m_outsideRth = bool()
m_hidden = bool()
m_goodAfterTime = ""
m_goodTillDate = ""
m_overridePercentageConstraints = bool()
m_rule80A = ""
m_allOrNone = bool()
m_minQty = 0
m_percentOffset = float()
m_trailStopPrice = float()
m_faGroup = ""
m_faProfile = ""
m_faMethod = ""
m_faPercentage = ""
m_openClose = ""
m_origin = 0
m_shortSaleSlot = 0
m_designatedLocation = ""
m_discretionaryAmt = float()
m_eTradeOnly = bool()
m_firmQuoteOnly = bool()
m_nbboPriceCap = float()
m_auctionStrategy = 0
m_startingPrice = float()
m_stockRefPrice = float()
m_delta = float()
m_stockRangeLower = float()
m_stockRangeUpper = float()
m_volatility = float()
m_volatilityType = 0
m_continuousUpdate = 0
m_referencePriceType = 0
m_deltaNeutralOrderType = ""
m_deltaNeutralAuxPrice = float()
m_basisPoints = float()
m_basisPointsType = 0
m_scaleInitLevelSize = 0
m_scaleSubsLevelSize = 0
m_scalePriceIncrement = float()
m_account = ""
m_settlingFirm = ""
m_clearingAccount = ""
m_clearingIntent = ""
m_algoStrategy = ""
m_algoParams = list()
m_whatIf = bool()
m_notHeld = bool()
def __init__(self):
self.m_outsideRth = False
self.m_openClose = "O"
self.m_origin = self.CUSTOMER
self.m_transmit = True
self.m_designatedLocation = self.EMPTY_STR
self.m_minQty = Integer.MAX_VALUE
self.m_percentOffset = Double.MAX_VALUE
self.m_nbboPriceCap = Double.MAX_VALUE
self.m_startingPrice = Double.MAX_VALUE
self.m_stockRefPrice = Double.MAX_VALUE
self.m_delta = Double.MAX_VALUE
self.m_stockRangeLower = Double.MAX_VALUE
self.m_stockRangeUpper = Double.MAX_VALUE
self.m_volatility = Double.MAX_VALUE
self.m_volatilityType = Integer.MAX_VALUE
self.m_deltaNeutralOrderType = self.EMPTY_STR
self.m_deltaNeutralAuxPrice = Double.MAX_VALUE
self.m_referencePriceType = Integer.MAX_VALUE
self.m_trailStopPrice = Double.MAX_VALUE
self.m_basisPoints = Double.MAX_VALUE
self.m_basisPointsType = Integer.MAX_VALUE
self.m_scaleInitLevelSize = Integer.MAX_VALUE
self.m_scaleSubsLevelSize = Integer.MAX_VALUE
self.m_scalePriceIncrement = Double.MAX_VALUE
self.m_whatIf = False
self.m_notHeld = False
def __eq__(self, p_other):
if self is p_other:
return True
if p_other is None:
return False
l_theOther = p_other
if (self.m_permId == l_theOther.m_permId):
return True
if (self.m_orderId != l_theOther.m_orderId) or (self.m_clientId != l_theOther.m_clientId) or (self.m_totalQuantity != l_theOther.m_totalQuantity) or (self.m_lmtPrice != l_theOther.m_lmtPrice) or (self.m_auxPrice != l_theOther.m_auxPrice) or (self.m_ocaType != l_theOther.m_ocaType) or (self.m_transmit != l_theOther.m_transmit) or (self.m_parentId != l_theOther.m_parentId) or (self.m_blockOrder != l_theOther.m_blockOrder) or (self.m_sweepToFill != l_theOther.m_sweepToFill) or (self.m_displaySize != l_theOther.m_displaySize) or (self.m_triggerMethod != l_theOther.m_triggerMethod) or (self.m_outsideRth != l_theOther.m_outsideRth) or (self.m_hidden != l_theOther.m_hidden) or (self.m_overridePercentageConstraints != l_theOther.m_overridePercentageConstraints) or (self.m_allOrNone != l_theOther.m_allOrNone) or (self.m_minQty != l_theOther.m_minQty) or (self.m_percentOffset != l_theOther.m_percentOffset) or (self.m_trailStopPrice != l_theOther.m_trailStopPrice) or (self.m_origin != l_theOther.m_origin) or (self.m_shortSaleSlot != l_theOther.m_shortSaleSlot) or (self.m_discretionaryAmt != l_theOther.m_discretionaryAmt) or (self.m_eTradeOnly != l_theOther.m_eTradeOnly) or (self.m_firmQuoteOnly != l_theOther.m_firmQuoteOnly) or (self.m_nbboPriceCap != l_theOther.m_nbboPriceCap) or (self.m_auctionStrategy != l_theOther.m_auctionStrategy) or (self.m_startingPrice != l_theOther.m_startingPrice) or (self.m_stockRefPrice != l_theOther.m_stockRefPrice) or (self.m_delta != l_theOther.m_delta) or (self.m_stockRangeLower != l_theOther.m_stockRangeLower) or (self.m_stockRangeUpper != l_theOther.m_stockRangeUpper) or (self.m_volatility != l_theOther.m_volatility) or (self.m_volatilityType != l_theOther.m_volatilityType) or (self.m_continuousUpdate != l_theOther.m_continuousUpdate) or (self.m_referencePriceType != l_theOther.m_referencePriceType) or (self.m_deltaNeutralAuxPrice != l_theOther.m_deltaNeutralAuxPrice) or (self.m_basisPoints != l_theOther.m_basisPoints) or (self.m_basisPointsType != l_theOther.m_basisPointsType) or (self.m_scaleInitLevelSize != l_theOther.m_scaleInitLevelSize) or (self.m_scaleSubsLevelSize != l_theOther.m_scaleSubsLevelSize) or (self.m_scalePriceIncrement != l_theOther.m_scalePriceIncrement) or (self.m_whatIf != l_theOther.m_whatIf) or (self.m_notHeld != l_theOther.m_notHeld):
return False
if (Util.StringCompare(self.m_action, l_theOther.m_action) != 0) or (Util.StringCompare(self.m_orderType, l_theOther.m_orderType) != 0) or (Util.StringCompare(self.m_tif, l_theOther.m_tif) != 0) or (Util.StringCompare(self.m_ocaGroup, l_theOther.m_ocaGroup) != 0) or (Util.StringCompare(self.m_orderRef, l_theOther.m_orderRef) != 0) or (Util.StringCompare(self.m_goodAfterTime, l_theOther.m_goodAfterTime) != 0) or (Util.StringCompare(self.m_goodTillDate, l_theOther.m_goodTillDate) != 0) or (Util.StringCompare(self.m_rule80A, l_theOther.m_rule80A) != 0) or (Util.StringCompare(self.m_faGroup, l_theOther.m_faGroup) != 0) or (Util.StringCompare(self.m_faProfile, l_theOther.m_faProfile) != 0) or (Util.StringCompare(self.m_faMethod, l_theOther.m_faMethod) != 0) or (Util.StringCompare(self.m_faPercentage, l_theOther.m_faPercentage) != 0) or (Util.StringCompare(self.m_openClose, l_theOther.m_openClose) != 0) or (Util.StringCompare(self.m_designatedLocation, l_theOther.m_designatedLocation) != 0) or (Util.StringCompare(self.m_deltaNeutralOrderType, l_theOther.m_deltaNeutralOrderType) != 0) or (Util.StringCompare(self.m_account, l_theOther.m_account) != 0) or (Util.StringCompare(self.m_settlingFirm, l_theOther.m_settlingFirm) != 0) or (Util.StringCompare(self.m_clearingAccount, l_theOther.m_clearingAccount) != 0) or (Util.StringCompare(self.m_clearingIntent, l_theOther.m_clearingIntent) != 0) or (Util.StringCompare(self.m_algoStrategy, l_theOther.m_algoStrategy) != 0):
return False
if not Util.VectorEqualsUnordered(self.m_algoParams, l_theOther.m_algoParams):
return False
return True
| nilq/baby-python | python |
"""
Wrap around the bottleneck distance executable from Dionysus, and provide
some utility functions for plotting
"""
import subprocess
import numpy as np
import matplotlib.pyplot as plt
import os
def plotDGM(dgm, color = 'b', sz = 20, label = 'dgm'):
if dgm.size == 0:
return
# Create Lists
# set axis values
axMin = np.min(dgm)
axMax = np.max(dgm)
    axRange = axMax - axMin
# plot points
    plt.scatter(dgm[:, 0], dgm[:, 1], sz, color, label=label)
    # plt.hold was removed in modern matplotlib; axes retain content by default.
    # plot the diagonal
    plt.plot([axMin - axRange / 5, axMax + axRange / 5],
             [axMin - axRange / 5, axMax + axRange / 5], 'k')
# adjust axis
#plt.axis([axMin-axRange/5,axMax+axRange/5, axMin-axRange/5, axMax+axRange/5])
# add labels
plt.xlabel('Time of Birth')
plt.ylabel('Time of Death')
def plot2DGMs(P1, P2, l1 = 'Diagram 1', l2 = 'Diagram 2'):
    plotDGM(P1, 'r', 10, label=l1)
    # plt.hold was removed in modern matplotlib; axes retain content by default.
    plt.plot(P2[:, 0], P2[:, 1], 'bx', label=l2)
plt.legend()
plt.xlabel("Birth Time")
plt.ylabel("Death Time")
def savePD(filename, I):
if os.path.exists(filename):
os.remove(filename)
fout = open(filename, "w")
for i in range(I.shape[0]):
fout.write("%g %g"%(I[i, 0], I[i, 1]))
if i < I.shape[0]-1:
fout.write("\n")
fout.close()
def getBottleneckDist(PD1, PD2):
savePD("PD1.txt", PD1)
savePD("PD2.txt", PD2)
proc = subprocess.Popen(["./bottleneck", "PD1.txt", "PD2.txt"], stdout=subprocess.PIPE)
return float(proc.stdout.readline())
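# Usage sketch (assumes the Dionysus "bottleneck" executable sits in the
# working directory; diagrams are (n, 2) arrays of birth/death pairs):
#   I1 = np.array([[0.0, 1.0], [0.2, 0.5]])
#   I2 = np.array([[0.0, 0.9]])
#   d = getBottleneckDist(I1, I2)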
| nilq/baby-python | python |
# foreign-state
| nilq/baby-python | python |
import tensorflow as tf
from tensorflow.python.platform import flags
import pandas as pd
import numpy as np
from pprint import pprint
from sklearn.model_selection import train_test_split
from data_postp.similarity_computations import transform_vectors_with_inter_class_pca
FLAGS = flags.FLAGS  # 'flags' was imported from tensorflow.python.platform above
METADATA_PICKLE_FILE = '/common/homes/students/rothfuss/Documents/selected_trainings/4_actNet_gdl/validate/metadata_and_hidden_rep_df_08-07-17_00-21-11_valid.pickle'
flags.DEFINE_float('learning_rate', 0.0001, 'learning rate')
flags.DEFINE_integer('training_epochs', 20000, 'training_epochs')
flags.DEFINE_integer('batch_size', 200, 'batch size')
flags.DEFINE_string('df_path', METADATA_PICKLE_FILE, 'path to the pickled dataframe with hidden representations')
flags.DEFINE_string('label_column', 'category', 'name of column in df that contains the labels for the classification')
flags.DEFINE_float('keep_prob', 0.5, 'keep probability dropout')
NONLINEARITY = tf.nn.tanh #tf.nn.elu # tf.nn.relu
''' --- PREPARE DATA --- '''
def prepare_data():
"""
prepare the data so that X and Y is available as ndarray
X: ndarray of hidden_repr instances - shape (n_samples, num_dims_hidden_repr)
Y: ndarray of one-hot encoded labels corresponding to the hidden_reps - - shape (n_samples, num_classes)
"""
df = pd.read_pickle(FLAGS.df_path)
#df = transform_vectors_with_inter_class_pca(df, class_column=FLAGS.label_column, n_components=300)
assert 'hidden_repr' in df.columns and FLAGS.label_column in df.columns, "columns for hidden_representation and label must be in df.columns"
X = np.stack([h.flatten() for h in df['hidden_repr']])
n_classes = len(set(df[FLAGS.label_column]))
category_dict = dict([(category, i) for i, category in enumerate(list(set(df['category'])))])
category_dict_reversed = dict([(i, category) for i, category in enumerate(list(set(df['category'])))])
Y = tf.one_hot([category_dict[category] for category in df['category']], n_classes)
Y = tf.Session().run(Y)
assert X.shape[0] == Y.shape[0] == len(df.index)
return X, Y
def get_batch(X, Y, batch_size):
assert X.shape[0] == Y.shape[0]
r = np.random.randint(X.shape[0], size=batch_size)
return X[r,:], Y[r,:]
X, Y = prepare_data()
#train - test split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
n_input, n_classes = X.shape[1], Y.shape[1]
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
n_hidden_1 = 200 # 1st layer number of features
n_hidden_2 = 200 # 2nd layer number of features
# Create model
def multilayer_perceptron(x, weights, biases, keep_prob):
# Hidden layer with nonlinear activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = NONLINEARITY(layer_1)
layer_1 = tf.nn.dropout(layer_1, keep_prob)
# Hidden layer with nonlinear activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = NONLINEARITY(layer_2)
layer_2 = tf.nn.dropout(layer_2, keep_prob)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases, keep_prob)
# Define loss and optimizer
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(loss)
# Define Accuracy
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(FLAGS.training_epochs):
avg_cost, avg_acc = 0, 0
total_batch = int(X_train.shape[0]/FLAGS.batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = get_batch(X_train, Y_train, FLAGS.batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c, a = sess.run([optimizer, loss, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: FLAGS.keep_prob})
            # Compute average loss and average accuracy
avg_cost += c / total_batch
avg_acc += a / total_batch
# Display logs per epoch step
if epoch % 100 == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost), "acc=", "{:.9f}".format(avg_acc))
print("Test Accuracy:", sess.run(accuracy, feed_dict={x: X_test, y: Y_test, keep_prob: 1}))
print("Optimization Finished!")
# Test model
# Calculate accuracy
print("Accuracy:", sess.run([accuracy], feed_dict={x: X_test, y: Y_test, keep_prob: 1}))
| nilq/baby-python | python |
'''
bibtutils.slack.message
~~~~~~~~~~~~~~~~~~~~~~~
Enables sending messages to Slack.
'''
import os
import json
import logging
import requests
import datetime
logging.getLogger(__name__).addHandler(logging.NullHandler())
def send_message(webhook, title, text, color):
'''Sends a message to Slack.
.. code:: python
from bibtutils.slack.message import send_message
...
:type webhook: :py:class:`str`
:param webhook: a slack webhook in the standard format:
``'https://hooks.slack.com/services/{app_id}/{channel_id}/{hash}'``
:type title: :py:class:`str`
:param title: the title of the message. This will appear above the attachment.
Can be Slack-compatible markdown.
:type text: :py:class:`str`
:param text: the text to be included in the attachment.
Can be Slack-compatible markdown.
:type color: :py:class:`str`
:param color: the color to use for the Slack attachment border.
'''
msg = {
'blocks': [{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': title
}
}],
'attachments': [{
'color': color,
'blocks': [{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': text
}
}]
}]
}
r = requests.post(webhook, json=msg)
r.raise_for_status()
return
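# Usage sketch (the webhook URL is a placeholder in the documented format):
#   send_message(
#       'https://hooks.slack.com/services/{app_id}/{channel_id}/{hash}',
#       title='*Deploy finished*',
#       text='All services healthy.',
#       color='#36a64f',
#   )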
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from datetime import timedelta
import re
import pymorphy2
import collections
def calc_popular_nouns_by_weeks(articles_info, nouns_count=3):
morph = pymorphy2.MorphAnalyzer()
words_by_weeks = _group_words_by_weeks(articles_info)
nouns_by_week = {}
for week in sorted(words_by_weeks, key=lambda dates: dates[0], reverse=True):
words = words_by_weeks[week]
nouns = []
for word in words:
parsed = morph.parse(word)[0]
if 'NOUN' in parsed.tag:
nouns.append(parsed.normal_form)
nouns_by_week[week] = collections.Counter(nouns).most_common(nouns_count)
return nouns_by_week
def output_stat(nouns_by_week):
    print('\nWeek start | Week end | Popular words from headlines')
print('-----------------------------------------------------------------')
for week in nouns_by_week:
nouns = ['{noun}: {freq}'.format(noun=noun[0], freq=noun[1]) for noun in [stat for stat in nouns_by_week[week]]]
print('{week_begin} | {week_end} | {nouns}'.format(week_begin=week[0],
week_end=week[1],
nouns=', '.join(nouns)))
def _group_words_by_weeks(articles_info):
words_by_weeks = {}
dates = [info['publication_date_time']for info in articles_info]
dates.sort(reverse=True)
week_start_date = None
week_end_date = None
words = []
for index, date in enumerate(dates):
if not week_end_date:
week_end_date = date.date()
week_start_date = week_end_date - timedelta(days=week_end_date.weekday())
if not (week_start_date <= date.date() <= week_end_date):
week_start_date = date.date() - timedelta(days=date.weekday())
week_end_date = date.date() + timedelta(days=6 - date.weekday())
words = []
words += re.sub('[^a-zа-я]', ' ', articles_info[index]['title'].lower().strip()).split()
words_by_weeks[(week_start_date, week_end_date)] = words
return words_by_weeks
| nilq/baby-python | python |
"""
@UpdateTime: 2017/12/7
@Author: liutao
"""
from django.db import models
# Create your models here.
# Product table
class Product(models.Model):
p_id = models.AutoField(primary_key=True)
p_name = models.CharField(max_length=150)
p_money = models.IntegerField()
p_number = models.IntegerField()
p_info = models.TextField()
u = models.ForeignKey('User', models.DO_NOTHING)
class Meta:
managed = True
db_table = 'product'
# User table
class User(models.Model):
u_id = models.AutoField(primary_key=True)
u_name = models.CharField(max_length=50)
u_passwd = models.CharField(max_length=50)
u_touxiang = models.CharField(max_length=100)
class Meta:
managed = True
db_table = 'user'
# Image table
class Images(models.Model):
img_id = models.AutoField(primary_key=True)
img_address = models.CharField(max_length=200)
p_id = models.IntegerField()
class Meta:
managed = True
db_table = 'images'
# Order table
class Order(models.Model):
o_id = models.AutoField(primary_key=True)
p_id = models.IntegerField()
u_id = models.IntegerField()
b_id = models.IntegerField()
p_name = models.CharField(max_length=100)
p_money = models.IntegerField()
time = models.DateTimeField()
class Meta:
managed = True
db_table = 'order'
| nilq/baby-python | python |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
import json
import pytest
from awsiot.greengrasscoreipc.model import (
JsonMessage,
SubscriptionResponseMessage
)
sys.path.append("src/")
testTokenJson = [
{
"id": "0895c16b9de9e000",
"description": "test's Token",
"token": "testAdminToken",
"status": "active",
"userName": "test",
"userID": "0895c16b80a9e000",
"permissions": [
"read:authorizations",
"write:authorizations"
]
},
{
"id": "0895c16bfba9e000",
"description": "greengrass_read",
"token": "testROToken",
"status": "active",
"userName": "test",
"userID": "0895c16b80a9e000",
"permissions": [
"read:orgs/d13dcc4c7cd25bf9/buckets/2f1dc2bba2275383"
]
},
{
"id": "0895c16c8ee9e000",
"description": "greengrass_readwrite",
"token": "testRWToken",
"status": "active",
"userName": "test",
"userID": "0895c16b80a9e000",
"permissions": [
"read:orgs/d13dcc4c7cd25bf9/buckets/2f1dc2bba2275383",
"write:orgs/d13dcc4c7cd25bf9/buckets/2f1dc2bba2275383"
]
}
]
testMetadataJson = {
'InfluxDBContainerName': 'greengrass_InfluxDB',
'InfluxDBOrg': 'greengrass',
'InfluxDBBucket': 'greengrass-telemetry',
'InfluxDBPort': '8086',
'InfluxDBInterface': '127.0.0.1',
'InfluxDBServerProtocol': 'https',
'InfluxDBSkipTLSVerify': 'true',
}
# Copy so that adding the token fields below does not also mutate
# testMetadataJson, which the handler tests pass in separately.
testPublishJson = dict(testMetadataJson)
testPublishJson['InfluxDBTokenAccessType'] = "RW"
testPublishJson['InfluxDBToken'] = "testRWToken"
def testHandleValidStreamEvent(mocker):
mock_ipc_client = mocker.patch("awsiot.greengrasscoreipc.connect")
mock_publish_response = mocker.patch('src.influxDBTokenStreamHandler.InfluxDBTokenStreamHandler.publish_response')
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps(testMetadataJson), json.dumps(testTokenJson), "test/topic")
message = JsonMessage(message={"action": "RetrieveToken", "accessLevel": "RW"})
response_message = SubscriptionResponseMessage(json_message=message)
    handler.handle_stream_event(response_message)
mock_publish_response.assert_called_with(testPublishJson)
assert mock_ipc_client.call_count == 1
assert mock_publish_response.call_count == 1
def testHandleInvalidStreamEvent(mocker):
mock_ipc_client = mocker.patch("awsiot.greengrasscoreipc.connect")
mock_publish_response = mocker.patch('src.influxDBTokenStreamHandler.InfluxDBTokenStreamHandler.publish_response')
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps({}), json.dumps(testTokenJson), "test")
message = JsonMessage(message={})
response_message = SubscriptionResponseMessage(json_message=message)
handler.handle_stream_event(response_message)
assert mock_ipc_client.call_count == 1
assert not mock_publish_response.called
def testHandleInvalidRequestType(mocker):
mock_ipc_client = mocker.patch("awsiot.greengrasscoreipc.connect")
mock_publish_response = mocker.patch('src.influxDBTokenStreamHandler.InfluxDBTokenStreamHandler.publish_response')
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps({}), json.dumps(testTokenJson), "test")
message = JsonMessage(message={"action": "invalid", "accessLevel": "RW"})
response_message = SubscriptionResponseMessage(json_message=message)
handler.handle_stream_event(response_message)
assert mock_ipc_client.call_count == 1
assert not mock_publish_response.called
def testHandleInvalidTokenRequestType(mocker):
mock_ipc_client = mocker.patch("awsiot.greengrasscoreipc.connect")
mock_publish_response = mocker.patch('src.influxDBTokenStreamHandler.InfluxDBTokenStreamHandler.publish_response')
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps({}), json.dumps(testTokenJson), "test")
message = JsonMessage(message={"action": "RetrieveToken", "accessLevel": "invalid"})
response_message = SubscriptionResponseMessage(json_message=message)
handler.handle_stream_event(response_message)
assert mock_ipc_client.call_count == 1
assert not mock_publish_response.called
def testHandleNullStreamEvent(mocker):
mock_ipc_client = mocker.patch("awsiot.greengrasscoreipc.connect")
mock_publish_response = mocker.patch('src.influxDBTokenStreamHandler.InfluxDBTokenStreamHandler.publish_response')
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps(testMetadataJson), json.dumps(testTokenJson), "test")
response_message = None
handler.handle_stream_event(response_message)
assert mock_ipc_client.call_count == 1
assert not mock_publish_response.called
def testGetValidPublishJson(mocker):
mocker.patch("awsiot.greengrasscoreipc.connect")
import src.influxDBTokenStreamHandler as streamHandler
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps(testMetadataJson), json.dumps(testTokenJson), "test/topic")
message = json.loads('{"action": "RetrieveToken", "accessLevel": "RW"}')
publish_json = handler.get_publish_json(message)
assert publish_json == testPublishJson
message = json.loads('{"action": "RetrieveToken", "accessLevel": "RO"}')
publish_json = handler.get_publish_json(message)
testPublishJson['InfluxDBTokenAccessType'] = "RO"
testPublishJson['InfluxDBToken'] = "testROToken"
assert publish_json == testPublishJson
message = json.loads('{"action": "RetrieveToken", "accessLevel": "Admin"}')
publish_json = handler.get_publish_json(message)
testPublishJson['InfluxDBTokenAccessType'] = "Admin"
testPublishJson['InfluxDBToken'] = "testAdminToken"
assert publish_json == testPublishJson
def testGetInvalidPublishJson(mocker):
mocker.patch("awsiot.greengrasscoreipc.connect")
import src.influxDBTokenStreamHandler as streamHandler
testTokenJson[0]['token'] = ""
testTokenJson[1]['token'] = ""
testTokenJson[2]['token'] = ""
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps(testMetadataJson), json.dumps(testTokenJson), "test/topic")
with pytest.raises(ValueError, match='Failed to parse InfluxDB RW token!'):
message = json.loads('{"action": "RetrieveToken", "accessLevel": "RW"}')
handler.get_publish_json(message)
with pytest.raises(ValueError, match='Failed to parse InfluxDB RO token!'):
message = json.loads('{"action": "RetrieveToken", "accessLevel": "RO"}')
handler.get_publish_json(message)
with pytest.raises(ValueError, match='Failed to parse InfluxDB Admin token!'):
message = json.loads('{"action": "RetrieveToken", "accessLevel": "Admin"}')
handler.get_publish_json(message)
testTokenJson[0]['description'] = ""
handler = streamHandler.InfluxDBTokenStreamHandler(json.dumps(testMetadataJson), json.dumps(testTokenJson), "test/topic")
message = json.loads('{"action": "RetrieveToken", "accessLevel": "Admin"}')
retval = handler.get_publish_json(message)
assert retval is None
| nilq/baby-python | python |
from .property import LiteralProperty
import packaging.version as pv
import rdflib
class VersionProperty(LiteralProperty):
def convert_to_user(self, value):
result = str(value)
if result == '':
# special case, empty strings are equivalent to None
return None
return result
def convert_from_user(self, value):
# Empty string is equivalent to None
if value == '':
value = None
# None is ok iff upper bound is 1 and lower bound is 0.
# If upper bound > 1, attribute is a list and None is not a valid list
# If lower bound > 0, attribute must have a value, so None is unacceptable
if value is None and self.upper_bound == 1 and self.lower_bound == 0:
return None
try:
version = pv.Version(value)
except pv.InvalidVersion as e:
raise ValueError(e)
except TypeError as e:
raise ValueError(e)
return rdflib.Literal(str(version))
@staticmethod
def _make_version(major: int, minor: int, micro: int) -> pv.Version:
return pv.Version(f'{major}.{minor}.{micro}')
@staticmethod
def increment_major(version: str) -> str:
old = pv.Version(version)
new = VersionProperty._make_version(old.major + 1, old.minor, old.micro)
return str(new)
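# Example: increment_major bumps only the major component, keeping minor and
# micro: VersionProperty.increment_major('1.2.3') -> '2.2.3'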
| nilq/baby-python | python |
from __future__ import print_function, absolute_import
from os import getenv
from time import sleep
import click
import json
import getpass
from datetime import datetime, timedelta, timezone
from ecs_deploy import VERSION
from ecs_deploy.ecs import DeployAction, DeployBlueGreenAction, ScaleAction, RunAction, EcsClient, DiffAction, \
TaskPlacementError, EcsError, UpdateAction, LAUNCH_TYPE_EC2, LAUNCH_TYPE_FARGATE
from ecs_deploy.newrelic import Deployment, NewRelicException
from ecs_deploy.slack import SlackNotification
@click.group()
@click.version_option(version=VERSION, prog_name='ecs-deploy')
def ecs(): # pragma: no cover
pass
def get_client(access_key_id, secret_access_key, region, profile):
return EcsClient(access_key_id, secret_access_key, region, profile)
@click.command()
@click.argument('cluster')
@click.argument('service')
@click.option('-t', '--tag', help='Changes the tag for ALL container images')
@click.option('-i', '--image', type=(str, str), multiple=True, help='Overwrites the image for a container: <container> <image>')
@click.option('-c', '--command', type=(str, str), multiple=True, help='Overwrites the command in a container: <container> <command>')
@click.option('-e', '--env', type=(str, str, str), multiple=True, help='Adds or changes an environment variable: <container> <name> <value>')
@click.option('-s', '--secret', type=(str, str, str), multiple=True, help='Adds or changes a secret environment variable from the AWS Parameter Store (Not available for Fargate): <container> <name> <parameter name>')
@click.option('-r', '--role', type=str, help='Sets the task\'s role ARN: <task role ARN>')
@click.option('-x', '--execution-role', type=str, help='Sets the execution\'s role ARN: <execution role ARN>')
@click.option('--task', type=str, help='Task definition to be deployed. Can be a task ARN or a task family with optional revision')
@click.option('--region', required=False, help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', required=False, help='AWS access key id')
@click.option('--secret-access-key', required=False, help='AWS secret access key')
@click.option('--profile', required=False, help='AWS configuration profile name')
@click.option('--timeout', required=False, default=300, type=int, help='Amount of seconds to wait for deployment before command fails (default: 300). To disable timeout (fire and forget) set to -1')
@click.option('--ignore-warnings', is_flag=True, help='Do not fail deployment on warnings (port already in use or insufficient memory/CPU)')
@click.option('--newrelic-apikey', required=False, help='New Relic API Key for recording the deployment. Can also be defined via environment variable NEW_RELIC_API_KEY')
@click.option('--newrelic-appid', required=False, help='New Relic App ID for recording the deployment. Can also be defined via environment variable NEW_RELIC_APP_ID')
@click.option('--newrelic-region', required=False, help='New Relic region: US or EU (default: US). Can also be defined via environment variable NEW_RELIC_REGION')
@click.option('--comment', required=False, help='Description/comment for recording the deployment')
@click.option('--user', required=False, help='User who executes the deployment (used for recording)')
@click.option('--diff/--no-diff', default=True, help='Print which values were changed in the task definition (default: --diff)')
@click.option('--deregister/--no-deregister', default=True, help='Deregister or keep the old task definition (default: --deregister)')
@click.option('--rollback/--no-rollback', default=False, help='Rollback to previous revision, if deployment failed (default: --no-rollback)')
@click.option('--exclusive-env', is_flag=True, default=False, help='Set the given environment variables exclusively and remove all other pre-existing env variables from all containers')
@click.option('--exclusive-secrets', is_flag=True, default=False, help='Set the given secrets exclusively and remove all other pre-existing secrets from all containers')
@click.option('--sleep-time', default=1, type=int, help='Amount of seconds to wait between each check of the service (default: 1)')
@click.option('--slack-url', required=False, help='Webhook URL of the Slack integration. Can also be defined via environment variable SLACK_URL')
@click.option('--slack-service-match', default=".*", required=False, help='A regular expression defining which services should be notified (default: .* = all). Can also be defined via environment variable SLACK_SERVICE_MATCH')
@click.option('--cd-application-name', required=False, help='CodeDeploy Application name from Blue/Green deployment')
def deploy(cluster, service, tag, image, command, env, secret, role, execution_role, task, region, access_key_id, secret_access_key, profile, timeout, newrelic_apikey, newrelic_appid, newrelic_region, comment, user, ignore_warnings, diff, deregister, rollback, exclusive_env, exclusive_secrets, sleep_time, slack_url, slack_service_match='.*', cd_application_name=None):
"""
Redeploy or modify a service.
\b
    CLUSTER is the name of your cluster (e.g. 'my-cluster') within ECS.
SERVICE is the name of your service (e.g. 'my-app') within ECS.
When not giving any other options, the task definition will not be changed.
It will just be duplicated, so that all container images will be pulled
and redeployed.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
if cd_application_name:
deployment = DeployBlueGreenAction(client, cluster, service, cd_application_name=cd_application_name)
else:
deployment = DeployAction(client, cluster, service)
td = get_task_definition(deployment, task)
td.set_images(tag, **{key: value for (key, value) in image})
td.set_commands(**{key: value for (key, value) in command})
td.set_environment(env, exclusive_env)
td.set_secrets(secret, exclusive_secrets)
td.set_role_arn(role)
td.set_execution_role_arn(execution_role)
slack = SlackNotification(
getenv('SLACK_URL', slack_url),
getenv('SLACK_SERVICE_MATCH', slack_service_match)
)
slack.notify_start(cluster, tag, td, comment, user, service=service)
click.secho('Deploying based on task definition: %s\n' % td.family_revision)
if diff:
print_diff(td)
new_td = create_task_definition(deployment, td)
try:
deploy_task_definition(
deployment=deployment,
task_definition=new_td,
title='Deploying new task definition',
success_message='Deployment successful',
failure_message='Deployment failed',
timeout=timeout,
deregister=deregister,
previous_task_definition=td,
ignore_warnings=ignore_warnings,
sleep_time=sleep_time
)
except TaskPlacementError as e:
slack.notify_failure(cluster, str(e), service=service)
if rollback:
click.secho('%s\n' % str(e), fg='red', err=True)
rollback_task_definition(deployment, td, new_td, sleep_time=sleep_time)
exit(1)
else:
raise
record_deployment(tag, newrelic_apikey, newrelic_appid, newrelic_region, comment, user)
slack.notify_success(cluster, td.revision, service=service)
except (EcsError, NewRelicException) as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
@click.command()
@click.argument('cluster')
@click.argument('task')
@click.argument('rule')
@click.option('-i', '--image', type=(str, str), multiple=True, help='Overwrites the image for a container: <container> <image>')
@click.option('-t', '--tag', help='Changes the tag for ALL container images')
@click.option('-c', '--command', type=(str, str), multiple=True, help='Overwrites the command in a container: <container> <command>')
@click.option('-e', '--env', type=(str, str, str), multiple=True, help='Adds or changes an environment variable: <container> <name> <value>')
@click.option('-r', '--role', type=str, help='Sets the task\'s role ARN: <task role ARN>')
@click.option('--region', help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', help='AWS access key id')
@click.option('--secret-access-key', help='AWS secret access key')
@click.option('--newrelic-apikey', required=False, help='New Relic API Key for recording the deployment. Can also be defined via environment variable NEW_RELIC_API_KEY')
@click.option('--newrelic-appid', required=False, help='New Relic App ID for recording the deployment. Can also be defined via environment variable NEW_RELIC_APP_ID')
@click.option('--newrelic-region', required=False, help='New Relic region: US or EU (default: US). Can also be defined via environment variable NEW_RELIC_REGION')
@click.option('--comment', required=False, help='Description/comment for recording the deployment')
@click.option('--user', required=False, help='User who executes the deployment (used for recording)')
@click.option('--profile', help='AWS configuration profile name')
@click.option('--diff/--no-diff', default=True, help='Print what values were changed in the task definition')
@click.option('--deregister/--no-deregister', default=True, help='Deregister or keep the old task definition (default: --deregister)')
@click.option('--rollback/--no-rollback', default=False, help='Rollback to previous revision, if deployment failed (default: --no-rollback)')
@click.option('--slack-url', required=False, help='Webhook URL of the Slack integration. Can also be defined via environment variable SLACK_URL')
@click.option('--slack-service-match', default=".*", required=False, help='A regular expression defining which scheduled tasks (crons) should trigger notifications (default: .* = all). Can also be defined via environment variable SLACK_SERVICE_MATCH')
def cron(cluster, task, rule, image, tag, command, env, role, region, access_key_id, secret_access_key, newrelic_apikey, newrelic_appid, newrelic_region, comment, user, profile, diff, deregister, rollback, slack_url, slack_service_match):
"""
Update a scheduled task.
\b
    CLUSTER is the name of your cluster (e.g. 'my-cluster') within ECS.
TASK is the name of your task definition (e.g. 'my-task') within ECS.
RULE is the name of the rule to use the new task definition.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
action = RunAction(client, cluster)
td = action.get_task_definition(task)
click.secho('Update task definition based on: %s\n' % td.family_revision)
td.set_images(tag, **{key: value for (key, value) in image})
td.set_commands(**{key: value for (key, value) in command})
td.set_environment(env)
td.set_role_arn(role)
slack = SlackNotification(
getenv('SLACK_URL', slack_url),
getenv('SLACK_SERVICE_MATCH', slack_service_match)
)
slack.notify_start(cluster, tag, td, comment, user, rule=rule)
if diff:
print_diff(td)
new_td = create_task_definition(action, td)
client.update_rule(
cluster=cluster,
rule=rule,
task_definition=new_td
)
click.secho('Updating scheduled task')
click.secho('Successfully updated scheduled task %s\n' % rule, fg='green')
slack.notify_success(cluster, td.revision, rule=rule)
record_deployment(tag, newrelic_apikey, newrelic_appid, newrelic_region, comment, user)
if deregister:
deregister_task_definition(action, td)
except EcsError as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
@click.command()
@click.argument('task')
@click.option('-i', '--image', type=(str, str), multiple=True, help='Overwrites the image for a container: <container> <image>')
@click.option('-t', '--tag', help='Changes the tag for ALL container images')
@click.option('-c', '--command', type=(str, str), multiple=True, help='Overwrites the command in a container: <container> <command>')
@click.option('-e', '--env', type=(str, str, str), multiple=True, help='Adds or changes an environment variable: <container> <name> <value>')
@click.option('-s', '--secret', type=(str, str, str), multiple=True, help='Adds or changes a secret environment variable from the AWS Parameter Store (Not available for Fargate): <container> <name> <parameter name>')
@click.option('-r', '--role', type=str, help='Sets the task\'s role ARN: <task role ARN>')
@click.option('--region', help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', help='AWS access key id')
@click.option('--secret-access-key', help='AWS secret access key')
@click.option('--profile', help='AWS configuration profile name')
@click.option('--diff/--no-diff', default=True, help='Print what values were changed in the task definition')
@click.option('--exclusive-env', is_flag=True, default=False, help='Set the given environment variables exclusively and remove all other pre-existing env variables from all containers')
@click.option('--exclusive-secrets', is_flag=True, default=False, help='Set the given secrets exclusively and remove all other pre-existing secrets from all containers')
@click.option('--deregister/--no-deregister', default=True, help='Deregister or keep the old task definition (default: --deregister)')
def update(task, image, tag, command, env, secret, role, region, access_key_id, secret_access_key, profile, diff, exclusive_env, exclusive_secrets, deregister):
"""
Update a task definition.
\b
TASK is the name of your task definition family (e.g. 'my-task') within ECS.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
action = UpdateAction(client)
td = action.get_task_definition(task)
click.secho('Update task definition based on: %s\n' % td.family_revision)
td.set_images(tag, **{key: value for (key, value) in image})
td.set_commands(**{key: value for (key, value) in command})
td.set_environment(env, exclusive_env)
td.set_secrets(secret, exclusive_secrets)
td.set_role_arn(role)
if diff:
print_diff(td)
create_task_definition(action, td)
if deregister:
deregister_task_definition(action, td)
except EcsError as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
@click.command()
@click.argument('cluster')
@click.argument('service')
@click.argument('desired_count', type=int)
@click.option('--region', help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', help='AWS access key id')
@click.option('--secret-access-key', help='AWS secret access key')
@click.option('--profile', help='AWS configuration profile name')
@click.option('--timeout', default=300, type=int, help='Amount of seconds to wait for deployment before command fails (default: 300). To disable timeout (fire and forget) set to -1')
@click.option('--ignore-warnings', is_flag=True, help='Do not fail deployment on warnings (port already in use or insufficient memory/CPU)')
@click.option('--sleep-time', default=1, type=int, help='Amount of seconds to wait between each check of the service (default: 1)')
def scale(cluster, service, desired_count, access_key_id, secret_access_key, region, profile, timeout, ignore_warnings, sleep_time):
"""
Scale a service up or down.
\b
    CLUSTER is the name of your cluster (e.g. 'my-cluster') within ECS.
SERVICE is the name of your service (e.g. 'my-app') within ECS.
DESIRED_COUNT is the number of tasks your service should run.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
scaling = ScaleAction(client, cluster, service)
click.secho('Updating service')
scaling.scale(desired_count)
click.secho(
'Successfully changed desired count to: %s\n' % desired_count,
fg='green'
)
wait_for_finish(
action=scaling,
timeout=timeout,
title='Scaling service',
success_message='Scaling successful',
failure_message='Scaling failed',
ignore_warnings=ignore_warnings,
sleep_time=sleep_time
)
except EcsError as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
@click.command()
@click.argument('cluster')
@click.argument('task')
@click.argument('count', required=False, default=1)
@click.option('-c', '--command', type=(str, str), multiple=True, help='Overwrites the command in a container: <container> <command>')
@click.option('-e', '--env', type=(str, str, str), multiple=True, help='Adds or changes an environment variable: <container> <name> <value>')
@click.option('-s', '--secret', type=(str, str, str), multiple=True, help='Adds or changes a secret environment variable from the AWS Parameter Store (Not available for Fargate): <container> <name> <parameter name>')
@click.option('--launchtype', type=click.Choice([LAUNCH_TYPE_EC2, LAUNCH_TYPE_FARGATE]), default=LAUNCH_TYPE_EC2, help='ECS Launch type (default: EC2)')
@click.option('--subnet', type=str, multiple=True, help='A subnet ID to launch the task within. Required for launch type FARGATE (multiple values possible)')
@click.option('--securitygroup', type=str, multiple=True, help='A security group ID to launch the task within. Required for launch type FARGATE (multiple values possible)')
@click.option('--public-ip', is_flag=True, default=False, help='Should a public IP address be assigned to the task (default: False)')
@click.option('--region', help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', help='AWS access key id')
@click.option('--secret-access-key', help='AWS secret access key')
@click.option('--profile', help='AWS configuration profile name')
@click.option('--diff/--no-diff', default=True, help='Print what values were changed in the task definition')
def run(cluster, task, count, command, env, secret, launchtype, subnet, securitygroup, public_ip, region, access_key_id, secret_access_key, profile, diff):
"""
Run a one-off task.
\b
    CLUSTER is the name of your cluster (e.g. 'my-cluster') within ECS.
TASK is the name of your task definition (e.g. 'my-task') within ECS.
COUNT is the number of tasks your service should run.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
action = RunAction(client, cluster)
td = action.get_task_definition(task)
td.set_commands(**{key: value for (key, value) in command})
td.set_environment(env)
td.set_secrets(secret)
if diff:
print_diff(td, 'Using task definition: %s' % task)
action.run(td, count, 'ECS Deploy', launchtype, subnet, securitygroup, public_ip)
click.secho(
'Successfully started %d instances of task: %s' % (
len(action.started_tasks),
td.family_revision
),
fg='green'
)
for started_task in action.started_tasks:
click.secho('- %s' % started_task['taskArn'], fg='green')
click.secho(' ')
except EcsError as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
@click.command()
@click.argument('task')
@click.argument('revision_a')
@click.argument('revision_b')
@click.option('--region', help='AWS region (e.g. eu-central-1)')
@click.option('--access-key-id', help='AWS access key id')
@click.option('--secret-access-key', help='AWS secret access key')
@click.option('--profile', help='AWS configuration profile name')
def diff(task, revision_a, revision_b, region, access_key_id, secret_access_key, profile):
"""
Compare two task definition revisions.
\b
TASK is the name of your task definition (e.g. 'my-task') within ECS.
    REVISION_A and REVISION_B are the two task definition revisions to compare.
"""
try:
client = get_client(access_key_id, secret_access_key, region, profile)
action = DiffAction(client)
td_a = action.get_task_definition('%s:%s' % (task, revision_a))
td_b = action.get_task_definition('%s:%s' % (task, revision_b))
result = td_a.diff_raw(td_b)
for difference in result:
if difference[0] == 'add':
click.secho('%s: %s' % (difference[0], difference[1]), fg='green')
for added in difference[2]:
click.secho(' + %s: %s' % (added[0], json.dumps(added[1])), fg='green')
if difference[0] == 'change':
click.secho('%s: %s' % (difference[0], difference[1]), fg='yellow')
click.secho(' - %s' % json.dumps(difference[2][0]), fg='red')
click.secho(' + %s' % json.dumps(difference[2][1]), fg='green')
if difference[0] == 'remove':
click.secho('%s: %s' % (difference[0], difference[1]), fg='red')
for removed in difference[2]:
click.secho(' - %s: %s' % removed, fg='red')
except EcsError as e:
click.secho('%s\n' % str(e), fg='red', err=True)
exit(1)
def wait_for_finish(action, timeout, title, success_message, failure_message,
ignore_warnings, sleep_time=1):
click.secho(title, nl=False)
waiting_timeout = datetime.now() + timedelta(seconds=timeout)
service = action.get_service()
inspected_until = None
if timeout == -1:
waiting = False
else:
waiting = True
while waiting and datetime.now() < waiting_timeout:
click.secho('.', nl=False)
service = action.get_service()
inspected_until = inspect_errors(
service=service,
failure_message=failure_message,
ignore_warnings=ignore_warnings,
since=inspected_until,
timeout=False
)
waiting = not action.is_deployed(service)
if waiting:
sleep(sleep_time)
inspect_errors(
service=service,
failure_message=failure_message,
ignore_warnings=ignore_warnings,
since=inspected_until,
timeout=waiting
)
click.secho('\n%s\n' % success_message, fg='green')
def deploy_task_definition(deployment, task_definition, title, success_message,
failure_message, timeout, deregister,
previous_task_definition, ignore_warnings, sleep_time):
click.secho('Updating service')
deploy_response = deployment.deploy(task_definition)
message = 'Successfully changed task definition to: %s:%s\n' % (
task_definition.family,
task_definition.revision
)
    if isinstance(deployment, DeployBlueGreenAction):
click.secho('\nDeployment created: %s' % deploy_response, fg='green')
click.secho(message, fg='green')
wait_for_finish(
action=deployment,
timeout=timeout,
title=title,
success_message=success_message,
failure_message=failure_message,
ignore_warnings=ignore_warnings,
sleep_time=sleep_time
)
if deregister:
deregister_task_definition(deployment, previous_task_definition)
def get_task_definition(action, task):
if task:
task_definition = action.get_task_definition(task)
else:
task_definition = action.get_current_task_definition(action.service)
return task_definition
def create_task_definition(action, task_definition):
click.secho('Creating new task definition revision')
new_td = action.update_task_definition(task_definition)
click.secho(
'Successfully created revision: %d\n' % new_td.revision,
fg='green'
)
return new_td
def deregister_task_definition(action, task_definition):
click.secho('Deregister task definition revision')
action.deregister_task_definition(task_definition)
click.secho(
'Successfully deregistered revision: %d\n' % task_definition.revision,
fg='green'
)
def rollback_task_definition(deployment, old, new, timeout=600, sleep_time=1):
click.secho(
'Rolling back to task definition: %s\n' % old.family_revision,
fg='yellow',
)
deploy_task_definition(
deployment=deployment,
task_definition=old,
title='Deploying previous task definition',
success_message='Rollback successful',
failure_message='Rollback failed. Please check ECS Console',
timeout=timeout,
deregister=True,
previous_task_definition=new,
ignore_warnings=False,
sleep_time=sleep_time
)
click.secho(
'Deployment failed, but service has been rolled back to previous '
'task definition: %s\n' % old.family_revision, fg='yellow', err=True
)
def record_deployment(revision, api_key, app_id, region, comment, user):
api_key = getenv('NEW_RELIC_API_KEY', api_key)
app_id = getenv('NEW_RELIC_APP_ID', app_id)
region = getenv('NEW_RELIC_REGION', region)
if not revision or not api_key or not app_id:
return False
user = user or getpass.getuser()
click.secho('Recording deployment in New Relic', nl=False)
deployment = Deployment(api_key, app_id, user, region)
deployment.deploy(revision, '', comment)
click.secho('\nDone\n', fg='green')
return True
def print_diff(task_definition, title='Updating task definition'):
if task_definition.diff:
click.secho(title)
for diff in task_definition.diff:
click.secho(str(diff), fg='blue')
click.secho('')
def inspect_errors(service, failure_message, ignore_warnings, since, timeout):
error = False
last_error_timestamp = since
warnings = service.get_warnings(since)
for timestamp in warnings:
message = warnings[timestamp]
click.secho('')
if ignore_warnings:
last_error_timestamp = timestamp
click.secho(
text='%s\nWARNING: %s' % (timestamp, message),
fg='yellow',
err=False
)
click.secho('Continuing.', nl=False)
else:
click.secho(
text='%s\nERROR: %s\n' % (timestamp, message),
fg='red',
err=True
)
error = True
if service.older_errors:
click.secho('')
click.secho('Older errors', fg='yellow', err=True)
for timestamp in service.older_errors:
click.secho(
text='%s\n%s\n' % (timestamp, service.older_errors[timestamp]),
fg='yellow',
err=True
)
if timeout:
error = True
failure_message += ' due to timeout. Please see: ' \
'https://github.com/fabfuel/ecs-deploy#timeout'
click.secho('')
if error:
raise TaskPlacementError(failure_message)
return last_error_timestamp
ecs.add_command(deploy)
ecs.add_command(scale)
ecs.add_command(run)
ecs.add_command(cron)
ecs.add_command(update)
ecs.add_command(diff)
if __name__ == '__main__': # pragma: no cover
ecs()
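# Illustrative invocations (cluster/service/task names are made up, not taken
# from this project's docs):
#
#   ecs deploy my-cluster my-service -t 1.2.3
#   ecs scale my-cluster my-service 4
#   ecs diff my-task 14 15
#
# When --access-key-id/--secret-access-key/--profile are omitted, EcsClient
# receives None for them, so credentials presumably fall back to the standard
# AWS resolution chain (assumption, not verified here).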
| nilq/baby-python | python |
"""helpers"""
import mimetypes
import os
import pkgutil
import posixpath
import sys
import socket
import unicodedata
from threading import RLock
from time import time
from zlib import adler32
from werkzeug.datastructures import Headers
from werkzeug.exceptions import (BadRequest, NotFound,
RequestedRangeNotSatisfiable)
from werkzeug.urls import url_quote
from werkzeug.wsgi import wrap_file
from werkzeug.routing import BuildError
from jinja2 import FileSystemLoader
from __compat import string_types, text_type, PY2
from __globals import current_app, request, _request_ctx_stack, _app_ctx_stack, session
# sentinel
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def get_debug_flag():
"""Get whether debug mode should be enabled for the app, indicated
by the :envvar:`FLASK_DEBUG` environment variable. The default is
``True`` if :func:`.get_env` returns ``'development'``, or ``False``
otherwise.
"""
val = os.environ.get('FLASK_DEBUG')
if not val:
return get_env() == 'development'
return val.lower() not in ('0', 'false', 'no')
def get_env():
"""Get the environment the app is running in, indicated by the
:envvar:`FLASK_ENV` environment variable. The default is
``'production'``.
"""
return os.environ.get('FLASK_ENV') or 'production'
def get_load_dotenv(default=True):
"""Get whether the user has disabled loading dotenv files by setting
:envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the
files.
:param default: What to return if the env var isn't set.
"""
val = os.environ.get('FLASK_SKIP_DOTENV')
if not val:
return default
return val.lower() in ('0', 'false', 'no')
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
attachment_filename=None, add_etags=True,
cache_timeout=None, conditional=False, last_modified=None):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an ``X-Sendfile`` header. This however
requires support of the underlying webserver for ``X-Sendfile``.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
ETags will also be attached automatically if a `filename` is provided. You
can turn this off by setting `add_etags=False`.
If `conditional=True` and `filename` is provided, this method will try to
upgrade the response stream to support range requests. This will allow
the request to be answered with partial content response.
Please never pass filenames to this function from user sources;
you should use :func:`send_from_directory` instead.
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
.. versionchanged:: 0.12
The filename is no longer automatically inferred from file objects. If
you want to use automatic mimetype and etag support, pass a filepath via
`filename_or_fp` or `attachment_filename`.
.. versionchanged:: 0.12
The `attachment_filename` is preferred over `filename` for MIME-type
detection.
.. versionchanged:: 1.0
UTF-8 filenames, as specified in `RFC 2231`_, are supported.
.. _RFC 2231: https://tools.ietf.org/html/rfc2231#section-4
:param filename_or_fp: the filename of the file to send.
This is relative to the :attr:`~Flask.root_path`
if a relative path is specified.
Alternatively a file object might be provided in
which case ``X-Sendfile`` might not work and fall
back to the traditional method. Make sure that the
file pointer is positioned at the start of data to
send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided. If a file path is
given, auto detection happens as fallback, otherwise an
error will be raised.
:param as_attachment: set to ``True`` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to ``False`` to disable attaching of etags.
:param conditional: set to ``True`` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When ``None``
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
:param last_modified: set the ``Last-Modified`` header to this value,
a :class:`~datetime.datetime` or timestamp.
If a file was passed, this overrides its mtime.
"""
mtime = None
fsize = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
file = None
if attachment_filename is None:
attachment_filename = os.path.basename(filename)
else:
file = filename_or_fp
filename = None
if mimetype is None:
if attachment_filename is not None:
mimetype = mimetypes.guess_type(attachment_filename)[0] \
or 'application/octet-stream'
if mimetype is None:
raise ValueError(
'Unable to infer MIME-type because no filename is available. '
'Please set either `attachment_filename`, pass a filepath to '
'`filename_or_fp` or set your own MIME-type via `mimetype`.'
)
headers = Headers()
if as_attachment:
if attachment_filename is None:
raise TypeError('filename unavailable, required for '
'sending as attachment')
try:
attachment_filename = attachment_filename.encode('latin-1')
except UnicodeEncodeError:
filenames = {
'filename': unicodedata.normalize(
'NFKD', attachment_filename).encode('latin-1', 'ignore'),
'filename*': "UTF-8''%s" % url_quote(attachment_filename),
}
else:
filenames = {'filename': attachment_filename}
headers.add('Content-Disposition', 'attachment', **filenames)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers['X-Sendfile'] = filename
fsize = os.path.getsize(filename)
headers['Content-Length'] = fsize
data = None
else:
if file is None:
file = open(filename, 'rb')
mtime = os.path.getmtime(filename)
fsize = os.path.getsize(filename)
headers['Content-Length'] = fsize
data = wrap_file(request.environ, file)
rv = current_app.response_class(data, mimetype=mimetype, headers=headers, # pylint: disable=invalid-name
direct_passthrough=True)
if last_modified is not None:
rv.last_modified = last_modified
elif mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
from warnings import warn # pylint: disable=import-outside-toplevel
try:
rv.set_etag('%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode('utf-8') if isinstance(filename, text_type)
else filename
) & 0xffffffff
))
except OSError:
warn('Access %s failed, maybe it does not exist, so ignore etags in '
'headers' % filename, stacklevel=2)
if conditional:
try:
rv = rv.make_conditional(request, accept_ranges=True, # pylint: disable=invalid-name
complete_length=fsize)
except RequestedRangeNotSatisfiable:
if file is not None:
file.close()
raise
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
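# Minimal usage sketch (assumed route and file path, not from this module):
#
#     @app.route('/report')
#     def report():
#         return send_file('static/report.pdf', as_attachment=True,
#                          conditional=True)
#
# Passing a filepath (rather than a file object) lets mimetype guessing,
# etags and conditional range requests all work as documented above.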
def send_from_directory(directory, filename, **options):
"""Send a file from a given directory with :func:`send_file`. This
is a secure way to quickly expose static files from an upload folder
or something similar.
Example usage::
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
.. admonition:: Sending files and Performance
It is strongly recommended to activate either ``X-Sendfile`` support in
your webserver or (if no authentication happens) to tell the webserver
to serve files for the given path on its own without calling into the
web application for improved performance.
.. versionadded:: 0.5
:param directory: the directory where all the files are stored.
:param filename: the filename relative to that directory to
download.
:param options: optional keyword arguments that are directly
forwarded to :func:`send_file`.
"""
filename = safe_join(directory, filename)
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
try:
if not os.path.isfile(filename):
raise NotFound()
except (TypeError, ValueError) as type_value_error:
raise BadRequest() from type_value_error
options.setdefault('conditional', True)
return send_file(filename, **options)
def total_seconds(td): # pylint: disable=invalid-name
"""Returns the total seconds from a timedelta object.
:param timedelta td: the timedelta to be converted in seconds
:returns: number of seconds
:rtype: int
"""
return td.days * 60 * 60 * 24 + td.seconds
def safe_join(directory, *pathnames):
"""Safely join `directory` and zero or more untrusted `pathnames`
components.
Example usage::
@app.route('/wiki/<path:filename>')
def wiki_page(filename):
filename = safe_join(app.config['WIKI_FOLDER'], filename)
with open(filename, 'rb') as fd:
content = fd.read() # Read and process the file content...
:param directory: the trusted base directory.
:param pathnames: the untrusted pathnames relative to that directory.
:raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed
paths fall out of its boundaries.
"""
parts = [directory]
for filename in pathnames:
if filename != '':
filename = posixpath.normpath(filename)
if (
any(sep in filename for sep in _os_alt_seps)
or os.path.isabs(filename)
or filename == '..'
or filename.startswith('../')
):
raise NotFound()
parts.append(filename)
return posixpath.join(*parts)
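# Behaviour sketch (doctest-style, assuming a POSIX layout):
#
#     >>> safe_join('/var/www', 'static/app.css')
#     '/var/www/static/app.css'
#     >>> safe_join('/var/www', '../etc/passwd')   # traversal is rejected
#     Traceback (most recent call last):
#       ...
#     werkzeug.exceptions.NotFound: 404 Not Found: ...
#
# Any component that normalizes to '..' or starts with '../' raises NotFound,
# so untrusted filenames can be passed straight through.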
class locked_cached_property(object): # pylint: disable=invalid-name
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None): # pylint: disable=redefined-builtin
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def _matching_loader_thinks_module_is_package(loader, mod_name):
"""Given the loader that loaded a module and the module this function
attempts to figure out if the given module is actually a package.
"""
# If the loader can tell us if something is a package, we can
# directly ask the loader.
if hasattr(loader, 'is_package'):
return loader.is_package(mod_name)
# importlib's namespace loaders do not have this functionality but
# all the modules it loads are packages, so we can take advantage of
# this information.
elif (loader.__class__.__module__ == '_frozen_importlib' and
loader.__class__.__name__ == 'NamespaceLoader'):
return True
# Otherwise we need to fail with an error that explains what went
# wrong.
raise AttributeError(
        ('%s.is_package() method is missing but is required by Flask for '
'PEP 302 import hooks. If you do not use import hooks and '
'you encounter this error please file a bug against Flask.') %
loader.__class__.__name__)
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
mod = sys.modules[import_name]
filepath = getattr(mod, '__file__', None)
# If we don't have a filepath it might be because we are a
# namespace package. In this case we pick the root path from the
# first module that is contained in our package.
if filepath is None:
raise RuntimeError('No root path can be found for the provided '
'module "%s". This can happen because the '
'module came from an import hook that does '
'not provide file name information or because '
'it\'s a namespace package. In this case '
'the root path needs to be explicitly '
'provided.' % import_name)
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# In case the root module is a package we need to chop of the
# rightmost part. This needs to go through a helper function
# because of python 3.3 namespace packages.
if _matching_loader_thinks_module_is_package(
loader, root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class _PackageBoundObject(object):
#: The name of the package or module that this app belongs to. Do not
#: change this once it is set by the constructor.
import_name = None
#: Location of the template files to be added to the template lookup.
#: ``None`` if templates should not be added.
template_folder = None
#: Absolute path to the package on the filesystem. Used to look up
#: resources contained in the package.
root_path = None
def __init__(self, import_name, template_folder=None, root_path=None):
self.import_name = import_name
self.template_folder = template_folder
if root_path is None:
root_path = get_root_path(self.import_name)
self.root_path = root_path
self._static_folder = None
self._static_url_path = None
def _get_static_folder(self):
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
def _set_static_folder(self, value):
self._static_folder = value
static_folder = property(
_get_static_folder, _set_static_folder,
doc='The absolute path to the configured static folder.'
)
del _get_static_folder, _set_static_folder
def _get_static_url_path(self):
if self._static_url_path is not None:
return self._static_url_path
if self.static_folder is not None:
return '/' + os.path.basename(self.static_folder)
def _set_static_url_path(self, value):
self._static_url_path = value
static_url_path = property(
_get_static_url_path, _set_static_url_path,
doc='The URL prefix that the static route will be registered for.'
)
del _get_static_url_path, _set_static_url_path
@property
def has_static_folder(self):
"""This is ``True`` if the package bound object's container has a
folder for static files.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@locked_cached_property
def jinja_loader(self):
"""The Jinja loader for this package bound object.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def get_send_file_max_age(self, filename): # pylint: disable=unused-argument
"""Provides default cache_timeout for the :func:`send_file` functions.
By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
the configuration of :data:`~flask.current_app`.
Static file functions such as :func:`send_from_directory` use this
function, and :func:`send_file` calls this function on
:data:`~flask.current_app` when the given cache_timeout is ``None``. If a
cache_timeout is given in :func:`send_file`, that timeout is used;
otherwise, this method is called.
This allows subclasses to change the behavior when sending files based
on the filename. For example, to set the cache timeout for .js files
to 60 seconds::
class MyFlask(flask.Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.js'):
return 60
return flask.Flask.get_send_file_max_age(self, name)
.. versionadded:: 0.9
"""
return total_seconds(current_app.send_file_max_age_default)
def send_static_file(self, filename):
"""Function used internally to send static files from the static
folder to the browser.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError('No static folder for this object')
# Ensure get_send_file_max_age is called in all cases.
# Here, we ensure get_send_file_max_age is called for Blueprints.
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.static_folder, filename,
cache_timeout=cache_timeout)
def open_resource(self, resource, mode='rb'):
"""Opens a resource from the application's resource folder. To see
how this works, consider the following folder structure::
/myapplication.py
/schema.sql
/static
/style.css
/templates
/layout.html
/index.html
If you want to open the :file:`schema.sql` file you would do the
following::
with app.open_resource('schema.sql') as f:
contents = f.read()
do_something_with(contents)
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
if mode not in ('r', 'rb'):
raise ValueError('Resources can only be opened for reading')
return open(os.path.join(self.root_path, resource), mode)
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def get_flashed_messages(with_categories=False, category_filter=[]): # pylint: disable=dangerous-default-value
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to ``True``, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (``True`` gives a tuple, where ``False`` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to ``True`` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
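# Typical template-side consumption (sketch; assumes messages were flashed in
# a view beforehand):
#
#     {% for category, message in get_flashed_messages(with_categories=True) %}
#       <div class="flash {{ category }}">{{ message }}</div>
#     {% endfor %}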
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is ``None``, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
function results in a :exc:`~werkzeug.routing.BuildError` when the current
app does not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
it is not ``None``, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.url_build_error_handlers.append(external_url_handler)
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to ``True``, an absolute URL is generated. Server
address can be changed via ``SERVER_NAME`` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
behavior uses the same scheme as the current request, or
``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
request context is available. As of Werkzeug 0.10, this also can be set
to an empty string to build protocol-relative URLs.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError(
'Attempted to generate a URL without the application context being'
' pushed. This has to be executed when application context is'
' available.'
)
# If request specific information is available we have some extra
# features that support "relative" URLs.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the URLs external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError(
'Application was not able to create a URL adapter for request'
' independent URL generation. You might be able to fix this by'
' setting the SERVER_NAME config variable.'
)
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
# This is not the best way to deal with this but currently the
# underlying Werkzeug router does not support overriding the scheme on
# a per build call basis.
old_scheme = None
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
old_scheme = url_adapter.url_scheme
url_adapter.url_scheme = scheme
try:
try:
rv = url_adapter.build(endpoint, values, method=method, # pylint: disable=invalid-name
force_external=external)
finally:
if old_scheme is not None:
url_adapter.url_scheme = old_scheme
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
values['_scheme'] = scheme
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor) # pylint: disable=invalid-name
return rv
def is_ip(value):
"""Determine if the given string is an IP address.
Python 2 on Windows doesn't provide ``inet_pton``, so this only
checks IPv4 addresses in that environment.
:param value: value to check
:type value: str
:return: True if string is an IP address
:rtype: bool
"""
if PY2 and os.name == 'nt':
try:
socket.inet_aton(value)
return True
except socket.error:
return False
for family in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(family, value)
except socket.error:
pass
else:
return True
return False
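# Quick sanity checks (comment sketch; assumes a platform providing inet_pton):
#
#     >>> is_ip('127.0.0.1')
#     True
#     >>> is_ip('::1')
#     True
#     >>> is_ip('localhost')
#     False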
| nilq/baby-python | python |
import os
import numpy as np
from argparse import ArgumentParser
from matplotlib import pyplot as plt
from matplotlib import colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pytorch_lightning import Trainer
from models.unet.unet import UNet
from configure import get_config
def main(params):
config = get_config(**vars(params))
checkpoint_dir = os.path.join(params.checkpoint, 'checkpoints')
figures_dir = os.path.join(params.checkpoint, 'figures')
checkpoint = [os.path.join(checkpoint_dir, x) for x in os.listdir(checkpoint_dir)][0]
model = UNet.load_from_checkpoint(checkpoint_path=checkpoint)
model.freeze()
model.hparams.dataset_folder = '/media/nvm/itype_/pth_snt/2019'
model.hparams.batch_size = 1
if params.metrics:
trainer = Trainer(
precision=16,
gpus=config.device_ct,
num_nodes=config.node_ct,
log_every_n_steps=5)
trainer.test(model)
loader = model.val_dataloader()
for i, (x, y) in enumerate(loader):
out = model(x)
pred = out.argmax(1)
x, y, pred = x.squeeze().numpy(), y.squeeze().numpy(), pred.squeeze().numpy()
fig = os.path.join(figures_dir, '{}.png'.format(i))
plot_prediction(x, y, pred, model.mode, out_file=fig)
def plot_prediction(x, label, pred, mode, out_file=None):
cmap_label = colors.ListedColormap(['white', 'green', 'yellow', 'blue', 'pink', 'grey'])
bounds_l = [0, 1, 2, 3, 4, 5, 6]
bound_norm_l = colors.BoundaryNorm(bounds_l, len(bounds_l))
classes = ['flood', 'sprinkler', 'pivot', 'rainfed', 'uncultivated']
cmap_pred = colors.ListedColormap(['green', 'yellow', 'blue', 'pink', 'grey'])
bounds_p = [1, 2, 3, 4, 5]
bound_norm_p = colors.BoundaryNorm(bounds_p, len(bounds_p), extend='max')
fig, ax = plt.subplots(ncols=5, nrows=1, figsize=(20, 10))
r, g, b = x[0, :, :].astype('uint8'), x[1, :, :].astype('uint8'), x[2, :, :].astype('uint8')
rgb = np.dstack([r, g, b])
im = ax[0].imshow(rgb)
ax[0].set(xlabel='image')
divider = make_axes_locatable(ax[0])
cax = divider.append_axes('bottom', size='10%', pad=0.6)
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
mx_ndvi = x[4, :, :] / 1000.
im = ax[1].imshow(mx_ndvi, cmap='RdYlGn')
ax[1].set(xlabel='ndvi early')
divider = make_axes_locatable(ax[1])
cax = divider.append_axes('bottom', size='10%', pad=0.6)
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
std_ndvi = x[7, :, :] / 1000.
im = ax[2].imshow(std_ndvi, cmap='RdYlGn')
ax[2].set(xlabel='ndvi late')
divider = make_axes_locatable(ax[2])
cax = divider.append_axes('bottom', size='10%', pad=0.6)
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
im = ax[3].imshow(label, cmap=cmap_label, norm=bound_norm_l)
ax[3].set(xlabel='label {}'.format(np.unique(label)))
divider = make_axes_locatable(ax[3])
cax = divider.append_axes('bottom', size='10%', pad=0.6)
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
cb.set_ticks([])
im = ax[4].imshow(pred, cmap=cmap_pred, norm=bound_norm_p)
ax[4].set(xlabel='pred {}'.format(np.unique(pred)))
divider = make_axes_locatable(ax[4])
cax = divider.append_axes('bottom', size='10%', pad=0.6)
cb = fig.colorbar(im, cax=cax, orientation='horizontal')
cb.ax.set_xticklabels(classes)
plt.tight_layout()
if out_file:
plt.savefig(out_file)
plt.close()
else:
plt.show()
if __name__ == '__main__':
project = '/home/dgketchum/PycharmProjects/itype'
checkpoint_pth = os.path.join(project, 'models/unet/results/aws-2021.04.22.00.39-unet-rgbn_snt')
parser = ArgumentParser(add_help=False)
parser.add_argument('--model', default='unet')
parser.add_argument('--mode', default='rgbn')
parser.add_argument('--gpu', default='RTX')
parser.add_argument('--machine', default='pc')
parser.add_argument('--nodes', default=1, type=int)
parser.add_argument('--progress', default=0, type=int)
parser.add_argument('--workers', default=12, type=int)
parser.add_argument('--checkpoint', default=checkpoint_pth)
parser.add_argument('--metrics', default=False, type=bool)
args = parser.parse_args()
main(args)
# ========================= EOF ====================================================================
| nilq/baby-python | python |
import pymongo
client = pymongo.MongoClient('127.0.0.1', 27017)
db = client['qbot']
db.drop_collection('increase')
db.drop_collection('welcome')
db.create_collection('welcome')
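# The seeded welcome documents below carry Chinese user-facing strings;
# rough translations for reference:
#   '请仔细查看公告内的群规' -> "Please read the group rules in the announcement"
#   '格式 [C/K+]班级[+学号]-名字' ('名片格式') -> "Format: [C/K+]class[+student no.]-name" (name-card format)
#   '欢迎新同学' -> "Welcome, new classmates"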
db.welcome.insert_many([
{
'group_id': 457263503,
'type': 'card',
'icon': 'https://sakuyark.com/static/images/yyicon.jpg',
'tips': [('请仔细查看公告内的群规',), ('格式 [C/K+]班级[+学号]-名字', '名片格式')],
'text': '''欢迎新同学''',
'opened': True
},{
'group_id': 1003132999,
'type': 'card',
'tips': [('请仔细查看公告内的群规',), ('格式 [C/K+]班级[+学号]-名字', '名片格式')],
'text': '''欢迎新同学''',
'opened': True
}
])
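# Quick sanity check (illustrative, not in the original script; count_documents
# requires pymongo >= 3.7): both seeded welcome documents should be enabled.
assert db.welcome.count_documents({'opened': True}) == 2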
| nilq/baby-python | python |
import torch
import torch.nn as nn
from torch import optim
import mask_detector.trainer as trainer
from mask_detector.dataset import DatasetType, generate_train_datasets, generate_test_datasets
from mask_detector.models import BaseModel
from mask_detector.combined_predictor import Predictor_M1, submission_label_recalc
import numpy as np
import random
def train_predictor():
print(f"PyTorch version: {torch.__version__}.")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"device: {device}")
seed = 92834
seed_everything(seed)
# 데이터셋 생성
dataset_root = "/opt/ml/input/data"
train_set, valid_set = generate_train_datasets(dataset_root, random_seed=seed, validation_ratio=0.225)
# training_model("gender-classifier", DatasetType.Gender, train_set, valid_set, device, seed)
# training_model("no-mask-classifier", DatasetType.Mask_Weared, train_set, valid_set, device, seed)
# training_model("good-mask-classifier", DatasetType.Correct_Mask, train_set, valid_set, device, seed)
# training_model("o60-classifier", DatasetType.Over59Age, train_set, valid_set, device, seed)
training_model("u30-classifier", DatasetType.Under30Age, train_set, valid_set, device, seed)
def predict_label():
print(f"PyTorch version: {torch.__version__}.")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"device: {device}")
dataset_root = "/opt/ml/input/data"
dataset, answer_board = generate_test_datasets(dataset_root)
predictor = Predictor_M1(batch_size=128, dataset=dataset, answer_board=answer_board, device=device)
predictor.predict()
def training_model(model_name, dataset_type, train_set, valid_set, device, random_seed, load_prev = False, custom_epoch = None):
epochs = 32
if custom_epoch is not None:
epochs = custom_epoch
batch_size = 256
logging_interval = int(len(train_set) / (batch_size * 3))
lr = 0.0001
    # Model and metrics
model = BaseModel(num_classes=2).to(device)
if load_prev:
model.load_state_dict(torch.load(f"result/checkpoint/{model_name}/gender_last_model.pth"))
    # Consider wrapping the model when two or more GPUs are available
    # model = torch.nn.DataParallel(model)  # there is only one GPU anyway...
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
    # Create the trainee
gender_classifier_trainee = trainer.generate_trainee(
model_name,
model,
criterion,
optimizer,
device
)
    # Do not load a checkpoint (training is never stopped midway here...)
# gender_classifier_trainee.load_last_checkpoint()
gender_classifier_trainee.batch_size = batch_size
gender_classifier_trainee.log_interval = logging_interval
gender_classifier_trainee.epochs = epochs
gender_classifier_trainee.prepare_dataset(train_set, valid_set, dataset_type, random_seed=random_seed)
gender_classifier_trainee.train()
def seed_everything(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
if __name__ == "__main__":
# predict_label()
submission_label_recalc() | nilq/baby-python | python |
import logging
import os
import subprocess
import abc
from Bio.Sequencing import Ace
from .fasta_io import write_sequences
# compatible with Python 2 *and* 3:
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
try:
from tempfile import TemporaryDirectory
except ImportError:
from backports.tempfile import TemporaryDirectory
logger = logging.getLogger(__name__)
class BaseAssembly(ABC):
"""Provide Base Class for Assembly modules."""
def __init__(self, sequences, shm_dir):
"""Run assembly."""
self.sequences = sequences
with TemporaryDirectory(prefix="%s" % type(self).__name__, dir=shm_dir) as self.input_dir:
self.input_path = os.path.join(self.input_dir, 'multialign.fa')
self.write_sequences()
self.contigs = self.assemble()
@abc.abstractmethod
def assemble(self):
"""Must return contigs."""
def write_sequences(self):
"""Take sequences and write them out to a temporary file for cap3."""
write_sequences(sequences=self.sequences, output_path=self.input_path)
class Cap3Assembly(BaseAssembly):
"""A class that holds reads of a cluster and assembles them using cap3."""
seq_limit = 800
def __init__(self, sequences, shm_dir=None):
"""Asssemble sequences into contigs.
:param sequences: dictionary with query_name as key and read sequence as value
:type sequences: dictionary
>>> read1 = 'TAGTTGTAAGCGATTCTTAACTTACCTACCTACATATATATACTTACGTATTTTACTATT'
>>> read2 = 'CGAGTCGAACAAATGATCCGTCGTTTGACTAAGATCAACGCCTTTAAAGAAGTTTCAGAA'
>>> read3 = 'TACCTACCTACATATATATACTTACGTATTTTACTATTCGAGTCGAACAAATGATCCGTC'
>>> read4 = 'CGATTCTTAACTTACCTACCTACATATATATACTTACGTATTTTACTATTCGAGTCGAACA'
>>> sequences = {'read1': read1, 'read2': read2, 'read3': read3, 'read4': read4}
>>> len(Cap3Assembly(sequences).contigs)
1
>>> too_many_reads = {i: read1 for i in range(802)}
>>> len(Cap3Assembly(too_many_reads).contigs)
0
"""
super(Cap3Assembly, self).__init__(sequences=sequences, shm_dir=shm_dir)
def assemble(self):
"""Assemble sequences."""
if 0 < len(self.sequences) < self.seq_limit:
with open(os.devnull, 'w') as DEVNULL:
args = ['cap3', self.input_path, '-p', '75', '-s', '500', '-z', '2']
try:
# Use check call to ignore stdout of cap3
subprocess.check_call(args, stdout=DEVNULL, close_fds=True, timeout=120)
except subprocess.SubprocessError as e:
logger.error("An error occured while attempting to assemble reads: "
"%s\n The problematic sequences are: %s", e, self.sequences)
return Ace.ACEFileRecord().contigs
            ace_path = os.path.join(self.input_dir, 'multialign.fa.cap.ace')
            with open(ace_path) as ace_file:
                return Ace.read(ace_file).contigs
else:
# We return an empty record if there are too many sequences to assemble
return Ace.ACEFileRecord().contigs
| nilq/baby-python | python |
import copy
from ..core.api import BaseFLKnowledgeDistillationAPI
class FedMDAPI(BaseFLKnowledgeDistillationAPI):
def __init__(
self,
server,
clients,
public_dataloader,
local_dataloaders,
validation_dataloader,
criterion,
client_optimizers,
num_communication=10,
device="cpu",
consensus_epoch=1,
revisit_epoch=1,
transfer_epoch=10,
):
super().__init__(
server,
clients,
public_dataloader,
local_dataloaders,
validation_dataloader,
criterion,
num_communication,
device,
)
self.client_optimizers = client_optimizers
self.consensus_epoch = consensus_epoch
self.revisit_epoch = revisit_epoch
self.transfer_epoch = transfer_epoch
def train_client(self, public=True):
        loss_on_local_dataset = []
for client_idx in range(self.client_num):
client = self.clients[client_idx]
if public:
trainloader = self.public_dataloader
else:
trainloader = self.local_dataloaders[client_idx]
optimizer = self.client_optimizers[client_idx]
running_loss = 0.0
for data in trainloader:
x, y = data
x = x.to(self.device)
y = y.to(self.device)
optimizer.zero_grad()
loss = self.criterion(client(x), y)
loss.backward()
optimizer.step()
running_loss += loss.item()
            loss_on_local_dataset.append(copy.deepcopy(running_loss / len(trainloader)))
        return loss_on_local_dataset
def run(self):
logging = {
"loss_client_local_dataset_transfer": [],
"loss_client_public_dataset_transfer": [],
"loss_client_consensus": [],
"loss_client_revisit": [],
"loss_server_public_dataset": [],
"acc": [],
}
for i in range(self.transfer_epoch):
loss_public = self.train_client(public=True)
loss_local = self.train_client(public=False)
print(f"epoch {i} (public - pretrain): {loss_local}")
print(f"epoch {i} (local - pretrain): {loss_public}")
logging["loss_client_public_dataset_transfer"].append(loss_public)
logging["loss_client_local_dataset_transfer"].append(loss_local)
for i in range(1, self.num_communication + 1):
self.server.update()
self.server.distribute()
# Digest
temp_consensus_loss = []
for j, client in enumerate(self.clients):
for _ in range(self.consensus_epoch):
consensus_loss = client.approach_consensus(
self.client_optimizers[j]
)
print(f"epoch {i}, client {j}: {consensus_loss}")
temp_consensus_loss.append(consensus_loss)
logging["loss_client_consensus"].append(temp_consensus_loss)
# Revisit
for _ in range(self.revisit_epoch):
loss_local_revisit = self.train_client(public=False)
logging["loss_client_revisit"].append(loss_local_revisit)
# evaluation
temp_acc_list = []
for j, client in enumerate(self.clients):
acc = client.score(self.validation_dataloader)
print(f"client {j} acc score is ", acc)
temp_acc_list.append(acc)
logging["acc"].append(temp_acc_list)
return logging
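
# A minimal wiring sketch (not part of the original module). Assumptions: `server`
# and `clients` are already-constructed FedMD participants compatible with this API
# (clients expose parameters(), approach_consensus() and score()), and the three
# dataloaders are standard torch DataLoaders. None of these objects are defined in
# this file, so the example is left commented out.
#
# import torch.nn as nn
# from torch import optim
#
# criterion = nn.CrossEntropyLoss()
# optimizers = [optim.Adam(c.parameters(), lr=1e-3) for c in clients]
# api = FedMDAPI(server, clients, public_dataloader, local_dataloaders,
#                validation_dataloader, criterion, optimizers,
#                num_communication=5, device="cuda")
# log = api.run()  # dict of per-round losses and accuracies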
| nilq/baby-python | python |
import numpy as np
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
def max_pool(map, K):
# First, trim the map down such that it can be divided evenly into K by K square sections.
# Try to keep the trimming as symmetric as possible: If we trim the bottom side, trim the top side next, etc.
H, W = map.shape
    H_excess = H % K
    W_excess = W % K
    start_x = H_excess // 2
    end_x = H - (H_excess // 2)
    start_y = W_excess // 2
    end_y = W - (W_excess // 2)
# In the event that we only need to trim one edge to make that dimension divisible by K, we have over-adjusted
# in the above code. Rectify that here - is there a simple way to not make that mistake prior?
if (H_excess % 2 == 1):
end_x -= 1
if (W_excess % 2 == 1):
end_y -= 1
map = map[start_x:end_x, start_y:end_y] # Adjusted map that can now be divided into KxK sections
    # Divide the adjusted map into KxK sections. Despite the function name, each section's value is the
    # sum of its entries (a running total of the number of 1's in the section), which indicates how
    # likely that section is to be impassable.
HK = H // K
WK = W // K
weighted_map = (map[:HK * K, :WK * K].reshape(HK, K, WK, K).sum(axis=(1, 3)))
    print('Weighted reduced map:')
    print(weighted_map)
weighted_map[weighted_map > 0] *= -1
weighted_map[weighted_map == 0] = 1
grid = Grid(matrix=weighted_map)
start = grid.node(2, 0)
end = grid.node(0, 2)
finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
path, runs = finder.find_path(start, end, grid)
path_found = (len(path) != 0)
threshold = 0
while not path_found:
threshold -= 1
weighted_map[weighted_map == threshold] = 1
grid = Grid(matrix=weighted_map)
start = grid.node(2, 0)
end = grid.node(0, 2)
finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
path, runs = finder.find_path(start, end, grid)
path_found = (len(path) != 0)
print(path)
print('operations:', runs, 'path length:', len(path))
print(grid.grid_str(path=path, start=start, end=end))
    print('Highest weight allowed to drive over: ', threshold * -1)
adj_path = np.array(path)
    adj_path = K * adj_path + (K // 2)
    print(adj_path)
for pt in adj_path[:-1]:
# computeEasyPath(pt, pt + 1, stepSize?????)
print('hey') #placeholder so the red squiggly leaves me alone
return weighted_map
if __name__ == "__main__":
map = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 0, 1]])
K = 3
max_pool(map, K)
| nilq/baby-python | python |
import numpy as np
import os
from LoadpMedian import *
from LoadData import *
from gurobipy import *
from sklearn.metrics.pairwise import pairwise_distances
def kmedian_opt(distances, IP, k1, k2):
model = Model("k-median")
n = np.shape(distances)[0]
y,x = {}, {}
if IP:
var_type = GRB.BINARY
else:
var_type = GRB.CONTINUOUS
for j in range(n):
y[j] = model.addVar(obj=0, vtype=var_type, name="y[%s]"%j)
for i in range(n):
x[i,j] = model.addVar(obj=distances[i,j], vtype=var_type, \
name="x[%s,%s]"%(i,j))
model.update()
for i in range(n):
coef = [1 for j in range(n)]
var = [x[i,j] for j in range(n)]
model.addConstr(LinExpr(coef,var), "=", 1, name="Assign[%s]"%i)
for j in range(n):
for i in range(n):
model.addConstr(x[i,j], "<", y[j], name="Strong[%s,%s]"%(i,j))
model.setParam( 'OutputFlag', False )
model.__data = x,y
outputs = []
model.update()
for k in range(k1, k2):
coef = [1 for j in range(n)]
var = [y[j] for j in range(n)]
if k > k1:
model.remove(k_constr)
k_constr = model.addConstr(LinExpr(coef,var), "<", rhs=k)
model.update()
model.optimize()
if model.status == GRB.status.OPTIMAL:
outputs.append(model.objVal)
else:
outputs.append(0)
return outputs
def write_opt_bounds(distances, filepath, IP=1):
f = open(filepath, 'w+')
n = np.shape(distances)[0]
bounds = kmedian_opt(distances, IP, 1, n+1)
for k in range(1,n+1):
f.write(str(k)+" "+str(bounds[k-1])+"\n")
def write_opt_pmedians(path_files, file_bounds):
g = open(file_bounds, 'w+')
for f in os.listdir(path_files):
        print(f)
distances, n, k = LoadpMedian(path_files+"\\"+f)
bound = kmedian_opt(distances, 1, k, k+1)
g.write(f+" "+str(bound)+"\n")
def write_opt_data(path_files, file_bounds):
g = open(file_bounds, 'w+')
for f in os.listdir(path_files):
        print(f)
X, y, n, k = LoadData(path_files+"\\"+f)
distances = pairwise_distances(X)
bound = kmedian_opt(distances, 1, k, k+1)
g.write(f+" "+str(bound)+"\n")
def write_opt_hier_data(path_files, path_bounds):
for f in os.listdir(path_files):
        print(f)
X, y, n, k = LoadData(path_files+"\\"+f)
distances = pairwise_distances(X)
write_opt_bounds(distances, path_bounds+"\\"+f)
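
if __name__ == "__main__":
    # Minimal usage sketch (assumes a licensed Gurobi installation; the data is
    # synthetic): solve the k-median IP for k = 3 over 20 random 2-D points and
    # print the optimal objective value.
    rng = np.random.RandomState(0)
    D = pairwise_distances(rng.rand(20, 2))
    print(kmedian_opt(D, IP=1, k1=3, k2=4))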
| nilq/baby-python | python |
#!/usr/bin/env python3
"""
Created on 2 Mar 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.data.duplicates import Duplicates
from scs_core.data.json import JSONify
from scs_core.data.path_dict import PathDict
# --------------------------------------------------------------------------------------------------------------------
data = [
'{"rec": "2019-02-01T01:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 34.0}}}',
'{"rec": "2019-02-01T02:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 34.0}}}',
'{"rec": "2019-02-01T03:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 47.0}}}',
'{"rec": "2019-02-01T04:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 55.0}}}',
'{"rec": "2019-02-01T05:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 59.0}}}',
'{"rec": "2019-02-01T06:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 61.0}}}',
'{"rec": "2019-02-01T04:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 55.0}}}',
'{"rec": "2019-02-01T05:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 59.0}}}',
'{"rec": "2019-02-01T05:00:00+00:00", "val": {"NO2": {"status": "P", "dns": 59.0}}}'
]
# --------------------------------------------------------------------------------------------------------------------
dupes = Duplicates()
print("dupes: %s" % dupes)
print("-")
index = 0
# test...
for jstr in data:
index += 1
datum = PathDict.construct_from_jstr(jstr)
key = datum.node('rec')
is_duplicate = dupes.test(index, key, datum)
print("key: %s is_duplicate: %s" % (key, is_duplicate))
print("dupes: %s" % dupes)
print("-")
# report...
print("keys: %s" % dupes.keys)
print("matched_key_count: %s" % dupes.matched_key_count)
print("max_index: %s" % dupes.max_index)
print("-")
print("matched_keys: %s" % [key for key in dupes.matched_keys()])
print("-")
for count in dupes.match_counts():
print(JSONify.dumps(count))
print("-")
for match in dupes.matches():
print(JSONify.dumps(match))
| nilq/baby-python | python |
import validator.validator as validator
from validator.test.fixtures import Fixtures
class TestGetSchemaInfoFromPointer(object):
fxt = Fixtures('get_schema_info_from_pointer')
def do_fxt_test(self, fxt_path):
fixture = self.fxt.get_anymarkup(self.fxt.path(fxt_path))
obj = validator.get_schema_info_from_pointer(
fixture['schema'], fixture['ptr'],
fixture.get('schemas_bundle', {}))
assert fixture['magic'] == obj
def test_object(self):
self.do_fxt_test('object.yml')
def test_object_array(self):
self.do_fxt_test('object_array.yml')
def test_object_object(self):
self.do_fxt_test('object_object.yml')
def test_complex(self):
self.do_fxt_test('complex.yml')
def test_external_ref(self):
self.do_fxt_test('external_ref.yml')
| nilq/baby-python | python |
#!/usr/bin/python
#####################################
### CIS SLOT FILLING SYSTEM ####
### 2014-2015 ####
### Author: Heike Adel ####
#####################################
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '../../cnnScripts'))
import pickle
import numpy
import theano
import theano.tensor as T
from utils import readConfig, getInput
from testCNN_binary import CNN
from utils_training import getFScore, sgd_updates
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
if len(sys.argv) != 2:
logging.error("please pass the config file for the binary CNN as parameter")
exit()
configfile = sys.argv[1]
config = readConfig(configfile)
trainfile = config["train"]
logger.info("trainfile " + trainfile)
devfile = config["dev"]
logger.info("devfile " + devfile)
wordvectorfile = config["wordvectors"]
networkfile = config["net"]
logger.info("networkfile " + networkfile)
learning_rate = float(config["lrate"])
logger.info("learning rate " + str(learning_rate))
batch_size = int(config["batchsize"])
logger.info("batch size " + str(batch_size))
myLambda1 = 0
if "lambda1" in config:
myLambda1 = float(config["lambda1"])
myLambda2 = 0
if "lambda2" in config:
myLambda2 = float(config["lambda2"])
logger.info("lambda1 " + str(myLambda1))
logger.info("lambda2 " + str(myLambda2))
# load model architecture and word vectors etc
binaryCNN = CNN(configfile, train = True)
trainfilehandle = open(trainfile)
inputMatrixTrain_a, inputMatrixTrain_b, inputMatrixTrain_c, length_a, length_b, length_c, inputFeaturesTrain, resultVectorTrain = getInput(trainfilehandle, binaryCNN.representationsize, binaryCNN.contextsize, binaryCNN.filtersize, binaryCNN.wordvectors, binaryCNN.vectorsize)
trainfilehandle.close()
devfilehandle = open(devfile)
inputMatrixDev_a, inputMatrixDev_b, inputMatrixDev_c, length_a, length_b, length_c, inputFeaturesDev, resultVectorDev = getInput(devfilehandle, binaryCNN.representationsize, binaryCNN.contextsize, binaryCNN.filtersize, binaryCNN.wordvectors, binaryCNN.vectorsize)
devfilehandle.close()
dt = theano.config.floatX
train_set_xa = theano.shared(numpy.matrix(inputMatrixTrain_a, dtype = dt), borrow=True)
valid_set_xa = theano.shared(numpy.matrix(inputMatrixDev_a, dtype = dt), borrow=True)
train_set_xb = theano.shared(numpy.matrix(inputMatrixTrain_b, dtype = dt), borrow=True)
valid_set_xb = theano.shared(numpy.matrix(inputMatrixDev_b, dtype = dt), borrow=True)
train_set_xc = theano.shared(numpy.matrix(inputMatrixTrain_c, dtype = dt), borrow=True)
valid_set_xc = theano.shared(numpy.matrix(inputMatrixDev_c, dtype = dt), borrow=True)
train_set_y = theano.shared(numpy.array(resultVectorTrain, dtype = numpy.dtype(numpy.int32)), borrow=True)
train_mlp = theano.shared(numpy.matrix(inputFeaturesTrain, dtype = dt), borrow=True)
valid_set_y = theano.shared(numpy.array(resultVectorDev, dtype = numpy.dtype(numpy.int32)), borrow=True)
valid_mlp = theano.shared(numpy.matrix(inputFeaturesDev, dtype = dt), borrow=True)
index = T.lscalar() # index to a [mini]batch
lr = T.scalar('lr', dt)
params = binaryCNN.params
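# cost: negative log-likelihood plus L2 (myLambda2) and L1 (myLambda1) penalties
# on the weights of the output, hidden and convolutional layers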
cost = binaryCNN.layer3.negative_log_likelihood(binaryCNN.y) + myLambda2 * (T.sum(binaryCNN.layer3.params[0] ** 2) + T.sum(binaryCNN.layer2.params[0] ** 2) + T.sum(binaryCNN.layer0a.params[0] ** 2)) + myLambda1 * (T.sum(abs(binaryCNN.layer3.params[0])) + T.sum(abs(binaryCNN.layer2.params[0])) + T.sum(abs(binaryCNN.layer0a.params[0])))
grads = T.grad(cost, params)
updates = sgd_updates(params, cost, lr)
# define theano functions
start = index * batch_size
stop = (index + 1) * batch_size
train = theano.function([index, lr], cost, updates = updates, givens = {
binaryCNN.xa: train_set_xa[start : stop],
binaryCNN.xb: train_set_xb[start : stop],
binaryCNN.xc: train_set_xc[start : stop],
binaryCNN.additionalFeatures: train_mlp[start : stop],
binaryCNN.y : train_set_y[start : stop]})
validate = theano.function([index], binaryCNN.layer3.results(), givens = {
binaryCNN.xa: valid_set_xa[start : stop],
binaryCNN.xb: valid_set_xb[start : stop],
binaryCNN.xc: valid_set_xc[start : stop],
binaryCNN.additionalFeatures: valid_mlp[start : stop]})
logger.info("... training")
# train model
n_epochs=100
best_params = []
best_fscore = -1
last_fscore = -1
noImprovement = 0
maxNoImprovement = 5
epoch = 0
done_looping = False
n_valid_batches = inputMatrixDev_a.shape[0] // batch_size
maxNumPerEpoch = 50000  # change according to available computing resources
numPerEpoch = min(inputMatrixTrain_a.shape[0], maxNumPerEpoch)
n_train_batches = numPerEpoch // batch_size
while (epoch < n_epochs) and (not done_looping):
logger.info('epoch = ' + str(epoch))
epoch = epoch + 1
# shuffling data for batch
randomIndices = numpy.random.permutation(inputMatrixTrain_a.shape[0])
randomIndicesThis = randomIndices[0:numPerEpoch]
train_set_xa.set_value(numpy.matrix(inputMatrixTrain_a[randomIndicesThis], dtype = dt), borrow=True)
train_set_xb.set_value(numpy.matrix(inputMatrixTrain_b[randomIndicesThis], dtype = dt), borrow=True)
train_set_xc.set_value(numpy.matrix(inputMatrixTrain_c[randomIndicesThis], dtype = dt), borrow=True)
train_mlp.set_value(numpy.matrix(inputFeaturesTrain[randomIndicesThis], dtype = dt), borrow=True)
thisResultVectorTrain = []
for ri in randomIndicesThis:
thisResultVectorTrain.append(resultVectorTrain[ri])
train_set_y.set_value(numpy.array(thisResultVectorTrain, dtype = numpy.dtype(numpy.int32)), borrow=True)
    for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
logger.debug('training @ iter = ' + str(iter))
cost_ij = train(minibatch_index, learning_rate)
    confidence = [validate(i) for i in range(n_valid_batches)]
this_fscore = getFScore(confidence, resultVectorDev, batch_size)
logger.info('epoch ' + str(epoch) + ", learning_rate " + str(learning_rate) + ", validation fscore " + str(this_fscore))
# if we got the best validation score until now
if this_fscore > best_fscore:
# save best validation score and iteration number
best_fscore = this_fscore
best_iter = iter
best_params = []
for p in binaryCNN.params:
best_params.append(p.get_value(borrow=False))
else:
if this_fscore > last_fscore:
noImprovement -= 1
else:
noImprovement += 1
learning_rate /= 2
print "reducing learning rate to " + str(learning_rate)
last_fscore = this_fscore
if noImprovement > maxNoImprovement or learning_rate < 0.0000001:
done_looping = True
break
logger.info('Optimization complete.')
# save best parameters
save_file = open(networkfile, 'wb')
for p in best_params:
    pickle.dump(p, save_file, -1)
| nilq/baby-python | python |
from celery.schedules import crontab
from datetime import timedelta
from decimal import Decimal
import logging
DEBUG = True
TESTING = False
ASSETS_DEBUG = True
CSRF_SESSION_KEY = "blahblahblah"
SECRET_KEY = "blahblahblah"
GEOCITY_DAT_LOCATION = "/scout/scout/libs/GeoLiteCity.dat"
LOGGING_LEVEL = logging.DEBUG
LOGGING_FILE = "/scout/app.log"
PORT = 8182
SENTRY_CONFIG = {
'dsn': '',
'environment': 'matts dev'
}
MONGODB_SETTINGS = {
'DB': 'scout-m',
'HOST': 'mongodb'
}
CELERYBEAT_SCHEDULE = {
}
MAX_CONTENT_LENGTH = 10 * 1024 * 1024  # 10MB
# MAILGUN
MAILGUN_DOMAIN = '.mailgun.org'
MAILGUN_API_KEY = 'key-'
MAILGUN_DEFAULT_FROM = 'Mailgun Sandbox <postmaster@.mailgun.org>'
# REDIS
REDIS_HOST = 'redis'
REDIS_PORT = 6379
REDIS_DB = 3
# CELERY
CELERY_TASK_SERIALIZER = 'custom_json'
CELERY_ACCEPT_CONTENT = ['custom_json']
CELERY_RESULT_BACKEND = "redis/3"
CELERY_BROKER_URL = "redis/3"
ASPIRE_BLOCK_URL = 'http://aspireblock:4100/api/'
ASPIRE_BLOCK_USER = 'rpc'
ASPIRE_BLOCK_PASS = 'rpc'
ASPIRE_GAS_HOST = 'gasp'
ASPIRE_GAS_PORT = 8332
ASPIRE_GAS_USER = 'rpc'
ASPIRE_GAS_PASS = 'rpc'
| nilq/baby-python | python |
import logging
import os
import csv
from typing import List
from ... import InputExample
import numpy as np
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 1 outputs. It measure the
accuracy of the predict class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
See CEBinaryClassificationEvaluator for an evaluator that determines automatically the optimal threshold.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str='', threshold: float = 0.5, write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else '') + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc*100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
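
# Usage sketch (assumptions: the sentence-transformers CrossEncoder class with a
# single-output head; the model name and sentence pairs below are placeholders,
# not part of this file). Left commented out because this module is library code.
#
# from sentence_transformers.cross_encoder import CrossEncoder
# examples = [InputExample(texts=["A man eats.", "A person eats."], label=1),
#             InputExample(texts=["A man eats.", "A plane lands."], label=0)]
# evaluator = CEBinaryAccuracyEvaluator.from_input_examples(examples, name="dev")
# model = CrossEncoder("cross-encoder/qnli-distilroberta-base", num_labels=1)
# accuracy = evaluator(model)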
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/6
@Author : AnNing
"""
import os
import h5py
import matplotlib.pyplot as plt
import numpy as np
from lib.lib_read_ssi import FY4ASSI, FY3DSSI
from lib.lib_database import add_result_data, exist_result_data
from lib.lib_proj import fill_points_2d_nan
from lib.lib_constant import BASEMAP_FY4_4KM, CHINA_RANGE_MASK_1KM
def get_china_mask_projlut_fy4_1km():
"""
    Shape mask for the China region.
:return:
"""
with h5py.File(CHINA_RANGE_MASK_1KM, 'r') as hdf:
mask_china = hdf.get('Mask')[:]
shape = (4501, 7001)
mask = np.zeros(shape, dtype=np.int8)
mask[100:100+3600, 301:301+6200] = mask_china
return mask > 0
def plot_image_disk(*args, **kwargs):
resultid = kwargs.get('resultid')
if resultid is not None and 'fy4a' in resultid.lower():
plot_fy4a_image_disk(*args, **kwargs)
else:
        print('plot_image_disk does not support this resolution: {}'.format(resultid))
def plot_fy4a_image_disk(data, out_file='test.jpg', resolution_type='4km', vmin=0, vmax=1000, **kwargs):
if '4km' in resolution_type.lower():
ditu = plt.imread(BASEMAP_FY4_4KM)
row, col, _ = ditu.shape
fig = plt.figure(figsize=(col / 100, row / 100), dpi=100)
fig.figimage(ditu)
else:
        print('plot_image_disk does not support this resolution: {}'.format(resolution_type))
return
fig.figimage(data, vmin=vmin, vmax=vmax, cmap='jet', alpha=0.7)
fig.patch.set_alpha(0)
plt.savefig(out_file, transparent=True)
fig.clear()
plt.close()
print("监测到数据的最小值和最大值:{}, {}".format(np.nanmin(data), np.nanmax(data)))
print('>>> :{}'.format(out_file))
def plot_fy3_image_map(data, out_file='test.jpg', resolution_type='1km', vmin=0, vmax=2, **kwargs):
if '1km' in resolution_type.lower():
row, col = data.shape
else:
        print('plot_fy3_image_map: unsupported resolution: {}'.format(resolution_type))
return
fig = plt.figure(figsize=(col / 100, row / 100), dpi=100)
fig.figimage(data, vmin=vmin, vmax=vmax, cmap='jet')
fig.patch.set_alpha(0)
plt.savefig(out_file, transparent=True)
fig.clear()
plt.close()
print("监测到数据的最小值和最大值:{}, {}".format(np.nanmin(data), np.nanmax(data)))
print('>>> :{}'.format(out_file))
def plot_image_map(*args, **kwargs):
resultid = kwargs['resultid']
if 'fy4a' in resultid.lower():
plot_fy4_image_map(*args, **kwargs)
elif 'fy3d' in resultid.lower():
plot_fy3_image_map(*args, **kwargs)
else:
        print('plot_image_map: unsupported satellite/resolution: {}'.format(resultid))
def plot_fy4_image_map(data, out_file='test.jpg', resolution_type='4km', vmin=0, vmax=1000, interp=3, **kwargs):
if '4km' in resolution_type.lower():
projlut = FY4ASSI.get_lonlat_projlut_4km()
mask = None
elif '1kmcorrect' in resolution_type.lower():
projlut = FY4ASSI.get_lonlat_projlut_1km()
interp = 1
mask = get_china_mask_projlut_fy4_1km()
elif '1km' in resolution_type.lower():
projlut = FY4ASSI.get_lonlat_projlut_1km()
mask = get_china_mask_projlut_fy4_1km()
else:
        raise ValueError('plot_image_map does not support this resolution: {}'.format(resolution_type))
row, col = projlut['row_col']
image_data = np.full((row, col), np.nan, dtype=np.float32)
proj_i = projlut['prj_i']
proj_j = projlut['prj_j']
pre_i = projlut['pre_i']
pre_j = projlut['pre_j']
    # Filter out data that falls outside the projection grid
valid_index = np.logical_and.reduce((proj_i >= 0, proj_i < row,
proj_j >= 0, proj_j < col))
proj_i = proj_i[valid_index]
proj_j = proj_j[valid_index]
pre_i = pre_i[valid_index]
pre_j = pre_j[valid_index]
image_data[proj_i, proj_j] = data[pre_i, pre_j]
fig = plt.figure(figsize=(col / 100, row / 100), dpi=100)
for i in range(interp):
fill_points_2d_nan(image_data)
    # Apply the China shape mask to the 1 km data
if mask is not None:
image_data[~mask] = np.nan
fig.figimage(image_data, vmin=vmin, vmax=vmax, cmap='jet')
fig.patch.set_alpha(0)
plt.savefig(out_file, transparent=True)
fig.clear()
plt.close()
print("监测到数据的最小值和最大值:{}, {}".format(np.nanmin(data), np.nanmax(data)))
print('>>> :{}'.format(out_file))
def plot_map_full(in_file, vmin=0, vmax=1000, resultid='', planid='', datatime='', resolution_type=None):
print('plot_map_orbit <<<:{}'.format(in_file))
if not os.path.isfile(in_file):
        print('Data file does not exist: {}'.format(in_file))
return
dir_ = os.path.dirname(in_file)
in_filename = os.path.basename(in_file)
if 'fy4a' in resultid.lower():
datas = FY4ASSI(in_file)
elif 'fy3d' in resultid.lower():
datas = FY3DSSI(in_file)
else:
        print('Unsupported satellite: {}'.format(resultid))
return
datas_ = {
'Itol': datas.get_ssi,
'Ib': datas.get_ib,
'Id': datas.get_id,
'G0': datas.get_g0,
'Gt': datas.get_gt,
'DNI': datas.get_dni,
}
for element in datas_.keys():
try:
data = datas_[element]()
except Exception as why:
print(why)
            print('Error reading data for element: {}'.format(element))
data = None
if data is not None:
            # Draw the full-disk quick-look image
area_type = 'Full_DISK'
out_filename1 = in_filename + '_{}_{}.PNG'.format(area_type, element)
out_file1 = os.path.join(dir_, out_filename1)
try:
if not os.path.isfile(out_file1):
plot_image_disk(data, out_file=out_file1, resultid=resultid, resolution_type=resolution_type,
vmin=vmin, vmax=vmax)
else:
                    print('File already exists, skipping: {}'.format(out_file1))
                # Record the result in the database
if os.path.isfile(out_file1) and not exist_result_data(resultid=resultid, datatime=datatime,
resolution_type=resolution_type,
element=element, area_type=area_type):
add_result_data(resultid=resultid, planid=planid, address=out_file1, datatime=datatime,
resolution_type=resolution_type, area_type=area_type, element=element)
except Exception as why:
print(why)
                print('Error drawing {} image: {}'.format(area_type, out_file1))
            # Draw the lat/lon projected map
area_type = 'Full_LATLON'
out_filename2 = in_filename + '_{}_{}.PNG'.format(area_type, element)
out_file2 = os.path.join(dir_, out_filename2)
# try:
if not os.path.isfile(out_file2):
plot_image_map(data, out_file=out_file2, resultid=resultid, resolution_type=resolution_type,
vmin=vmin,
vmax=vmax)
else:
                print('File already exists, skipping: {}'.format(out_file2))
            # Record the result in the database
if os.path.isfile(out_file2) and not exist_result_data(resultid=resultid, datatime=datatime,
resolution_type=resolution_type,
element=element, area_type=area_type):
add_result_data(resultid=resultid, planid=planid, address=out_file2, datatime=datatime,
resolution_type=resolution_type, area_type=area_type, element=element)
# except Exception as why:
# print(why)
            #     print('Error drawing {} image: {}'.format(area_type, out_file2))
if __name__ == '__main__':
i_dir = r'D:\SourceData\RemoteSensing\FY4A\AGRI\L2\SSI\20190630'
i_filename = 'FY4A-_AGRI--_N_DISK_1047E_L2-_SSI-_MULT_NOM_20190630000000_20190630001459_4000M_V0001.NC'
i_file = os.path.join(i_dir, i_filename)
plot_map_full(i_file)
| nilq/baby-python | python |
# Problem: https://docs.google.com/document/d/1B-bTbxNllKj0wbou5h4iaLyzgW3EbF3un0-5QKLVcy0/edit?usp=sharing
h=int(input())
w=int(input())
for _ in range(h):
for _ in range(w):
print('O',end='')
print()
| nilq/baby-python | python |