content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# TemperatureConverter Tests
import unittest
from TemperatureConverter import TemperatureConverter
from TemperatureErrors import *
class KnownValues(unittest.TestCase):
    """Check TemperatureConverter against known-good values and
    round-trip / range-error behaviour."""

    # (celsius, fahrenheit) pairs known to be equivalent.
    knownValues = (
        (0, 32),
        (100, 212)
    )
    # One shared converter instance is enough: conversions are stateless.
    temp_converter = TemperatureConverter()

    # --- direct value conversions -----------------------------------
    def testCtoF(self):
        """cToF should return the known Fahrenheit value for the
        provided Celsius value."""
        for celsius, fahrenheit in self.knownValues:
            self.assertEqual(self.temp_converter.cToF(celsius), fahrenheit)

    def testCtoK(self):
        """cToK should return 273.15 Kelvin for 0 Celsius"""
        self.assertEqual(self.temp_converter.cToK(0), 273.15)

    def testFtoC(self):
        """fToC should return the known Celsius value for the
        provided Fahrenheit value."""
        for celsius, fahrenheit in self.knownValues:
            self.assertEqual(self.temp_converter.fToC(fahrenheit), celsius)

    def testFtoK(self):
        """fToK should return 273.15 Kelvin for 32 Fahrenheit"""
        self.assertEqual(self.temp_converter.fToK(32), 273.15)

    def testKtoC(self):
        """kToC should return 0 Celsius for 273.15 Kelvin"""
        self.assertEqual(self.temp_converter.kToC(273.15), 0)

    def testKtoF(self):
        """kToF should return 32 Fahrenheit for 273.15 Kelvin"""
        self.assertEqual(self.temp_converter.kToF(273.15), 32)

    # --- round-trip sanity checks -----------------------------------
    def testCToKtoC(self):
        """Celsius to Kelvin to Celsius yields initial Celsius value"""
        start = 20
        round_tripped = self.temp_converter.kToC(self.temp_converter.cToK(start))
        self.assertEqual(start, round_tripped)

    def testCToFToC(self):
        """Celsius to Fahrenheit to Celsius yields initial Celsius value"""
        start = 20
        round_tripped = self.temp_converter.fToC(self.temp_converter.cToF(start))
        self.assertEqual(start, round_tripped)

    def testKToFToK(self):
        """Kelvin to Fahrenheit to Kelvin yields initial Kelvin value"""
        start = 20
        round_tripped = self.temp_converter.fToK(self.temp_converter.kToF(start))
        self.assertEqual(start, round_tripped)

    # --- out-of-range inputs must raise -----------------------------
    def testCtoFRange(self):
        """cToF should raise CelsiusRangeError if Celsius is less than -273.15"""
        self.assertRaises(CelsiusRangeError, self.temp_converter.cToF, -274)

    def testCtoKRange(self):
        """cToK should raise CelsiusRangeError if Celsius is less than -273.15"""
        self.assertRaises(CelsiusRangeError, self.temp_converter.cToK, -274)

    def testFtoCRange(self):
        """fToC should raise FahrenheitRangeError if Fahrenheit is less than -459.67"""
        self.assertRaises(FahrenheitRangeError, self.temp_converter.fToC, -460)

    def testFtoKRange(self):
        """fToK should raise FahrenheitRangeError if Fahrenheit is less than -459.67"""
        self.assertRaises(FahrenheitRangeError, self.temp_converter.fToK, -460)

    def testKtoCRange(self):
        """kToC should raise KelvinRangeError if Kelvin is less than 0"""
        self.assertRaises(KelvinRangeError, self.temp_converter.kToC, -1)

    def testKtoFRange(self):
        """kToF should raise KelvinRangeError if Kelvin is less than 0"""
        self.assertRaises(KelvinRangeError, self.temp_converter.kToF, -1)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
# Ask the user for a BRL amount and print how many JPY it buys.
# Exchange rate as of 2020/11/01: JP¥ 1.00 = R$ 0.060 :)
BRL_PER_JPY = 0.060

brl = float(input('Por favor, digite quanto dinheiro você tem na carteira: R$ '))
jpy = brl / BRL_PER_JPY
print('Com R$ {:.2f} você pode comprar ¥ {:.2f}.'.format(brl, jpy))
|
nilq/baby-python
|
python
|
def print_error(message):
    """Print *message* to stdout, prefixed as an error.

    :param message: error description (string) to display
    """
    # Fixed typo in the user-facing prefix: "occured" -> "occurred".
    print("An error occurred: " + message)
# Translation table deleting tab, space, LF and CR; built once at import.
_WS_TABLE = str.maketrans('', '', '\t \n\r')


def remove_white_chars(line):
    """Return *line* with every tab, space, LF and CR character removed.

    Uses a single C-level str.translate pass instead of four chained
    str.replace calls.

    :param line: input string
    :return: the string with whitespace characters stripped out
    """
    return line.translate(_WS_TABLE)
def remove_comments_and_empty_lines(text):
    """Strip whitespace, '#' comments and blank lines from *text*.

    Each line has all whitespace characters removed, anything from the
    first '#' onward is cut, and lines that end up empty are discarded.
    The surviving lines are joined back together with newlines.

    :param text: multi-line input string
    :return: cleaned text with one entry per surviving line
    """
    ws_table = str.maketrans('', '', '\t \n\r')
    kept = []
    for raw_line in text.split("\n"):
        line = raw_line.translate(ws_table)
        comment_start = line.find('#')
        if comment_start >= 0:
            line = line[:comment_start]
        # BUG FIX: the previous version tested emptiness *before*
        # removing the comment, so a comment-only line survived as an
        # empty line in the output — contradicting the function's name.
        if line:
            kept.append(line)
    return "\n".join(kept)
def get_path_of_main_file():
    """Return the absolute directory containing this source file.

    :return: directory of this module, symlinks resolved
    """
    # NOTE(review): no `import os` is visible at the top of this file
    # chunk; a function-local import keeps this safe either way.
    import os
    return os.path.dirname(os.path.realpath(__file__))
|
nilq/baby-python
|
python
|
import os
import logging
from dp4py_config.section import Section
from dp4py_config.utils import bool_env
from dp4py_sanic.config import CONFIG as SANIC_CONFIG
from dp_conceptual_search.config.utils import read_git_sha
def get_log_level(variable: str, default: str = "INFO"):
    """
    Returns the configured log level, and logs error if invalid

    Reads the level name from environment variable *variable*, falling
    back to *default* when unset, upper-cases it, and resolves it via
    dp4py_sanic.  An unknown name is logged and aborts the process.

    :param variable: name of the environment variable to read
    :param default: level name used when the variable is unset
    :return: the resolved log level
    """
    from dp4py_sanic.logging.log_config import get_level_name

    name = os.environ.get(variable, default)
    if isinstance(name, str):
        name = name.upper()
    try:
        return get_level_name(name)
    except NotImplementedError as e:
        logging.error("Caught exception parsing log level", exc_info=e)
        raise SystemExit()
# APP — top-level application metadata; sanic settings come from dp4py_sanic.
APP_CONFIG = Section("APP config")
APP_CONFIG.sanic = SANIC_CONFIG
# Version is the git SHA read at startup by read_git_sha().
APP_CONFIG.app_version = read_git_sha()
APP_CONFIG.title = 'dp-conceptual-search'
APP_CONFIG.description = 'Dedicated search API for digital publishing.'

# API — feature flags, all read from boolean environment variables.
API_CONFIG = Section("API config")
API_CONFIG.enabled_prometheus_metrics = bool_env('ENABLE_PROMETHEUS_METRICS', False)
API_CONFIG.testing = bool_env("TESTING", False)
API_CONFIG.conceptual_search_enabled = bool_env("CONCEPTUAL_SEARCH_ENABLED", False)
API_CONFIG.redirect_conceptual_search = bool_env("REDIRECT_CONCEPTUAL_SEARCH", False)
API_CONFIG.recommended_search_enabled = bool_env("RECOMMENDED_SEARCH_ENABLED", False)

# ML — path to the word2vec model used by the unsupervised pipeline.
ML_CONFIG = Section("Machine Learning config")
ML_CONFIG.unsupervised_model_filename = os.environ.get("UNSUPERVISED_MODEL_FILENAME",
                                                       "./dp_conceptual_search/ml/data/word2vec/ons_supervised.vec")

# FastText — location and tuning of the external dp-fasttext service.
FASTTEXT_CONFIG = Section("FastText config")
FASTTEXT_CONFIG.fasttext_host = os.environ.get("DP_FASTTEXT_HOST", "localhost")
FASTTEXT_CONFIG.fasttext_port = int(os.environ.get("DP_FASTTEXT_PORT", 5100))
FASTTEXT_CONFIG.num_labels = int(os.environ.get("FASTTEXT_NUM_LABELS", 5))
FASTTEXT_CONFIG.threshold = float(os.environ.get("FASTTEXT_THRESHOLD", 0.0))

# Elasticsearch — connection settings; timeout is in the unit expected
# by the ES client (presumably seconds — confirm against its usage).
ELASTIC_SEARCH_CONFIG = Section("Elasticsearch config")
ELASTIC_SEARCH_CONFIG.server = os.environ.get("ELASTIC_SEARCH_SERVER", "http://localhost:9200")
ELASTIC_SEARCH_CONFIG.async_enabled = bool_env("ELASTIC_SEARCH_ASYNC_ENABLED", True)
ELASTIC_SEARCH_CONFIG.timeout = int(os.environ.get("ELASTIC_SEARCH_TIMEOUT", 1000))
ELASTIC_SEARCH_CONFIG.elasticsearch_log_level = get_log_level("ELASTICSEARCH_LOG_LEVEL", default="INFO")

# Search — index names and paging limits for the search API.
SEARCH_CONFIG = Section("Search API config")
SEARCH_CONFIG.default_search_index = "ons"
SEARCH_CONFIG.search_index = os.environ.get("SEARCH_INDEX", SEARCH_CONFIG.default_search_index)
SEARCH_CONFIG.departments_search_index = "departments"
SEARCH_CONFIG.results_per_page = int(os.getenv("RESULTS_PER_PAGE", 10))
SEARCH_CONFIG.max_visible_paginator_link = int(os.getenv("MAX_VISIBLE_PAGINATOR_LINK", 5))
SEARCH_CONFIG.max_request_size = int(os.getenv("SEARCH_MAX_REQUEST_SIZE", 200))
|
nilq/baby-python
|
python
|
# Re-export QuadraticModel as this package's sole public name.
from federatedscope.tabular.model.quadratic import QuadraticModel

__all__ = ['QuadraticModel']
|
nilq/baby-python
|
python
|
string = """0: "HOW ResidentSleeper LONG ResidentSleeper CAN ResidentSleeper THIS ResidentSleeper GO ResidentSleeper ON ResidentSleeper"
1: "REPPIN LONDON ONTARIO 519 GANG"
2: "my eyes"
3: "CLULG"
4: "ResidentSleeper"
5: "mvp ward"
6: "4Head EU WATCHING NA 4Head STILL AWAKE 4Head NO JOB 4Head NO RESPONSIBILITIES 4Head TYPICAL EU 4Head"
7: "***"
8: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
9: "JUST GO IN ResidentSleeper"
10: "PogU"
11: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
12: "EU WON A DREAMHACK TOURAMENT WITH NO KOREANS LUL"
13: "zonySleeper"
14: "santorin has to make a play"
15: "CLG 10th place"
16: "FIGHTTTTT"
17: "ResidentSleeper ResidentSleeper"
18: "IS THIS GROUNDHOGS GAME?? ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
19: "6k lead and doing nothing LUL"
20: "HOW ResidentSleeper LONG ResidentSleeper CAN ResidentSleeper THIS ResidentSleeper GO ResidentSleeper ON ResidentSleeper"
21: "HEY CASTERS! how many stacks does turtle have??"
22: "NARAM"
23: "LOOOOOOOOOL ResidentSleeper"
24: "ResidentSleeper ResidentSleeper ResidentSleeper"
25: "SKIP ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
26: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
27: "ResidentSleeper exciting game ResidentSleeper exciting game ResidentSleeper exciting game ResidentSleeper exciting game"
28: "WHY DOES VIPER HAVE 700G BOUNTY WTF"
29: "bioftost sweepers are awful LUL"""
clean = string.replace("\n", "").split('"')
arr = []
for i in range(len(clean)):
if i % 2 != 0:
arr.append(clean[i])
print(arr)
|
nilq/baby-python
|
python
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_delete_group"
class InstanceDeleteRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'instance_delete_runners'
_runner_cls = 'InstanceDeleteRunner'
# Deletion may only start after the instance exists, and must wait for
# every group that still uses the instance to finish first.
@test(depends_on_groups=[groups.INST_CREATE_WAIT],
      groups=[GROUP, groups.INST_DELETE],
      runs_after_groups=[groups.INST_INIT_DELETE,
                         groups.INST_ACTIONS,
                         groups.INST_UPGRADE,
                         groups.INST_ACTIONS_RESIZE_WAIT,
                         groups.BACKUP_INST_DELETE,
                         groups.BACKUP_INC_INST_DELETE,
                         groups.CFGGRP_INST_DELETE,
                         groups.DB_ACTION_DELETE,
                         groups.DB_ACTION_INST_DELETE,
                         groups.MODULE_INST_DELETE,
                         groups.REPL_INST_DELETE_WAIT,
                         groups.ROOT_ACTION_INST_DELETE,
                         groups.USER_ACTION_DELETE,
                         groups.USER_ACTION_INST_DELETE])
class InstanceDeleteGroup(TestGroup):
    """Test Instance Delete functionality."""

    def __init__(self):
        super(InstanceDeleteGroup, self).__init__(
            InstanceDeleteRunnerFactory.instance())

    @test
    def instance_delete(self):
        """Delete an existing instance."""
        self.test_runner.run_instance_delete()
# The wait phase runs after the delete has been issued, and after every
# other group's own delete-wait phase.
@test(depends_on_groups=[groups.INST_DELETE],
      groups=[GROUP, groups.INST_DELETE_WAIT],
      runs_after_groups=[groups.BACKUP_INST_DELETE_WAIT,
                         groups.BACKUP_INC_INST_DELETE_WAIT,
                         groups.CFGGRP_INST_DELETE_WAIT,
                         groups.DB_ACTION_INST_DELETE_WAIT,
                         groups.MODULE_INST_DELETE_WAIT,
                         groups.REPL_INST_DELETE_WAIT,
                         groups.ROOT_ACTION_INST_DELETE_WAIT,
                         groups.USER_ACTION_INST_DELETE_WAIT])
class InstanceDeleteWaitGroup(TestGroup):
    """Test that Instance Delete Completes."""

    def __init__(self):
        super(InstanceDeleteWaitGroup, self).__init__(
            InstanceDeleteRunnerFactory.instance())

    @test
    def instance_delete_wait(self):
        """Wait for existing instance to be gone."""
        self.test_runner.run_instance_delete_wait()
|
nilq/baby-python
|
python
|
from reading import Maze, MazeReader
class Node(object):
    """Nodes for evolving in graph."""

    def __init__(self, maze, x, y, parent=None):
        """Initialize node. Keep maze, parent, and position."""
        self.maze = maze
        self.x = x
        self.y = y
        self.parent = parent
        # Neighbouring nodes; filled in by lookup_around().
        self.up = None
        self.left = None
        self.down = None
        self.right = None

    def lookup_around(self):
        """Look if it's possible to go up, left, right and down. If it is,
        expand properly everything, and put it into self.up, self.left,
        self.right and self.down."""
        x = self.x
        y = self.y
        # self.maze is a Maze object; its .maze attribute is the grid.
        maze = self.maze.maze

        def around(x, y, maze):
            """Look around to see if cases are occupied.

            Returns a 4-element list describing the cells touching grid
            point (x, y) — per the general case below:
              fields[0] = maze[y-1][x-1]  (upper-left)
              fields[1] = maze[y-1][x]    (upper-right)
              fields[2] = maze[y][x-1]    (lower-left)
              fields[3] = maze[y][x]      (lower-right)
            True means the cell value is 0 (free), False occupied,
            None means the cell lies outside the grid.
            """
            fields = [False, False, False, False]
            # Particular cases.
            # Lefter line: no cells to the left of the border.
            if x == 0:
                fields[0] = None
                fields[2] = None
                # Bottom-left corner.
                if y == len(maze):
                    fields[3] = None
                    if maze[y - 1][x] == 0:
                        fields[1] = True
                # Top-left corner.
                elif y == 0:
                    fields[1] = None
                    if maze[y][x] == 0:
                        fields[3] = True
                # Left edge, interior rows.
                else:
                    if maze[y][x] == 0:
                        fields[3] = True
                    if maze[y - 1][x] == 0:
                        fields[1] = True
            # Upper line: no cells above the border.
            elif y == 0:
                fields[0] = None
                fields[1] = None
                # Top-right corner.
                if x == len(maze[0]):
                    fields[3] = None
                    if maze[y][x - 1] == 0:
                        fields[2] = True
                # Top edge, interior columns.
                else:
                    if maze[y][x] == 0:
                        fields[3] = True
                    if maze[y][x - 1] == 0:
                        fields[2] = True
            # Downer line: no cells below the border.
            elif y == len(maze):
                fields[2] = None
                fields[3] = None
                # Bottom-right corner.
                if x == len(maze[0]):
                    fields[1] = None
                    if maze[y - 1][x - 1] == 0:
                        fields[0] = True
                # Bottom edge, interior columns.
                else:
                    if maze[y - 1][x - 1] == 0:
                        fields[0] = True
                    if maze[y - 1][x] == 0:
                        fields[1] = True
            # Righter line: no cells to the right of the border.
            elif x == len(maze[0]):
                fields[1] = None
                fields[3] = None
                if y == len(maze):
                    fields[2] = None
                    if maze[y - 1][x - 1] == 0:
                        fields[0] = True
                # Right edge, interior rows.
                else:
                    if maze[y - 1][x - 1] == 0:
                        fields[0] = True
                    if maze[y][x - 1] == 0:
                        fields[2] = True
            # General cases: all four surrounding cells exist.
            else:
                if maze[y - 1][x - 1] == 0:
                    fields[0] = True
                if maze[y - 1][x] == 0:
                    fields[1] = True
                if maze[y][x - 1] == 0:
                    fields[2] = True
                if maze[y][x] == 0:
                    fields[3] = True
            return fields

        def obstacle(cases):
            """Look if there's an obstacle near the point.

            True when any surrounding cell is occupied (False) or
            outside the grid (None)."""
            for case in cases:
                if case is None or case is False:
                    return True
            return False

        # A move is possible only when the two cells flanking that
        # direction are free AND the destination point itself is not
        # hemmed in by any obstacle.
        fields = around(x, y, maze)
        if fields[0] is True and fields[1] is True:
            if obstacle(around(x, y - 1, maze)) is False:
                self.up = Node(self.maze, x, y - 1, self)
        if fields[0] is True and fields[2] is True:
            if obstacle(around(x - 1, y, maze)) is False:
                self.left = Node(self.maze, x - 1, y, self)
        if fields[2] is True and fields[3] is True:
            if obstacle(around(x, y + 1, maze)) is False:
                self.down = Node(self.maze, x, y + 1, self)
        if fields[3] is True and fields[1] is True:
            if obstacle(around(x + 1, y, maze)) is False:
                self.right = Node(self.maze, x + 1, y, self)

    def spaces(self, number):
        """Render this node (position and which neighbours are set),
        indented by *number* spaces per line."""
        if self.up:
            up = "Haut !"
        else:
            up = 'None'
        if self.left:
            left = "Gauche !"
        else:
            left = 'None'
        if self.down:
            down = "Bas !"
        else:
            down = 'None'
        if self.right:
            right = "Droite !"
        else:
            right = 'None'
        rstring = number * ' ' + 'Noeud x: ' + str(self.x) + ' et y: ' + str(self.y) + '\n'
        rstring += number * ' ' + up + '\n' + number * ' ' + left + '\n'
        rstring += number * ' ' + down + '\n' + number * ' ' + right
        return rstring

    def __str__(self):
        # Unindented rendering of the node.
        return self.spaces(0)
|
nilq/baby-python
|
python
|
# NOTE: Python 2 script (uses the `print` statement below).
import pyps
from sys import argv

# With no command-line argument, act as the PyPS driver; with an
# argument, this same file doubles as the external "broker".
if len(argv) == 1:
    # this source include a main() with a call to foo() ; but foo() isn't defined !
    w=pyps.workspace("broker01.c")
    # We give a method to resolve missing module (here foo())
    w.props.preprocessor_missing_file_handling="external_resolver"
    w.props.preprocessor_missing_file_generator="python broker01.py"
    # We display with cumulated effects because we want callees to be computed
    w.fun.main.display(pyps.module.print_code_cumulated_effects)
else:
    # tricky, use the test file as a simple broker too :p
    print "foo.c"
|
nilq/baby-python
|
python
|
from typing import Any, Dict, Tuple
from dateutil.parser import parse
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricPartialFunctionTypes,
)
from great_expectations.expectations.metrics.map_metric import MapMetricProvider
from great_expectations.expectations.metrics.metric_provider import metric_partial
from great_expectations.expectations.metrics.util import filter_pair_metric_nulls
class ColumnPairValuesAGreaterThanB(MapMetricProvider):
    """Map metric asserting, row by row, that column_A is greater than
    (or, with or_equal, at least equal to) column_B."""

    condition_metric_name = "column_pair_values.a_greater_than_b"
    condition_value_keys = (
        "ignore_row_if",
        "or_equal",
        "parse_strings_as_datetimes",
        "allow_cross_type_comparisons",
    )
    domain_keys = ("batch_id", "table", "column_A", "column_B")

    @metric_partial(
        engine=PandasExecutionEngine,
        partial_fn_type=MetricPartialFunctionTypes.MAP_CONDITION_SERIES,
        domain_type=MetricDomainTypes.COLUMN_PAIR,
    )
    def _pandas(
        cls,
        execution_engine: "PandasExecutionEngine",
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Pandas implementation: returns the boolean condition series
        plus the compute/accessor domain kwargs."""
        # Falsy/missing ignore_row_if falls back to the default policy.
        ignore_row_if = (
            metric_value_kwargs.get("ignore_row_if") or "both_values_are_missing"
        )

        df, compute_domain, accessor_domain = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN_PAIR
        )
        column_A, column_B = filter_pair_metric_nulls(
            df[metric_domain_kwargs["column_A"]],
            df[metric_domain_kwargs["column_B"]],
            ignore_row_if=ignore_row_if,
        )

        # Cross-type comparison is not supported by this implementation.
        if metric_value_kwargs.get("allow_cross_type_comparisons"):
            raise NotImplementedError

        if metric_value_kwargs.get("parse_strings_as_datetimes"):
            # Compare as datetimes parsed from the raw string values.
            column_A = column_A.map(parse)
            column_B = column_B.map(parse)

        if metric_value_kwargs.get("or_equal"):
            condition = column_A >= column_B
        else:
            condition = column_A > column_B
        return condition, compute_domain, accessor_domain
|
nilq/baby-python
|
python
|
from authlib.common.urls import quote, unquote
def escape(s):
    """Percent-encode *s*, leaving only '~' (and unreserved characters
    handled by quote itself) unescaped."""
    return quote(s, safe=b'~')


def unescape(s):
    """Decode percent-escapes in *s* (inverse of escape())."""
    return unquote(s)
|
nilq/baby-python
|
python
|
import json
from typing import Optional
import pyrebase
from .database import Database
class FirebaseDatabase(Database):
    """Database implementation backed by Firebase via pyrebase."""

    def __init__(self, serialised_config: str):
        """Parse the JSON-serialised Firebase config."""
        super().__init__()
        self.config = json.loads(serialised_config)

    def add_document(self, doc_id: str, doc: dict) -> None:
        """Store *doc* under *doc_id*."""
        self._upload_analysis(doc_id, doc)

    def get_document_as_str(self, doc_id: str) -> Optional[str]:
        """Return the document stored under *doc_id* as a string."""
        return self._fetch_analysis_data(doc_id)

    def _open_db_connection(self):
        """Create a fresh pyrebase database handle from the config."""
        return pyrebase.initialize_app(self.config).database()

    def _upload_analysis(self, replay_id: str, replay_analysis: dict) -> None:
        """Serialise *replay_analysis* to JSON and write it under *replay_id*."""
        node = self._open_db_connection().child("zerg_macro_analyses")
        node.child(replay_id).set(json.dumps(replay_analysis))

    def _fetch_analysis_data(self, replay_id: str) -> str:
        """Read the stored analysis for *replay_id*; '' when absent."""
        node = self._open_db_connection().child("zerg_macro_analyses")
        analysis_data = node.child(replay_id).get().val()
        return analysis_data if analysis_data else ""
|
nilq/baby-python
|
python
|
# Expose the s3 submodule's public names at package level.
from .s3 import *
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Get jobs from aCT.
Returns:
1: No proxy found.
2: One of the elements in job list is not a range.
3: One of the elements in job list is not a valid ID.
5: tmp directory not configured.
"""
import argparse
import sys
import shutil
import os
import logging
import act.client.jobmgr as jobmgr
import act.client.clicommon as clicommon
from act.client.errors import TargetDirExistsError
from act.client.errors import InvalidJobRangeError
from act.client.errors import InvalidJobIDError
from act.client.errors import TmpConfigurationError
from act.client.errors import NoJobDirectoryError
def getLocalDir(jobdir, dirname=''):
    """
    Assemble destination directory for job results.

    The destination is *jobdir* under *dirname* when given, otherwise
    under the current working directory.

    Raises:
        TargetDirExistsError: Destination for job results already exists.
    """
    base = dirname if dirname else os.getcwd()
    dstDir = os.path.join(base, jobdir)
    # Refuse to overwrite an existing destination.
    if os.path.exists(dstDir):
        raise TargetDirExistsError(dstDir)
    return dstDir
def main():
    """Fetch results of matching aCT jobs into local directories and
    (unless --no-clean is given) clean the fetched jobs."""
    # parse arguments
    parser = argparse.ArgumentParser(description='Get jobs from aCT')
    parser.add_argument('-a', '--all', action='store_true',
                        help='all jobs that match other criteria')
    parser.add_argument('-j', '--jobs', default='',
                        help='comma separated list of job IDs or ranges')
    parser.add_argument('-f', '--find', default='',
                        help='get only jobs with matching (sub)string in their name')
    parser.add_argument('-s', '--state', default='',
                        help='get only jobs with certain state')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show more information')
    parser.add_argument('-p', '--proxy', default=None,
                        help='custom path to proxy certificate')
    parser.add_argument('-n', '--no-clean', action='store_true',
                        help='do not clean jobs')
    clicommon.showHelpOnCommandOnly(parser)
    args = parser.parse_args()

    # logging: verbose goes to stdout; otherwise logs are discarded
    # via os.devnull (level stays DEBUG in both cases).
    logFormat = "[%(asctime)s] [%(filename)s:%(lineno)d] [%(levelname)s] - %(message)s"
    if args.verbose:
        logging.basicConfig(format=logFormat, level=logging.DEBUG, stream=sys.stdout)
    else:
        logging.basicConfig(format=logFormat, level=logging.DEBUG, filename=os.devnull)

    # create a list of jobs to work on
    if args.all:
        jobs = []  # empty means all jobs
    elif args.jobs:
        try:
            jobs = jobmgr.getIDsFromList(args.jobs)
        except InvalidJobRangeError as e:
            print("error: range '{}' is not a valid range".format(e.jobRange))
            sys.exit(2)
        except InvalidJobIDError as e:
            print("error: ID '{}' is not a valid ID".format(e.jobid))
            sys.exit(3)
    else:
        print("error: no jobs specified (use -a or -j)")
        sys.exit(10)

    # get proxy ID given proxy
    proxyid = clicommon.getProxyIdFromProxy(args.proxy)

    # get job info
    manager = jobmgr.JobManager()
    try:
        results = manager.getJobs(proxyid, jobs, args.state, args.find)
    except TmpConfigurationError:
        print('error: tmp directory not configured')
        sys.exit(5)
    if not results.jobdicts:
        print('no jobs to get')
        sys.exit(0)

    # copy job results from tmp into per-job local directories
    dontRemove = []
    for result in results.jobdicts:
        try:
            if result['dir']:  # if there are job results in tmp
                dst_dirname = os.path.basename(os.path.normpath(result['name']))
                dstdir = getLocalDir(dst_dirname)
                shutil.copytree(result['dir'], dstdir)
                print('Results stored at: {}'.format(dstdir))
            else:
                raise NoJobDirectoryError(result['dir'])
        except NoJobDirectoryError as e:
            print('error: tmp results directory {} does not exist'.format(e.jobdir))
        except TargetDirExistsError as e:
            print('error: job destination {} already exists'.format(e.dstdir))
            # don't clean job that could not be removed
            dontRemove.append(result['id'])

    # delete jobs that should not be removed from results
    # NOTE(review): this deletes from results.jobdicts while iterating
    # over it, which can skip the element following a deletion — verify
    # this is safe for the at-most-one-match-per-jobid case it handles.
    for jobid in dontRemove:
        for result in results.jobdicts:
            if result['id'] == jobid:
                jobix = results.clientIDs.index(result['id'])
                del results.clientIDs[jobix]
                del results.arcIDs[jobix]
                del results.jobdicts[jobix]

    # clean jobs
    if not args.no_clean:
        manager.forceCleanJobs(results)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import setuptools

# The README contains Turkish characters, so the encoding must be
# stated explicitly (the platform default may not be UTF-8).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    # NOTE(review): spaces in a distribution name are unusual and get
    # normalized by packaging tools — consider "turkish-topic-model".
    name="Turkish Topic Model",
    version="0.0.1",
    author="Ali ÇİMEN, Sevinç GÜLSEÇEN",
    # Fixed missing dot in the first address ("gmailcom" -> "gmail.com").
    author_email="cimenwd@gmail.com, gulsecen@istanbul.edu.tr",
    description="Türkçe metin ön işleme ve konu analizi konusunda hazırlanmış fonksiyonlar kümesi",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
    install_requires=['pymysql >= 1.0.2',
                      'pandas >= 1.3.5',
                      'jpype1 >=1.3.0',
                      'requests >= 2.26.0',
                      'nltk >= 3.6.7',
                      'tomotopy >= 0.12.2']
)
|
nilq/baby-python
|
python
|
import pandas
from sklearn.model_selection import train_test_split
def add_series(X, y, name):
    """Return a copy of dataframe *X* with list *y* attached as a new
    column called *name* (wrapped in a pandas Series first)."""
    result = X.copy()
    new_column = pandas.Series(data=y, name=name)
    result[name] = new_column
    return result
def data_split(X):
    """Split dataframe *X* into train, validation and test sets.

    random_state=20 is fixed for both splits, so the partition is
    reproducible."""
    remainder, test = train_test_split(X, random_state=20)
    train, val = train_test_split(remainder, random_state=20)
    return train, val, test
if __name__ == "__main__":
df = pandas.DataFrame({'odd': [1, 3, 5]})
test_list = [2, 4, 6]
df = add_series(df, test_list, 'even')
df
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
import re
class ResCompany(models.Model):
    _inherit = 'res.company'

    # Swedish organisation number, derived from the VAT number.
    org_number = fields.Char(compute='_compute_org_number')

    @api.depends('vat')
    def _compute_org_number(self):
        """Derive org_number from the VAT for Swedish companies;
        empty for everyone else."""
        for company in self:
            if company.country_id.code != "SE" or not company.vat:
                company.org_number = ''
                continue
            # Keep digits only, drop the 2-digit VAT suffix, then
            # insert the dash after the first six digits.
            digits = re.sub(r'\D', '', company.vat)[:-2]
            company.org_number = digits[:6] + '-' + digits[6:]
|
nilq/baby-python
|
python
|
import numpy as np
from ..functions import B_nu, dB_nu_dT
from ..integrate import integrate_loglog
from ..constants import sigma, k, c
def test_b_nu():
    """B_nu must integrate to sigma*T^4/pi and match the
    Rayleigh-Jeans law at low frequencies."""
    frequencies = np.logspace(-20, 20., 10000)
    for temperature in [10, 100, 1000, 10000]:
        planck = B_nu(frequencies, temperature)
        # Frequency integral of the Planck function.
        total = integrate_loglog(frequencies, planck)
        np.testing.assert_allclose(total, sigma * temperature ** 4 / np.pi,
                                   rtol=1e-4)
        # Low-frequency tail follows Rayleigh-Jeans.
        rayleigh_jeans = 2. * frequencies ** 2 * k * temperature / c**2
        low = frequencies < 1e-10
        np.testing.assert_allclose(planck[low], rayleigh_jeans[low],
                                   rtol=1.e-8)
def test_db_nu_dt():
    """dB_nu/dT must agree with a central finite difference of B_nu."""
    frequencies = np.logspace(-20, 20., 10000)
    for temperature in [10, 100, 1000, 10000]:
        # Exact analytic derivative.
        analytic = dB_nu_dT(frequencies, temperature)
        # Central difference with a tiny relative temperature step.
        step = temperature / 1e6
        lower = B_nu(frequencies, temperature - step)
        upper = B_nu(frequencies, temperature + step)
        numeric = 0.5 * (upper - lower) / step
        np.testing.assert_allclose(analytic, numeric, rtol=1.e-2)
|
nilq/baby-python
|
python
|
"""Myia Pytorch frontend."""
from .pytorch import *
|
nilq/baby-python
|
python
|
"""
Dictionary Embedder Class
"""
import spacy
from .base import BaseEmbedder
class DictionaryEmbedder(BaseEmbedder):
    """Base Embedder class extended for implementing text embedders.

    Maps each token of the input text to its rank in the spacy vocab,
    optionally truncating or zero-padding to a fixed length.
    """

    def __init__(self, spacy_pkg='en_vectors_web_lg', embedding_length=None):
        super(DictionaryEmbedder, self).__init__()
        # Loaded spacy pipeline used for tokenisation / vocab lookup.
        self.nlp = spacy.load(spacy_pkg)
        # When set, process() output is exactly this many entries.
        self.embedding_length = embedding_length

    def train(self, input_data):
        """Training is not required because spacy stores the vocab."""
        pass

    def process(self, input_data):
        """Split the input text into an array of tokens, and replace each
        token with the unique identifier for that token in the spacy vocab.
        """
        # BUG FIX: unicode() is a Python-2-only builtin and raised
        # NameError under Python 3; str() is its Py3 equivalent.
        # (If this codebase still targets Py2, restore unicode() here.)
        spacy_doc = self.nlp(str(input_data))
        embeddings = []
        for token in spacy_doc:
            embeddings.append(token.rank)
            # Truncate once the requested length is reached.
            if self.embedding_length \
                    and len(embeddings) == self.embedding_length:
                break
        if self.embedding_length and len(embeddings) < self.embedding_length:
            # BUG FIX: the pad count must be parenthesised — the original
            # computed `[0] * length - len(embeddings)`, a list-minus-int
            # TypeError whenever padding was needed.
            embeddings.extend([0] * (self.embedding_length - len(embeddings)))
        return embeddings
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Tests for Terminal methods that account for sequences in strings"""
# std imports
import os
import sys
import struct
import platform
import itertools
# 3rd party
import six
import pytest
# local
from .accessories import TestTerminal, as_subprocess
from .conftest import IS_WINDOWS
if platform.system() != 'Windows':
import fcntl
import termios
def test_length_cjk():
    """Test length of East Asian characters"""
    @as_subprocess
    def child():
        term = TestTerminal()
        # Full-width characters occupy 2 cells, half-width 1.
        styled = term.bold_red(u'コンニチハ, セカイ!')
        cell_widths = (2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1,)
        assert term.length(styled) == sum(cell_widths)
    child()
def test_length_ansiart():
    """Test length of ANSI art"""
    @as_subprocess
    def child(kind):
        import codecs
        term = TestTerminal(kind=kind)
        # this 'ansi' art contributed by xzip!impure for another project,
        # unlike most CP-437 DOS ansi art, this is actually utf-8 encoded.
        fname = os.path.join(os.path.dirname(__file__), 'wall.ans')
        with codecs.open(fname, 'r', 'utf-8') as ansiart:
            lines = ansiart.readlines()
        # line 0 is ^[[64C^[[34m▄▓▄
        expected_lengths = (67, 75, 78, 78, 78, 78, 77)
        for line, expected in zip(lines, expected_lengths):
            assert term.length(line) == expected
    child('vtwin10' if IS_WINDOWS else 'xterm-256color')
def test_sequence_length(all_terms):
    """Ensure T.length(string containing sequence) is correct."""
    # pylint: disable=too-complex,too-many-statements
    @as_subprocess
    def child(kind):
        term = TestTerminal(kind=kind, force_styling=True)
        # Make sure to test with 24-bit color on at least one terminal
        if kind == 'xterm':
            term.number_of_colors = 1 << 24
        # Create a list of ascii characters, to be separated
        # by word, to be zipped up with a cycling list of
        # terminal sequences. Then, compare the length of
        # each, the basic plain_text.__len__ vs. the Terminal
        # method length. They should be equal.
        plain_text = (u'The softest things of the world '
                      u'Override the hardest things of the world '
                      u'That which has no substance '
                      u'Enters into that which has no openings')
        # A bare capability measures as width 0; when used as a callable
        # wrapper, only the wrapped text contributes to the width.
        # strip()/rstrip()/lstrip() remove sequences plus outer whitespace;
        # strip_seqs() removes sequences but preserves whitespace.
        if term.bold:
            assert (term.length(term.bold) == 0)
            assert (term.length(term.bold(u'x')) == 1)
            assert (term.length(term.bold_red) == 0)
            assert (term.length(term.bold_red(u'x')) == 1)
            assert (term.length(term.bold_on_red) == 0)
            assert (term.length(term.bold_on_red(u'x')) == 1)
            assert (term.length(term.bold_olivedrab4) == 0)
            assert (term.length(term.bold_olivedrab4(u'x')) == 1)
            assert (term.length(term.bold_on_olivedrab4) == 0)
            assert (term.length(term.bold_on_olivedrab4(u'x')) == 1)
            assert (term.strip(term.bold) == u'')
            assert (term.rstrip(term.bold) == u'')
            assert (term.lstrip(term.bold) == u'')
            assert (term.strip(term.bold(u' x ')) == u'x')
            # strip() accepts the same optional chars argument as str.strip
            assert (term.strip(term.bold(u'z x q'), 'zq') == u' x ')
            assert (term.rstrip(term.bold(u' x ')) == u' x')
            assert (term.lstrip(term.bold(u' x ')) == u'x ')
            assert (term.strip(term.bold_red) == u'')
            assert (term.rstrip(term.bold_red) == u'')
            assert (term.lstrip(term.bold_red) == u'')
            assert (term.strip(term.bold_on_red) == u'')
            assert (term.rstrip(term.bold_on_red) == u'')
            assert (term.lstrip(term.bold_on_red) == u'')
            assert (term.strip(term.bold_olivedrab4) == u'')
            assert (term.rstrip(term.bold_olivedrab4) == u'')
            assert (term.lstrip(term.bold_olivedrab4) == u'')
            assert (term.strip(term.bold_on_olivedrab4) == u'')
            assert (term.rstrip(term.bold_on_olivedrab4) == u'')
            assert (term.lstrip(term.bold_on_olivedrab4) == u'')
            assert (term.strip(term.bold_red(u' x ')) == u'x')
            assert (term.rstrip(term.bold_red(u' x ')) == u' x')
            assert (term.lstrip(term.bold_red(u' x ')) == u'x ')
            assert (term.strip(term.bold_on_red(u' x ')) == u'x')
            assert (term.rstrip(term.bold_on_red(u' x ')) == u' x')
            assert (term.lstrip(term.bold_on_red(u' x ')) == u'x ')
            assert (term.strip(term.bold_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.bold_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.bold_olivedrab4(u' x ')) == u'x ')
            assert (term.strip(term.bold_on_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.bold_on_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.bold_on_olivedrab4(u' x ')) == u'x ')
            assert (term.strip_seqs(term.bold) == u'')
            assert (term.strip_seqs(term.bold(u' x ')) == u' x ')
            assert (term.strip_seqs(term.bold_red) == u'')
            assert (term.strip_seqs(term.bold_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.bold_on_red) == u'')
            assert (term.strip_seqs(term.bold_on_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.bold_olivedrab4) == u'')
            assert (term.strip_seqs(term.bold_olivedrab4(u' x ')) == u' x ')
            assert (term.strip_seqs(term.bold_on_olivedrab4) == u'')
            assert (term.strip_seqs(term.bold_on_olivedrab4(u' x ')) == u' x ')
        # same contract for the underline family of capabilities
        if term.underline:
            assert (term.length(term.underline) == 0)
            assert (term.length(term.underline(u'x')) == 1)
            assert (term.length(term.underline_red) == 0)
            assert (term.length(term.underline_red(u'x')) == 1)
            assert (term.length(term.underline_on_red) == 0)
            assert (term.length(term.underline_on_red(u'x')) == 1)
            assert (term.length(term.underline_olivedrab4) == 0)
            assert (term.length(term.underline_olivedrab4(u'x')) == 1)
            assert (term.length(term.underline_on_olivedrab4) == 0)
            assert (term.length(term.underline_on_olivedrab4(u'x')) == 1)
            assert (term.strip(term.underline) == u'')
            assert (term.strip(term.underline(u' x ')) == u'x')
            assert (term.strip(term.underline_red) == u'')
            assert (term.strip(term.underline_red(u' x ')) == u'x')
            assert (term.rstrip(term.underline_red(u' x ')) == u' x')
            assert (term.lstrip(term.underline_red(u' x ')) == u'x ')
            assert (term.strip(term.underline_on_red) == u'')
            assert (term.strip(term.underline_on_red(u' x ')) == u'x')
            assert (term.rstrip(term.underline_on_red(u' x ')) == u' x')
            assert (term.lstrip(term.underline_on_red(u' x ')) == u'x ')
            assert (term.strip(term.underline_olivedrab4) == u'')
            assert (term.strip(term.underline_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.underline_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.underline_olivedrab4(u' x ')) == u'x ')
            assert (term.strip(term.underline_on_olivedrab4) == u'')
            assert (term.strip(term.underline_on_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.underline_on_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.underline_on_olivedrab4(u' x ')) == u'x ')
            assert (term.strip_seqs(term.underline) == u'')
            assert (term.strip_seqs(term.underline(u' x ')) == u' x ')
            assert (term.strip_seqs(term.underline_red) == u'')
            assert (term.strip_seqs(term.underline_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.underline_on_red) == u'')
            assert (term.strip_seqs(term.underline_on_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.underline_olivedrab4) == u'')
            assert (term.strip_seqs(term.underline_olivedrab4(u' x ')) == u' x ')
            assert (term.strip_seqs(term.underline_on_olivedrab4) == u'')
            assert (term.strip_seqs(term.underline_on_olivedrab4(u' x ')) == u' x ')
        # same contract for the reverse family of capabilities
        if term.reverse:
            assert (term.length(term.reverse) == 0)
            assert (term.length(term.reverse(u'x')) == 1)
            assert (term.length(term.reverse_red) == 0)
            assert (term.length(term.reverse_red(u'x')) == 1)
            assert (term.length(term.reverse_on_red) == 0)
            assert (term.length(term.reverse_on_red(u'x')) == 1)
            assert (term.length(term.reverse_olivedrab4) == 0)
            assert (term.length(term.reverse_olivedrab4(u'x')) == 1)
            assert (term.length(term.reverse_on_olivedrab4) == 0)
            assert (term.length(term.reverse_on_olivedrab4(u'x')) == 1)
            assert (term.strip(term.reverse) == u'')
            assert (term.strip(term.reverse(u' x ')) == u'x')
            assert (term.strip(term.reverse_red) == u'')
            assert (term.strip(term.reverse_red(u' x ')) == u'x')
            assert (term.rstrip(term.reverse_red(u' x ')) == u' x')
            assert (term.lstrip(term.reverse_red(u' x ')) == u'x ')
            assert (term.strip(term.reverse_on_red) == u'')
            assert (term.strip(term.reverse_on_red(u' x ')) == u'x')
            assert (term.rstrip(term.reverse_on_red(u' x ')) == u' x')
            assert (term.lstrip(term.reverse_on_red(u' x ')) == u'x ')
            assert (term.strip(term.reverse_olivedrab4) == u'')
            assert (term.strip(term.reverse_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.reverse_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.reverse_olivedrab4(u' x ')) == u'x ')
            assert (term.strip(term.reverse_on_olivedrab4) == u'')
            assert (term.strip(term.reverse_on_olivedrab4(u' x ')) == u'x')
            assert (term.rstrip(term.reverse_on_olivedrab4(u' x ')) == u' x')
            assert (term.lstrip(term.reverse_on_olivedrab4(u' x ')) == u'x ')
            assert (term.strip_seqs(term.reverse) == u'')
            assert (term.strip_seqs(term.reverse(u' x ')) == u' x ')
            assert (term.strip_seqs(term.reverse_red) == u'')
            assert (term.strip_seqs(term.reverse_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.reverse_on_red) == u'')
            assert (term.strip_seqs(term.reverse_on_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.reverse_olivedrab4) == u'')
            assert (term.strip_seqs(term.reverse_olivedrab4(u' x ')) == u' x ')
            assert (term.strip_seqs(term.reverse_on_olivedrab4) == u'')
            assert (term.strip_seqs(term.reverse_on_olivedrab4(u' x ')) == u' x ')
        # same contract for the blink family of capabilities
        if term.blink:
            assert (term.length(term.blink) == 0)
            assert (term.length(term.blink(u'x')) == 1)
            assert (term.length(term.blink_red) == 0)
            assert (term.length(term.blink_red(u'x')) == 1)
            assert (term.length(term.blink_on_red) == 0)
            assert (term.length(term.blink_on_red(u'x')) == 1)
            assert (term.length(term.blink_olivedrab4) == 0)
            assert (term.length(term.blink_olivedrab4(u'x')) == 1)
            assert (term.length(term.blink_on_olivedrab4) == 0)
            assert (term.length(term.blink_on_olivedrab4(u'x')) == 1)
            assert (term.strip(term.blink) == u'')
            assert (term.strip(term.blink(u' x ')) == u'x')
            assert (term.strip(term.blink(u'z x q'), u'zq') == u' x ')
            assert (term.strip(term.blink_red) == u'')
            assert (term.strip(term.blink_red(u' x ')) == u'x')
            assert (term.strip(term.blink_on_red) == u'')
            assert (term.strip(term.blink_on_red(u' x ')) == u'x')
            assert (term.strip(term.blink_olivedrab4) == u'')
            assert (term.strip(term.blink_olivedrab4(u' x ')) == u'x')
            assert (term.strip(term.blink_on_olivedrab4) == u'')
            assert (term.strip(term.blink_on_olivedrab4(u' x ')) == u'x')
            assert (term.strip_seqs(term.blink) == u'')
            assert (term.strip_seqs(term.blink(u' x ')) == u' x ')
            assert (term.strip_seqs(term.blink_red) == u'')
            assert (term.strip_seqs(term.blink_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.blink_on_red) == u'')
            assert (term.strip_seqs(term.blink_on_red(u' x ')) == u' x ')
            assert (term.strip_seqs(term.blink_olivedrab4) == u'')
            assert (term.strip_seqs(term.blink_olivedrab4(u' x ')) == u' x ')
            assert (term.strip_seqs(term.blink_on_olivedrab4) == u'')
            assert (term.strip_seqs(term.blink_on_olivedrab4(u' x ')) == u' x ')
        # non-styling capabilities also have printable width 0
        if term.home:
            assert (term.length(term.home) == 0)
            assert (term.strip(term.home) == u'')
        if term.clear_eol:
            assert (term.length(term.clear_eol) == 0)
            assert (term.strip(term.clear_eol) == u'')
        if term.enter_fullscreen:
            assert (term.length(term.enter_fullscreen) == 0)
            assert (term.strip(term.enter_fullscreen) == u'')
        if term.exit_fullscreen:
            assert (term.length(term.exit_fullscreen) == 0)
            assert (term.strip(term.exit_fullscreen) == u'')
        # horizontally, we decide move_down and move_up are 0,
        assert (term.length(term.move_down) == 0)
        assert (term.length(term.move_down(2)) == 0)
        assert (term.length(term.move_up) == 0)
        assert (term.length(term.move_up(2)) == 0)
        # other things aren't so simple, somewhat edge cases,
        # moving backwards and forwards horizontally must be
        # accounted for as a "length", as <x><move right 10><y>
        # will result in a printed column length of 12 (even
        # though columns 2-11 are non-destructive space
        assert (term.length(u'x\b') == 0)
        assert (term.strip(u'x\b') == u'')
        # XXX why are some terminals width of 9 here ??
        assert (term.length(u'\t') in (8, 9))
        assert (term.strip(u'\t') == u'')
        assert (term.length(u'_' + term.move_left) == 0)
        assert (term.length(term.move_right) == 1)
        if term.cub:
            assert (term.length((u'_' * 10) + term.cub(10)) == 0)
        if term.cuf:
            assert (term.length(term.cuf(10)) == 10)
        # vertical spacing is unaccounted as a 'length'
        assert (term.length(term.move_up) == 0)
        assert (term.length(term.cuu(10)) == 0)
        assert (term.length(term.move_down) == 0)
        assert (term.length(term.cud(10)) == 0)
        # this is how manpages perform underlining, this is done
        # with the 'overstrike' capability of teletypes, and apparently
        # less(1), '123' -> '1\b_2\b_3\b_'
        text_wseqs = u''.join(itertools.chain(
            *zip(plain_text, itertools.cycle(['\b_']))))
        assert (term.length(text_wseqs) == len(plain_text))
    child(all_terms)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires cwcwidth which requires python3.6 or higher")
def test_length_zerowidth():
    """Test length of zero-width characters."""
    @as_subprocess
    def child():
        term = TestTerminal()
        # given,
        given = term.bold_red(u'0123')
        # NOTE(review): expected is 4; the trailing zero entries presumably
        # stand in for zero-width code points this test intends to cover,
        # but the fixture string contains none — confirm against intent.
        expected = sum((1, 1, 1, 1, 0, 0, 0, 0,))
        # exercise,
        assert term.length(given) == expected
    child()
def test_env_winsize():
    """Height and width fall back to $COLUMNS/$LINES when no tty exists."""
    @as_subprocess
    def child():
        # pin the virtual window size through the environment
        os.environ['COLUMNS'] = '99'
        os.environ['LINES'] = '11'
        term = TestTerminal(stream=six.StringIO())
        saved_descriptor = term._init_descriptor
        saved_stdout = sys.__stdout__
        try:
            # with no descriptor to interrogate, the env values must win
            term._init_descriptor = None
            sys.__stdout__ = None
            winsize = term._height_and_width()
            width, height = term.width, term.height
        finally:
            term._init_descriptor = saved_descriptor
            sys.__stdout__ = saved_stdout
        assert winsize.ws_col == width == 99
        assert winsize.ws_row == height == 11
    child()
@pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl")
def test_winsize(many_lines, many_columns):
    """Height and width are queried from the pty via TIOCGWINSZ."""
    pixel_width, pixel_height = 1024, 768
    @as_subprocess
    def child(lines=25, cols=80):
        # program the pty's window size directly with an ioctl
        packed = struct.pack('HHHH', lines, cols, pixel_width, pixel_height)
        fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, packed)
        term = TestTerminal()
        winsize = term._height_and_width()
        assert (term.width, term.height) == (cols, lines)
        assert (winsize.ws_col, winsize.ws_row) == (cols, lines)
        assert (term.pixel_width, term.pixel_height) == (pixel_width, pixel_height)
    child(lines=many_lines, cols=many_columns)
def test_Sequence_alignment_fixed_width(all_terms):
    """ljust/rjust/center with an explicit width ignore embedded sequences."""
    @as_subprocess
    def child(kind):
        term = TestTerminal(kind=kind)
        msg = 'pony express, all aboard, choo, choo!'
        msg_len = len(msg)
        # color every character individually, then reset
        colored = u''.join('%s%s' % (term.color(idx % 7), char,)
                           for idx, char in enumerate(msg)) + term.normal
        assert (term.length(colored) == msg_len)
        # each alignment must behave exactly like its str counterpart
        for adjusted, plain_equiv in (
                (term.ljust(colored, 88), msg.ljust(88)),
                (term.rjust(colored, 88), msg.rjust(88)),
                (term.center(colored, 88), msg.center(88))):
            assert (term.length(adjusted.strip()) == msg_len)
            assert (term.length(adjusted) == len(plain_equiv))
    child(kind=all_terms)
@pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl")
def test_Sequence_alignment(all_terms):
    """Tests methods related to Sequence class, namely ljust, rjust, center."""
    @as_subprocess
    def child(kind, lines=25, cols=80):
        # fix the pty's window size so term.width is deterministic
        packed = struct.pack('HHHH', lines, cols, 0, 0)
        fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, packed)
        term = TestTerminal(kind=kind)
        msg = 'pony express, all aboard, choo, choo!'
        msg_len = len(msg)
        # color every character individually, then reset
        colored = u''.join('%s%s' % (term.color(idx % 7), char,)
                           for idx, char in enumerate(msg)) + term.normal
        assert (term.length(colored) == msg_len)
        # default width is the terminal width; compare to str behavior
        for adjusted, plain_equiv in (
                (term.ljust(colored), msg.ljust(term.width)),
                (term.rjust(colored), msg.rjust(term.width)),
                (term.center(colored), msg.center(term.width))):
            assert (term.length(adjusted.strip()) == msg_len)
            assert (term.length(adjusted) == len(plain_equiv))
    child(kind=all_terms)
def test_hyperlink_nostyling():
    """term.link() degrades to plain caption text when styling is disabled."""
    @as_subprocess
    def child():
        term = TestTerminal(force_styling=None)
        rendered = term.link('https://blessed.readthedocs.org', 'blessed')
        assert rendered == 'blessed'
    child()
def test_basic_hyperlinks():
    """term.link() wraps its caption in zero-width OSC 8 sequences."""
    @as_subprocess
    def child():
        term = TestTerminal()
        rendered = term.link('https://blessed.readthedocs.org', 'blessed')
        parts = term.split_seqs(rendered)
        if not term.does_styling:
            # without styling, only the caption remains
            assert ''.join(parts) == 'blessed'
            return
        # OSC 8 prefix, one element per visible character, OSC 8 suffix
        assert parts[0] == '\x1b]8;;https://blessed.readthedocs.org\x1b\\'
        assert term.length(parts[0]) == 0
        assert ''.join(parts[1:8]) == 'blessed'
        assert parts[8] == '\x1b]8;;\x1b\\'
        assert len(parts) == 9
    child()
def test_hyperlink_with_id():
    """term.link() embeds the optional id= parameter in the OSC 8 prefix."""
    @as_subprocess
    def child():
        term = TestTerminal()
        rendered = term.link(
            'https://blessed.readthedocs.org', 'blessed', '123')
        parts = term.split_seqs(rendered)
        if not term.does_styling:
            # without styling, only the caption remains
            assert ''.join(parts) == 'blessed'
            return
        # prefix now carries the id, suffix is unchanged
        assert parts[0] == '\x1b]8;id=123;https://blessed.readthedocs.org\x1b\\'
        assert term.length(parts[0]) == 0
        assert ''.join(parts[1:8]) == 'blessed'
        assert parts[8] == '\x1b]8;;\x1b\\'
        assert len(parts) == 9
    child()
def test_sequence_is_movement_false(all_terms):
    """measure_length() reports full width for non-moving sequences."""
    @as_subprocess
    def child(kind):
        from blessed.sequences import measure_length
        term = TestTerminal(kind=kind)
        assert measure_length(u'', term) == 0
        # a plain string is not a sequence at all
        assert measure_length(u'xyzzy', term) == 0
        # negative numbers, though printable as %d, do not result
        # in movement; just garbage. Also not a valid sequence.
        assert measure_length(term.cuf(-333), term) == 0
        assert (len(term.clear_eol) == measure_length(term.clear_eol, term))
        # erases and paints never *move* the cursor
        for capability in (term.clear_bol, term.clear_eos, term.bold,
                           term.red, term.civis):
            assert (len(capability) == measure_length(capability, term))
        if term.cvvis:
            assert (len(term.cvvis) == measure_length(term.cvvis, term))
        for capability in (term.underline, term.reverse):
            assert (len(capability) == measure_length(capability, term))
        # color sequences measure as their own literal length
        for color_index in (0, term.number_of_colors):
            sequence = term.color(color_index)
            assert (len(sequence) == measure_length(sequence, term))
        for capability in (term.normal_cursor, term.hide_cursor,
                           term.save, term.italic):
            assert (len(capability) == measure_length(capability, term))
        assert (len(term.standout) == measure_length(term.standout, term)
                ), (term.standout, term._wont_move)
    child(all_terms)
def test_termcap_will_move_false(all_terms):
    """Every styling/erase capability parses as non-moving."""
    @as_subprocess
    def child(kind):
        from blessed.sequences import iter_parse
        term = TestTerminal(kind=kind)
        # same candidates, same order as before; unsupported (falsy)
        # capabilities are skipped just as the original if-guards did
        candidates = (term.clear_eol, term.clear_bol, term.clear_eos,
                      term.bold, term.red, term.civis, term.cvvis,
                      term.underline, term.reverse, term.color(0),
                      term.normal_cursor, term.save, term.italic,
                      term.standout)
        for capability in candidates:
            if capability:
                assert not next(iter_parse(term, capability))[1].will_move
    child(all_terms)
def test_sequence_is_movement_true(all_terms):
    """measure_length() covers the whole sequence for cursor movements."""
    @as_subprocess
    def child(kind):
        from blessed.sequences import measure_length
        term = TestTerminal(kind=kind)
        # parameterized movements measure as their full literal length
        assert (len(term.move(98, 76)) ==
                measure_length(term.move(98, 76), term))
        assert (len(term.move(54)) ==
                measure_length(term.move(54), term))
        assert (len(term.move_xy(1, 2)) ==
                measure_length(term.move(1, 2), term))
        assert (len(term.move_yx(3, 4)) ==
                measure_length(term.move(3, 4), term))
        # single-step movements, when the terminal supports them
        for capability in (term.cud1, term.cub1, term.cuf1, term.cuu1):
            assert not capability or (
                len(capability) == measure_length(capability, term))
        if term.cub:
            assert (len(term.cub(333)) ==
                    measure_length(term.cub(333), term))
        if term.cuf:
            assert (len(term.cuf(333)) ==
                    measure_length(term.cuf(333), term))
        for capability in (term.home, term.restore, term.clear):
            assert not capability or (
                len(capability) == measure_length(capability, term))
    child(all_terms)
def test_termcap_will_move_true(all_terms):
    """Cursor-movement sequences parse with will_move set."""
    @as_subprocess
    def child(kind):
        from blessed.sequences import iter_parse
        term = TestTerminal(kind=kind, force_styling=True)
        # parameterized and single-step movements
        for sequence in (term.move(98, 76), term.move_yx(8, 76),
                         term.move_xy(98, 7), term.move(54),
                         term.cud1, term.cub1, term.cuf1, term.cuu1):
            assert next(iter_parse(term, sequence))[1].will_move
        if term.cub(333):
            assert next(iter_parse(term, term.cub(333)))[1].will_move
        if term.cuf(333):
            assert next(iter_parse(term, term.cuf(333)))[1].will_move
        for sequence in (term.home, term.restore, term.clear):
            assert next(iter_parse(term, sequence))[1].will_move
    child(all_terms)
def test_foreign_sequences():
    """Sequences emitted by other programs are still measured correctly."""
    @as_subprocess
    def child(kind):
        from blessed.sequences import measure_length
        term = TestTerminal(kind=kind)
        # a bare SGR reset not produced by this terminal object
        foreign = u'\x1b[m'
        assert measure_length(foreign, term) == len(foreign)
    child(kind='ansi')
|
nilq/baby-python
|
python
|
def get_token(fi='dont.mess.with.me'):
    """Return the first line of file *fi*, stripped of surrounding whitespace."""
    with open(fi, 'r') as token_file:
        first_line = token_file.readline()
    return first_line.strip()
# Module-level convenience: read the token once at import time.
# NOTE(review): importing this module raises IOError/FileNotFoundError
# when the token file is absent — confirm callers expect that.
t = get_token()
|
nilq/baby-python
|
python
|
import re
from address_extractor import (
unit_type,
zipcode,
street_direction,
street_type,
cities,
)
class InvalidAddressError(Exception):
    """Raised by Address._extract_* helpers to abort parsing.

    Caught inside Address._parse(); Address.error carries the reason.
    """
    pass
class Address(object):
    """Best-effort parser for a US street address given as a token list.

    Parsing runs immediately in ``__init__``.  On success ``is_valid`` is
    True and the parts are exposed as properties (``street_number``,
    ``street_name``, ``city``, ...).  On failure ``error`` holds a short
    message and the string form renders empty.
    """

    def __init__(self, tokens):
        # An address never needs more than 11 tokens; extra input is ignored.
        self.tokens = tuple(self._clean_tokens(tokens[:11]))
        self.street_number_index = None
        self.street_direction_index = None
        self.street_name_range = None
        self.street_type_index = None
        self.unit_range = None
        # BUG FIX: the unit_number property reads this attribute, but it was
        # never initialized (the assignment was commented out), so accessing
        # ``unit_number`` raised AttributeError.
        self.unit_number_index = None
        self.city_range = None
        self.state_index = None
        self.zipcode_index = None
        self.error = None
        self._remaining_indices = []
        self._parse()

    def _clean_tokens(self, original_tokens):
        """Drop '.'/',' punctuation; split a leading '#' into its own token."""
        tokens = []
        for token in original_tokens:
            cleaned = token.replace(".", "").replace(",", "")
            if cleaned.startswith("#"):
                cleaned = cleaned.replace("#", "")
                tokens.append("#")
            tokens.append(cleaned)
        return tokens

    @property
    def is_valid(self):
        """True when parsing completed without recording an error."""
        return self.error is None

    def _ordered_parts(self):
        """Address parts in canonical rendering order (entries may be None)."""
        return [
            self.street_number,
            self.street_direction,
            self.street_name,
            self.street_type,
            self.unit,
            self.city,
            self.state,
            self.zipcode,
        ]

    def _render_parts(self):
        parts = self._ordered_parts()
        return " ".join([p for p in parts if p is not None])

    def __str__(self):
        if not self.is_valid:
            return ""
        return self._render_parts()

    def __repr__(self):
        if self.error is None:
            msg = "<address_extractor.Address address: {addr}>"
            return msg.format(addr=str(self))
        else:
            msg = "<address_extractor.Address error: {err}, address: {addr}>"
            return msg.format(err=self.error, addr=self._render_parts())

    def _parse(self):
        """
        Programmatically and sequentially locate the most predictable parts
        of an address.

        Helpers set ``self.error`` and raise InvalidAddressError to abort
        early; an IndexError means the token list ran out mid-parse.
        """
        try:
            self._remaining_indices = list(range(len(self.tokens)))
            self._extract_street_number()
            self._extract_state()
            self._extract_zipcode()
            self._extract_city()
            self._remove_indices_after_zipcode()
            self._extract_street_type()
            self._extract_street_name()
            self._extract_unit()
            self._check_remaining_indices()
        except InvalidAddressError:
            pass
        except IndexError:
            self.error = "Invalid Address Format - Too short"

    @property
    def street_number(self):
        return self._get_by_index("street_number_index")

    @property
    def street_name(self):
        return self._get_by_range("street_name_range")

    @property
    def city(self):
        return self._get_by_range("city_range")

    @property
    def street_type(self):
        return self._get_by_index("street_type_index")

    @property
    def state(self):
        return self._get_by_index("state_index")

    @property
    def zipcode(self):
        return self._get_by_index("zipcode_index")

    @property
    def unit(self):
        return self._get_by_range("unit_range")

    @property
    def unit_number(self):
        return self._get_by_index("unit_number_index")

    @property
    def street_direction(self):
        return self._get_by_index("street_direction_index")

    def _get_by_index(self, name):
        """Return the token at the index stored in attribute *name*, or None."""
        index = getattr(self, name)
        if index is not None:
            return self.tokens[index]

    def _get_by_range(self, name):
        """Join tokens over the (low, high) range stored in *name*, or None."""
        ranged = getattr(self, name)
        if isinstance(ranged, tuple):
            low = ranged[0]
            high = ranged[1]
            if high == low:
                return self.tokens[low]
            else:
                return " ".join(self.tokens[low:high])

    def _extract_state(self):
        """Locate the first token recognized as a state."""
        for index in range(len(self.tokens)):
            if zipcode.is_state(self.tokens[index]):
                self._remaining_indices.remove(index)
                self.state_index = index
                return
        self.error = "State Not Found"
        raise InvalidAddressError

    def _extract_street_number(self):
        """The address must begin with a numeric street number."""
        if self.tokens[0].isnumeric():
            self.street_number_index = 0
            self._remaining_indices.remove(0)
            return
        self.error = "Invalid Street Number"
        raise InvalidAddressError

    # NOTE: a dead, duplicate definition of _extract_street_type previously
    # lived here; it was silently shadowed by the later definition below
    # and has been removed.

    def _extract_zipcode(self):
        """
        depends_on:
            - state_index

        The zipcode is expected to directly follow the state.
        """
        index = self.state_index + 1
        token = self.tokens[index]
        if zipcode.is_zipcode_5(token) or zipcode.is_zipcode_dashed(token):
            self.zipcode_index = index
            self._remaining_indices.remove(index)
            return
        self.error = "Zipcode Not Found"
        raise InvalidAddressError

    def _extract_city(self):
        """
        depends_on:
            - state_index
            - zipcode_index

        Grow a candidate city name right-to-left toward the state, accepting
        the first combination the zipcode data recognizes.
        """
        maybe_city = []
        for index in reversed(self._remaining_indices):
            if not index < self.state_index:
                # not interested in things found after the state
                continue
            maybe_city = [self.tokens[index]] + maybe_city
            # the 'st' of `st louis` is not in the zipcode info
            # so we expand the abbreviation 'st' into 'saint'
            city_parts = [cities.expand_abbreviation(p) for p in maybe_city]
            city = " ".join(city_parts)
            is_city = zipcode.is_valid_place(city, self.state, self.zipcode)
            if is_city:
                self.city_range = (index, self.state_index)
                inner_range = range(self.state_index - index)
                for inner in [x + index for x in inner_range]:
                    self._remaining_indices.remove(inner)
                return
        if not self.city_range:
            self.error = "Invalid City/State/Zipcode Combo"
            raise InvalidAddressError

    def _extract_unit(self):
        """
        No error from this method because it is optional
        depends_on:
            - city_range
            - street_type_index
        """
        start = self.street_type_index
        stop = min(self.city_range)
        unit_indices = []
        has_a_unit_type = False
        for index in reversed(self._remaining_indices):
            if index > start and index < stop:
                token = self.tokens[index]
                if unit_type.is_unit_type(token):
                    has_a_unit_type = True
                unit_indices.append(index)
        # accept an explicit unit type ("apt", "#", ...) or a lone numeric
        # token between the street type and the city
        if has_a_unit_type or (len(unit_indices) == 1 and
                               self.tokens[unit_indices[0]].isnumeric()):
            for index in unit_indices:
                self._remaining_indices.remove(index)
            self.unit_range = (min(unit_indices), stop)

    def _extract_street_type(self):
        """
        depends_on:
            - city_range

        Pick the street-type candidate closest to (but before) the city.
        """
        kept = []
        # find all candidate street types
        for index in self._remaining_indices:
            if street_type.is_valid(self.tokens[index]):
                kept.append(index)
        # we want the first index that matches closest to the start of the
        # city, so we reverse the indices and then filter for only those
        # indices that are before the city
        city_starts = min(self.city_range)
        for index in reversed(kept):
            if index >= city_starts:
                # the street type must come before the city starts
                continue
            self.street_type_index = index
            self._remaining_indices.remove(index)
            return
        self.error = "No Street Type"
        raise InvalidAddressError

    def _extract_street_name(self):
        """
        depends_on:
            - street_number_index
            - street_type_index
        """
        limit = self.street_type_index
        parts = [i for i in self._remaining_indices if i < limit]
        if len(parts) > 4:
            # BUG FIX: previously assigned to ``self.errors`` (a typo), so a
            # too-long street name aborted parsing yet the address still
            # reported is_valid == True.
            self.error = "Street name too long"
            raise InvalidAddressError
        if len(parts) == 0:
            self.error = "No Street Name"
            raise InvalidAddressError
        for i in parts:
            self._remaining_indices.remove(i)
        if len(parts) > 1:
            # a leading direction token ("N", "West", ...) is split off
            direction_index = parts[0]
            direction_token = self.tokens[direction_index]
            is_direction = street_direction.is_direction(direction_token)
            if is_direction:
                self.street_direction_index = direction_index
                parts.remove(direction_index)
        self.street_name_range = (min(parts), self.street_type_index)

    def _check_remaining_indices(self):
        """Any leftover, unclassified token invalidates the address."""
        if len(self._remaining_indices) > 0:
            self.error = "Address has unidentified parts"
            raise InvalidAddressError

    def _remove_indices_after_zipcode(self):
        """Discard tokens trailing the zipcode; they are not address parts."""
        remaining = self._remaining_indices[:]
        for index in remaining:
            if index > self.zipcode_index:
                self._remaining_indices.remove(index)
def tokenize_text(text):
    """Split *text* on runs of whitespace, dropping empty tokens.

    str.split() with no arguments is exactly equivalent to the previous
    ``re.split("\\s+", ...)`` plus empty-token filtering, and avoids the
    invalid-escape-sequence warning from the non-raw '\\s' pattern.
    """
    return text.split()
def extract_all(text):
    """Scan *text* and return an Address object for every candidate found.

    Every numeric token begins a candidate parse.  Note the returned list
    includes invalid Address objects as well — callers must check
    ``is_valid`` on each entry.
    """
    found = []
    tokens = tokenize_text(text)
    resume_at = 0
    for position, token in enumerate(tokens):
        if position < resume_at:
            # still inside a previously matched address
            continue
        if not token.isnumeric():
            continue
        candidate = Address(tokens[position:])
        if candidate.is_valid:
            # jump past the matched address; its zipcode is the last token
            resume_at = position + candidate.zipcode_index + 1
        found.append(candidate)
    return found
|
nilq/baby-python
|
python
|
import os.path
from newsplease.pipeline.pipelines.elements.extracted_information_storage import ExtractedInformationStorage
class HtmlFileStorage(ExtractedInformationStorage):
    """
    Persists the raw HTML of a crawled page on the local filesystem.
    """

    def process_item(self, item, spider):
        """Write the spider response body to the item's absolute local path."""
        destination = item['abs_local_path']
        self.log.info("Saving HTML to %s", destination)
        # create any missing parent directories before writing
        os.makedirs(os.path.dirname(destination), exist_ok=True)
        with open(destination, 'wb') as html_out:
            html_out.write(item['spider_response'].body)
        return item
|
nilq/baby-python
|
python
|
import json
from decimal import Decimal
import jwt
from django.core import mail
from mock import patch
from nose.tools import eq_, ok_
from amo import CONTRIB_PENDING, CONTRIB_PURCHASE
from amo.tests import TestCase
from amo.urlresolvers import reverse
from constants.payments import PROVIDER_BANGO
from market.models import Price, PriceCurrency
from users.models import UserProfile, GroupUser
from mkt.api.base import get_url, list_url
from mkt.api.tests.test_oauth import BaseOAuth
from mkt.constants import regions
from mkt.purchase.tests.utils import PurchaseTest
from mkt.site.fixtures import fixture
from mkt.webpay.models import ProductIcon
from stats.models import Contribution
class TestPrepare(PurchaseTest, BaseOAuth):
    """API tests for the webpay 'prepare' endpoint (purchase JWT creation)."""

    fixtures = fixture('webapp_337141', 'user_2519', 'prices')

    def setUp(self):
        # BaseOAuth.setUp wires up the OAuth client for the 'webpay' API
        BaseOAuth.setUp(self, api_name='webpay')
        self.create_switch('marketplace')
        self.list_url = list_url('prepare')
        self.user = UserProfile.objects.get(pk=2519)

    def test_allowed(self):
        # only POST is permitted on the prepare resource
        self._allowed_verbs(self.list_url, ['post'])

    def test_anon(self):
        # anonymous clients are rejected with 401
        eq_(self.anon.post(self.list_url, data={}).status_code, 401)

    def test_good(self):
        self.setup_base()
        self.setup_package()
        res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
        contribution = Contribution.objects.get()
        eq_(res.status_code, 201)
        # the response links back to the status resource for this contribution
        eq_(res.json['contribStatusURL'], reverse('api_dispatch_detail',
            kwargs={'api_name': 'webpay', 'resource_name': 'status',
                    'uuid': contribution.uuid}))
        ok_(res.json['webpayJWT'])

    @patch('mkt.webapps.models.Webapp.has_purchased')
    def test_already_purchased(self, has_purchased):
        # a repeat purchase attempt is rejected with 409 Conflict
        has_purchased.return_value = True
        self.setup_base()
        self.setup_package()
        res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
        eq_(res.status_code, 409)
        eq_(res.content, '{"reason": "Already purchased app."}')

    def _post(self):
        # helper: POST a prepare request for the fixture app
        return self.client.post(self.list_url,
                                data=json.dumps({'app': 337141}))

    def test_waffle_fallback(self):
        # users carrying the 'override-app-purchase' waffle flag may still
        # purchase when settings.PURCHASE_LIMITED is enabled
        self.setup_base()
        self.setup_package()
        flag = self.create_flag('override-app-purchase', everyone=None)
        flag.users.add(self.user.user)
        with self.settings(PURCHASE_LIMITED=True):
            eq_(self._post().status_code, 201)
class TestStatus(BaseOAuth):
    """Tests for the webpay purchase-status detail endpoint."""
    fixtures = fixture('webapp_337141', 'user_2519')
    def setUp(self):
        super(TestStatus, self).setUp(api_name='webpay')
        self.contribution = Contribution.objects.create(
            addon_id=337141, user_id=2519, type=CONTRIB_PURCHASE,
            uuid='some:uid')
        self.get_url = ('api_dispatch_detail', {
            'api_name': 'webpay', 'resource_name': 'status',
            'uuid': self.contribution.uuid})
    def test_allowed(self):
        # Status is read-only.
        self._allowed_verbs(self.get_url, ['get'])
    def test_get(self):
        # A completed purchase contribution reports 'complete'.
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        eq_(res.json['status'], 'complete')
    def test_no_contribution(self):
        # A missing contribution is reported as 'incomplete', not as a 404.
        self.contribution.delete()
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200, res.content)
        eq_(res.json['status'], 'incomplete', res.content)
    def test_incomplete(self):
        # A still-pending contribution is 'incomplete'.
        self.contribution.update(type=CONTRIB_PENDING)
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200, res.content)
        eq_(res.json['status'], 'incomplete', res.content)
    def test_no_purchase(self):
        # Without a matching purchase record the status is also 'incomplete'.
        self.contribution.addon.addonpurchase_set.get().delete()
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200, res.content)
        eq_(res.json['status'], 'incomplete', res.content)
class TestPrices(BaseOAuth):
    """Tests for the webpay price tier list/detail endpoints."""
    def make_currency(self, amount, tier, currency, region):
        # Helper: attach a PriceCurrency (Bango provider) to a tier.
        return PriceCurrency.objects.create(price=Decimal(amount), tier=tier,
            currency=currency, provider=PROVIDER_BANGO, region=region.id)
    def setUp(self):
        super(TestPrices, self).setUp(api_name='webpay')
        self.price = Price.objects.create(name='1', price=Decimal(1))
        self.currency = self.make_currency(3, self.price, 'DE', regions.DE)
        self.us_currency = self.make_currency(3, self.price, 'USD', regions.US)
        self.list_url = list_url('prices')
        self.get_url = get_url('prices', self.price.pk)
        # If regions change, this will blow up.
        assert regions.BR.default_currency == 'BRL'
    def get_currencies(self, data):
        # Extract the currency codes from a serialized price tier.
        return [p['currency'] for p in data['prices']]
    def test_list_allowed(self):
        # Both list and detail are read-only.
        self._allowed_verbs(self.list_url, ['get'])
        self._allowed_verbs(self.get_url, ['get'])
    def test_single(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        eq_(res.json['pricePoint'], '1')
        eq_(res.json['name'], 'Tier 1')
        # Ensure that price is in the JSON since solitude depends upon it.
        eq_(res.json['price'], '1.00')
    def test_price_point(self):
        # Filtering by pricePoint returns exactly the matching tier.
        res = self.client.get(self.list_url + ({'pricePoint': '1'},))
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['meta']['total_count'], 1)
        eq_(data['objects'][0]['pricePoint'], '1')
    def test_list(self):
        res = self.client.get(self.list_url)
        eq_(res.json['meta']['total_count'], 1)
        self.assertSetEqual(self.get_currencies(res.json['objects'][0]),
                            ['USD', 'DE'])
    def test_list_filtered(self):
        # Currencies on other providers are excluded when filtering by bango.
        self.currency.update(provider=0)
        res = self.client.get(self.list_url + ({'provider': 'bango'},))
        eq_(self.get_currencies(res.json['objects'][0]), ['USD'])
    def test_prices(self):
        res = self.client.get(self.get_url)
        eq_(res.status_code, 200)
        self.assertSetEqual(self.get_currencies(res.json), ['USD', 'DE'])
    def test_prices_filtered(self):
        self.currency.update(provider=0)
        res = self.client.get(self.get_url + ({'provider': 'bango'},))
        eq_(res.status_code, 200)
        self.assertSetEqual(self.get_currencies(res.json), ['USD'])
    def test_has_cors(self):
        self.assertCORS(self.client.get(self.get_url), 'get')
    @patch('mkt.webpay.resources.PriceResource.dehydrate_prices')
    def test_other_cors(self, prices):
        # CORS headers must be present even on a 500 response.
        prices.side_effect = ValueError
        res = self.client.get(self.get_url)
        eq_(res.status_code, 500)
        self.assertCORS(res, 'get')
    def test_locale(self):
        # A pt-BR client gets the BRL price formatted for that locale.
        self.make_currency(5, self.price, 'BRL', regions.BR)
        res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
        eq_(res.status_code, 200)
        eq_(res.json['localized']['locale'], 'R$5,00')
    def test_locale_list(self):
        # Check that for each price tier a different localisation is
        # returned.
        self.make_currency(2, self.price, 'BRL', regions.BR)
        price_two = Price.objects.create(name='2', price=Decimal(1))
        self.make_currency(12, price_two, 'BRL', regions.BR)
        res = self.client.get(self.list_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
        eq_(res.status_code, 200)
        eq_(res.json['objects'][0]['localized']['locale'], 'R$2,00')
        eq_(res.json['objects'][1]['localized']['locale'], 'R$12,00')
    def test_no_locale(self):
        # This results in a region of BR and a currency of BRL. But there
        # isn't a price tier for that currency. So we don't know what to show.
        res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
        eq_(res.status_code, 200)
        eq_(res.json['localized'], {})
class TestNotification(BaseOAuth):
    """Tests for the transaction-failure notification endpoint (PATCH)."""
    fixtures = fixture('webapp_337141', 'user_2519')
    def setUp(self):
        super(TestNotification, self).setUp(api_name='webpay')
        self.grant_permission(self.profile, 'Transaction:NotifyFailure')
        self.contribution = Contribution.objects.create(addon_id=337141,
            uuid='sample:uuid')
        # A list so test_not_there can mutate the pk in place.
        self.get_url = ['api_dispatch_detail',
                        {'resource_name': 'failure',
                         'pk': self.contribution.pk}]
        self.list_url = ('api_dispatch_list', {'resource_name': 'failure'})
    def test_list_allowed(self):
        # Failures can only be PATCHed.
        self._allowed_verbs(self.get_url, ['patch'])
    def test_notify(self):
        # A failure notification emails the app contact with the failing URL.
        url = 'https://someserver.com'
        res = self.client.patch(self.get_url,
                                data=json.dumps({'url': url, 'attempts': 5}))
        eq_(res.status_code, 202)
        eq_(len(mail.outbox), 1)
        msg = mail.outbox[0]
        assert url in msg.body
        eq_(msg.recipients(), [u'steamcube@mozilla.com'])
    def test_no_permission(self):
        # Without the NotifyFailure group the caller is rejected.
        GroupUser.objects.filter(user=self.profile).delete()
        res = self.client.patch(self.get_url, data=json.dumps({}))
        eq_(res.status_code, 401)
    def test_missing(self):
        # An empty payload is a validation error.
        res = self.client.patch(self.get_url, data=json.dumps({}))
        eq_(res.status_code, 400)
    def test_not_there(self):
        self.get_url[1]['pk'] += 1
        res = self.client.patch(self.get_url, data=json.dumps({}))
        eq_(res.status_code, 404)
    def test_no_uuid(self):
        # Contributions without a uuid are not addressable.
        self.contribution.update(uuid=None)
        res = self.client.patch(self.get_url, data=json.dumps({}))
        eq_(res.status_code, 404)
class TestProductIconResource(BaseOAuth):
    """Tests for the product icon resource (async fetch on POST, public GET)."""
    fixtures = fixture('webapp_337141', 'user_2519')
    def setUp(self):
        super(TestProductIconResource, self).setUp(api_name='webpay')
        self.list_url = list_url('product/icon')
        # Patch out the celery task so no network fetch happens.
        p = patch('mkt.webpay.resources.tasks.fetch_product_icon')
        self.fetch_product_icon = p.start()
        self.addCleanup(p.stop)
        self.data = {
            'ext_size': 128,
            'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
            'size': 64
        }
    def post(self, data, with_perms=True):
        # Helper: POST icon data, optionally granting the Create permission.
        if with_perms:
            self.grant_permission(self.profile, 'ProductIcon:Create')
        return self.client.post(self.list_url, data=json.dumps(data))
    def test_list_allowed(self):
        self._allowed_verbs(self.list_url, ['get', 'post'])
    def test_missing_fields(self):
        res = self.post({'ext_size': 1})
        eq_(res.status_code, 400)
    def test_post(self):
        # POST is accepted (202) and queues the async icon fetch.
        res = self.post(self.data)
        eq_(res.status_code, 202)
        self.fetch_product_icon.delay.assert_called_with(self.data['ext_url'],
                                                         self.data['ext_size'],
                                                         self.data['size'])
    def test_post_without_perms(self):
        res = self.post(self.data, with_perms=False)
        eq_(res.status_code, 401)
    def test_anon_get(self):
        # Icon lookups are public and filterable by the external attributes.
        data = {
            'ext_size': 128,
            'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
            'size': 64,
            'format': 'png'
        }
        icon = ProductIcon.objects.create(**data)
        # We don't need to filter by these:
        data.pop('size')
        data.pop('format')
        res = self.anon.get(self.list_url, data=data)
        eq_(res.status_code, 200)
        ob = json.loads(res.content)['objects'][0]
        eq_(ob['url'], icon.url())
class TestSigCheck(TestCase):
    """Tests the sig_check view: it must mint a JWT webpay can verify."""
    def test(self):
        key = 'marketplace'
        aud = 'webpay'
        secret = 'third door on the right'
        with self.settings(APP_PURCHASE_SECRET=secret,
                           APP_PURCHASE_KEY=key,
                           APP_PURCHASE_AUD=aud):
            res = self.client.post(reverse('webpay.sig_check'))
        eq_(res.status_code, 201, res)
        data = json.loads(res.content)
        # Decoding with the shared secret also verifies the signature.
        req = jwt.decode(data['sig_check_jwt'].encode('ascii'), secret)
        eq_(req['iss'], key)
        eq_(req['aud'], aud)
        eq_(req['typ'], 'mozilla/payments/sigcheck/v1')
|
nilq/baby-python
|
python
|
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib import animation
from optimisation import FireflyOptimizer
import numpy as np
from optimisation import Ackley
# Animate a firefly swarm optimizing the 2-D Ackley function.
f_alg = FireflyOptimizer(population_size=10, problem_dim=2, generations=100)
func = Ackley(2)
# Background: Ackley surface sampled on a 100x100 grid over [-5, 5]^2.
N = 100
x = np.linspace(-5, 5, N)
y = np.linspace(-5, 5, N)
X, Y = np.meshgrid(x, y)
z = func.get_y_2d(X, Y)
# dt = 1. / 30
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = fig.add_subplot(111, aspect='equal', xlim=(-5, 5), ylim=(-5, 5)) # autoscale_on=False)
cs = ax.contourf(X, Y, z, cmap=cm.PuBu_r)
cbar = fig.colorbar(cs)
# Empty line artist updated each frame with the firefly positions.
particles, = ax.plot([], [], 'bo', ms=6)
# NOTE(review): anchor (-5, 5) puts this 10x10 frame above the visible axes
# ((-5,-5) was probably intended) -- confirm before relying on the border.
rect = plt.Rectangle([-5, 5], 10, 10, ec='none', lw=2, fc='none')
ax.add_patch(rect)
def init():
    """Blit initializer for FuncAnimation: empty swarm, hidden border.

    Returns the artists FuncAnimation should redraw (particles, rect).
    """
    # The original `global f_alg, rect` declaration was a no-op: nothing
    # here is reassigned, so reading module-level objects needs no global.
    particles.set_data([], [])
    rect.set_edgecolor('none')
    return particles, rect
def animate(i):
    """Per-frame callback: plot current firefly positions, then advance one step."""
    global f_alg, rect, ax, fig
    # Marker size scaled to the figure so dots keep a constant data-space size.
    ms = int(fig.dpi * 2 * 0.04 * fig.get_figwidth()
             / np.diff(ax.get_xbound())[0])
    rect.set_edgecolor('k')
    # Collect positions BEFORE stepping, so frame i shows generation i.
    x = []
    y = []
    for ind in f_alg.population:
        x.append(ind.position[0])
        y.append(ind.position[1])
    f_alg.step()
    particles.set_data(x, y)
    particles.set_markersize(ms)
    return particles, rect
# Render 200 frames; blitting redraws only the returned artists.
ani = animation.FuncAnimation(fig, animate, frames=200, interval=10,
                              blit=True, init_func=init)
# Requires ffmpeg with libx264 on PATH, and an existing videos/ directory.
ani.save('videos/ackley_firefly.mp4', fps=5, extra_args=['-vcodec', 'libx264'])
plt.show()
#animate_firefly_convergence()
|
nilq/baby-python
|
python
|
import socket
import pickle
import pandas as pd
import matplotlib.pyplot as plt
def graph_setting():
    """Enable matplotlib interactive mode and return a fresh Axes for live plotting."""
    plt.ion()
    figure, axes = plt.subplots()
    return axes
def data_get(df, conn, i):
    """Receive one pickled float from `conn` and append it as a row of `df`.

    Parameters:
        df: accumulating DataFrame; its own column name is reused for the
            new row so the four data1..data4 frames stay well-formed.
        conn: connected socket delivering one pickled value per recv().
        i: sample index (unused; kept for interface compatibility).

    Returns the DataFrame with the value appended and the index renumbered.
    """
    # NOTE(review): recv(1024) assumes one pickle message per packet --
    # confirm the sender never coalesces or splits messages.
    raw = conn.recv(1024)
    value = float(pickle.loads(raw))
    # Bug fix: the row must use the caller's column name. The original
    # hard-coded 'data', so concat with df1..df4 (columns data1..data4)
    # produced NaN-padded extra columns and useless plots.
    row = pd.DataFrame([[value]], columns=df.columns)
    df = pd.concat([df, row], axis=0)
    return df.reset_index(drop=True)
def run_server(host='127.0.0.1', port=7788):
    """Accept one client and live-plot four pickled float streams from it.

    Each loop iteration reads four values (ABP, PLETH, AWP, CO2 -- in that
    fixed order) and redraws the four series; after 100 samples the x-axis
    becomes a sliding 100-sample window. Runs until the client disconnects.
    """
    ax = graph_setting()
    with socket.socket() as sock:
        sock.bind((host, port))
        sock.listen()
        # Blocks until exactly one client connects; no reconnection logic.
        conn, addr = sock.accept()
        df1 = pd.DataFrame(columns=['data1'])
        df2 = pd.DataFrame(columns=['data2'])
        df3 = pd.DataFrame(columns=['data3'])
        df4 = pd.DataFrame(columns=['data4'])
        i = 0
        # Initial (empty) plots establish colors and legend labels.
        ax.plot(df1, 'deepskyblue', label='IntellivueABP',dash_capstyle= 'round',dash_joinstyle = 'round')
        ax.plot(df2, 'skyblue', label='IntellivuePLETH',dash_capstyle= 'round',dash_joinstyle = 'round')
        ax.plot(df3, 'navy', label='PrimusAWP',dash_capstyle= 'round',dash_joinstyle = 'round')
        ax.plot(df4, 'dodgerblue', label='PrimusCO2',dash_capstyle= 'round',dash_joinstyle = 'round')
        ax.set_facecolor('blanchedalmond')
        while True:
            # The sender must transmit the four streams in this exact order.
            df1 = data_get(df1, conn, i)
            df2 = data_get(df2, conn, i)
            df3 = data_get(df3, conn, i)
            df4 = data_get(df4, conn, i)
            # NOTE(review): re-plotting adds new line artists every loop
            # instead of updating them -- memory grows with runtime.
            ax.plot(df1, 'deepskyblue',dash_capstyle= 'round',dash_joinstyle = 'round')
            ax.plot(df2, 'skyblue',dash_capstyle= 'round',dash_joinstyle = 'round')
            ax.plot(df3, 'navy',dash_capstyle= 'round',dash_joinstyle = 'round')
            ax.plot(df4, 'dodgerblue',dash_capstyle= 'round',dash_joinstyle = 'round')
            ax.set_xlabel('EPOCH')
            ax.set_ylabel('TXT Y Value')
            plt.legend(loc='upper left')
            if i >= 100 :
                plt.xlim([i-99, i])
            plt.show()
            plt.pause(0.01)
            i += 1
if __name__ == '__main__':
    run_server()
|
nilq/baby-python
|
python
|
import re
from csv import DictReader, DictWriter
from .utils import get_headers
class Dataset(object):
    """An in-memory table: a list of dict rows plus an ordered header list."""

    def __init__(self, key=None, headers=None, data=None):
        self.headers = headers
        self.data = data
        self.key = key

    def write(self, fd, delim='\t'):
        """Serialize the rows to `fd` as a delimited file with a header line."""
        writer = DictWriter(fd, self.headers, delimiter=delim)
        writer.writeheader()
        writer.writerows(self.data)

    @classmethod
    def from_file(cls, fd, delim='\t'):
        """Build a Dataset from a delimited file whose first row is the header."""
        reader = DictReader(fd, delimiter=delim)
        data = list(reader)
        # list() so headers support concatenation in merge() (dict key
        # views do not support +).
        headers = list(data[0].keys()) if data else None
        return cls(headers=headers, data=data)

    def merge(self, other_dataset):
        """Return a new Dataset with the union of headers and all rows."""
        # list() guards against header containers that don't support +.
        return Dataset(headers=set(list(self.headers) +
                                   list(other_dataset.headers)),
                       data=(self.data + other_dataset.data))

    def map(self, transform):
        """Return a new Dataset where each row is updated with transform(row)."""
        def dict_update(row, extra):
            row.update(extra)
            return row
        data = [dict_update(row, transform(row)) for row in self.data]
        return Dataset(headers=get_headers(data), data=data)

    def filter(self, predicate):
        """Return a new Dataset keeping only rows where predicate(row) is true."""
        data = [row for row in self.data if predicate(row)]
        return Dataset(headers=get_headers(data), data=data)

    def select(self, columns):
        """Return a new Dataset restricted to the given column spec."""
        col_spec = self._select_columns(columns)
        # A header survives if either its index or its name was selected.
        headers = [header for i, header in enumerate(self.headers)
                   if i in col_spec or header in col_spec]
        data = [{header: row[header] for header in headers} for row in self.data]
        return Dataset(headers=headers, data=data)

    def group_by(self, columns):
        # return GroupedDataset(groups)
        pass  # returns GroupedDataset

    def agg(self, agg_func):
        """Aggregate all rows with agg_func."""
        return agg_func(self.data)

    def _select_columns(self, columns):
        """Resolve a column spec into a set of column indexes and/or names.

        `columns` may be a single string or a list of strings; each string
        is a comma-separated list of terms. A term is a column name, a bare
        index like '5', or a range 'a-b' (inclusive; either end may be
        omitted -- a missing start means 0, a missing end means the last
        column).

        Examples:
            '1-3'       -> {1, 2, 3}
            '1-3,8-12'  -> {1, 2, 3, 8, 9, 10, 11, 12}
            'that'      -> {'that'}
            'this,that' -> {'this', 'that'}
        """
        # Fix: `basestring` is Python 2 only and raised NameError on Python 3.
        if isinstance(columns, str):
            columns = [columns]
        # Fix: split comma-separated specs. The original parsed '1-3,8-12'
        # as one term and silently dropped everything after the first range.
        terms = [term for spec in columns for term in spec.split(',') if term]
        col_spec = set()
        for col in terms:
            if col.startswith('-') or col[0] in '0123456789':
                if '-' not in col:
                    # Fix: a bare index like '5' crashed the regex branch.
                    col_spec.add(int(col))
                    continue
                m = re.match(r'(?P<first>[0-9]*)-(?P<last>[0-9]*)', col)
                first = int(m.group('first')) if m.group('first') else 0
                # Fix: ranges are inclusive per the documented examples;
                # the original excluded the upper bound.
                last = (int(m.group('last')) + 1 if m.group('last')
                        else len(self.headers))
                col_spec |= set(range(first, last))
            else:
                col_spec.add(col)
        return col_spec
class GroupedDataset(object):
    """Placeholder for the result of Dataset.group_by: a collection of row groups."""
    def __init__(self, groups):
        # groups: presumably one Dataset per grouping key -- TODO confirm
        # once group_by is implemented.
        self.groups = groups
    def agg(self, agg_func):
        # Not implemented yet; intended to aggregate each group independently.
        pass
        #return [group.agg(agg_func) for group in self.groups]
# def test_parse():
# test_cases = [
# '1-3',
# '1-magic',
# 'a-c',
# '1-3,5-7'
# ]
# for test in test_cases:
# print test, parse(test)
#number = d+
#range = number|(number?-number?)
#name = [a-zA-Z0-9_-]+
#term = (name|range)
#statement = term(,term)*
#from pyparsing import *
#dash = Literal('-')
#lodash = Literal('_')
#comma = Literal(',')
#number = Word(nums)
#range_ = number | Combine(Optional(number) + dash + Optional(number))
#name = Word(alphas + nums + '_')
#term = name | range_
#statement = term | Combine(term + Optional(Combine(comma + ZeroOrMore(term))))
#return statement.parseString(string)
#for x in tmp:
#y = x.split('-')
#if len(y) == 0: continue
#if len(y) == 1: cols.add(int(y[0]))
#if len(y) == 2: cols.update(range(int(y[0]), int(y[1])+1))
#if len(y) > 2: raise ValueError("Misformatted columnspec.")
#return sorted(list(cols))
|
nilq/baby-python
|
python
|
from telnetlib import Telnet
from uuid import uuid4
from time import sleep
from hashlib import md5
from os import chmod
from re import compile as compile_regex
from sys import version_info
from .abstractremoteshell import AbstractRemoteShell
from .shellresult import ShellResult
from .streamreader import PrefixedStreamReader
from .queue import Queue
class TelnetShell(AbstractRemoteShell):
    """Remote shell over telnet: logs in, sets a unique prompt, runs commands."""
    def __init__(self, hostname, username, password=None, port=23, *args, **kwargs):
        super(TelnetShell, self).__init__(hostname, *args, **kwargs)
        # The session id doubles as the shell prompt so command output can
        # be delimited unambiguously.
        self._prompt = self._id
        self._hostname = hostname
        self._username = username
        self._password = password
        self._port = port
        self._telnet = Telnet()
        self._is_connected = False
        self._buffer = ""
        self.connect()
    def do_connect(self):
        """Open the telnet session, authenticate and configure the remote shell."""
        self._telnet.open(self._hostname, self._port)
        self._read_until("login: ")
        self._write(self._username + "\n")
        if self._password:
            self._read_until("Password: ")
            self._write(self._password + "\n")
        sleep(.1)
        # Replace the prompt with our unique marker; the echo plus the new
        # prompt account for the two reads below.
        self._write("export PS1='%s'\n" % self._prompt)
        self._read_until(self._prompt)
        self._read_until(self._prompt)
        # Wide terminal so long command lines are not wrapped by the remote.
        self._write("export COLUMNS=1024\n")
        self._read_until(self._prompt)
        self._write("stty columns 1027\n")
        self._read_until(self._prompt)
    def do_disconnect(self):
        """Close the underlying telnet connection."""
        self._telnet.close()
    def _write(self, text):
        # Mirror outgoing bytes to the spy log before sending.
        self.log_spy_write(text)
        self._telnet.write(text.encode('utf-8'))
    def _read_until(self, marker):
        # Read and log everything up to (and including) `marker`.
        out = self._telnet.read_until(marker.encode('utf-8'))
        self.log_spy_read(out)
        return out
    def readline(self):
        """Return the next output line (bytes), or None once the prompt is seen."""
        choices = [ "\n", self._prompt ]
        # telnetlib.expect wants bytes patterns on Python 3.
        if version_info[0] > 2: choices = [ bytes(x, 'utf-8') for x in choices ]
        (index, _, line) = self._telnet.expect(choices)
        self.log_spy_read(line.decode('utf-8').rstrip("\n\r"))
        if index == 0:
            return line
        return None
    # NOTE(review): mutable default `env={}` is shared across calls; safe
    # only as long as callees never mutate it -- confirm wrap_command.
    def execute_command(self, command, env={}, wait=True, check_err=False, cwd=None):
        """Run `command` remotely and return a ShellResult fed by a reader thread."""
        wrapped_command = PrefixedStreamReader.wrap_command(command, env, cwd)
        self._write(wrapped_command + "\n")
        # Discard the echoed command line.
        self.readline()
        sleep(.2)
        queue = Queue()
        PrefixedStreamReader(self, queue)
        return ShellResult(self, command, queue, wait, check_err)
    def do_reboot(self):
        """Issue a reboot on the remote host (connection will drop)."""
        self._write("reboot\n")
        sleep(.3)
|
nilq/baby-python
|
python
|
"""
二分探索
<最悪実行時間に関する漸化式>
T(n) = | Θ(1) if n = 1
| T(n/2) + c if n > 1
ループの度に検査範囲が半減するので、Θ(lgn)となる。
"""
def binary_search(A, v):
    """Return an index i with A[i] == v in the sorted sequence A, or None.

    Iterative bisection: the candidate window [lo, hi] halves on every
    probe, giving Theta(lg n) worst-case time.
    """
    lo, hi = 0, len(A) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = A[mid]
        if probe == v:
            return mid
        if v < probe:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
|
nilq/baby-python
|
python
|
#
# RawIO
# Copyright (c) 2021 Yusuf Olokoba.
#
from cv2 import findTransformECC, MOTION_TRANSLATION, TERM_CRITERIA_COUNT, TERM_CRITERIA_EPS
from numpy import asarray, eye, float32
from PIL import Image
from sklearn.feature_extraction.image import extract_patches_2d
from typing import Callable
def markov_similarity (min_probability: float=0.8, trials: int=100, patch_size: float=0.1) -> Callable[[str, str], bool]:
    """
    Create a similarity function which estimates a binomial distribution on a Markov random field defined over the image.
    In simple terms, it checks for patch correspondences :/
    We use Evangelidis & Psarakis, 2008 with Monte Carlo simulation to estimate the binomial distribution.
    Parameters:
        min_probability (float): Minimum probability for images to be considered similar, in range [0., 1.].
        trials (int): Number of Monte Carlo trials for estimating the binomial distribution.
        patch_size (float): Relative patch size for ECC trials, in range [0., 1.].
    Returns:
        callable: Pairwise image similarity function returning a boolean.
    """
    def similarity_fn (path_a: str, path_b: str) -> bool:
        # Load images
        image_a = Image.open(path_a)
        image_b = Image.open(path_b)
        # Different dimensions can never be similar; also keeps the
        # patch grids aligned below.
        if image_a.size != image_b.size:
            return False
        # Decode downscaled grayscale drafts (cheap for JPEG) and convert
        # to arrays for OpenCV.
        image_a.draft("L", (2560, 1440))
        image_b.draft("L", (2560, 1440))
        image_a = asarray(image_a)
        image_b = asarray(image_b)
        # Fixed seed so both images yield patches at the SAME locations.
        SEED = 1
        size = int(min(image_a.shape) * patch_size)
        patches_a = extract_patches_2d(image_a, (size, size), max_patches=trials, random_state=SEED)
        patches_b = extract_patches_2d(image_b, (size, size), max_patches=trials, random_state=SEED)
        # Monte Carlo: count patch pairs for which ECC alignment converges.
        IDENTITY = eye(2, 3, dtype=float32)
        CRITERIA = (TERM_CRITERIA_EPS | TERM_CRITERIA_COUNT, 50, 1e-4)
        passes = 0
        for patch_a, patch_b in zip(patches_a, patches_b):
            try:
                findTransformECC(patch_a, patch_b, IDENTITY.copy(), MOTION_TRANSLATION, CRITERIA, None, 5)
                passes += 1
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. ECC non-convergence raises
                # cv2.error; treat any failure as a non-match.
                pass
        # Fraction of converged trials estimates the match probability.
        estimator = passes / patches_a.shape[0]
        return estimator >= min_probability
    return similarity_fn
|
nilq/baby-python
|
python
|
import numpy as np
from glob import glob
import os
from sklearn.model_selection import train_test_split
base_path = "/media/ml/data_ml/EEG/deepsleepnet/data_npy"
files = glob(os.path.join(base_path, "*.npz"))
train_val, test = train_test_split(files, test_size=0.15, random_state=1337)
train, val = train_test_split(train_val, test_size=0.1, random_state=1337)
train_dict = {k: np.load(k) for k in train}
test_dict = {k: np.load(k) for k in test}
val_dict = {k: np.load(k) for k in val}
|
nilq/baby-python
|
python
|
from ..game import Actor
def test_id():
    # Each Actor gets a unique, truthy id.
    actor1 = Actor()
    actor2 = Actor()
    assert actor1.id
    assert actor2.id
    assert actor1.id != actor2.id
def test_add_food():
    # add_food accumulates onto the starting stock of 0.
    actor = Actor()
    assert actor.food == 0
    actor.add_food(10)
    assert actor.food == 10
    actor.add_food(5)
    assert actor.food == 15
def test_take_food():
    # take_food empties the stock and returns the full amount taken.
    actor = Actor()
    assert actor.food == 0
    actor.add_food(10)
    assert actor.food == 10
    assert actor.take_food() == 10
    assert actor.food == 0
def test_set_position():
    # set_position overwrites the coordinate outright.
    actor = Actor()
    actor.set_position((0, 0))
    assert actor.position == (0, 0)
    actor.set_position((5, 5))
    assert actor.position == (5, 5)
def test_set_owner():
    # set_owner reassigns the owning player's id.
    actor = Actor()
    actor.set_owner("foo")
    assert actor.owner_id == "foo"
    actor.set_owner("bar")
    assert actor.owner_id == "bar"
def test_move():
    # Two move() calls toward the same target are issued before the
    # position changes -- presumably movement takes more than one tick to
    # complete; confirm against game.Actor.move.
    actor = Actor().set_position((0, 0))
    assert actor.position == (0, 0)
    actor.move((1, 0))
    actor.move((1, 0))
    assert actor.position == (1, 0)
    actor.move((1, 1))
    actor.move((1, 1))
    assert actor.position == (1, 1)
def test_health():
    # New actors start with positive health.
    actor = Actor()
    assert actor.health > 0
def test_missing_health():
    # missing_health tracks damage taken relative to full health.
    actor = Actor()
    assert actor.missing_health == 0
    actor.deal_damage(1)
    assert actor.missing_health == 1
def test_missing_heal():
    # heal reduces missing_health, bottoming out at 0.
    actor = Actor()
    assert actor.missing_health == 0
    actor.deal_damage(10)
    assert actor.missing_health == 10
    actor.heal(5)
    assert actor.missing_health == 5
    actor.heal(5)
    assert actor.missing_health == 0
def test_alive():
    # Dealing exactly full health kills the actor.
    actor = Actor()
    assert actor.alive
    actor.deal_damage(actor.health)
    assert not actor.alive
def test_dead():
    # dead is the complement of alive.
    actor = Actor()
    assert not actor.dead
    actor.deal_damage(actor.health)
    assert actor.dead
|
nilq/baby-python
|
python
|
import deepmerge
import functools
import importlib
import logging
import yaml
from inspect import getmembers, isfunction
from urllib import parse, request
from wcmatch import fnmatch
from . import macros, middleware
log = logging.getLogger("netbox_rbac")
# Collect all public functions from macros.
functions = [
(name, fn)
for name, fn in getmembers(macros, isfunction)
if not name.startswith("_")
]
# In order to guard against external dependencies, such as Gitlab, being
# unavailable, store the last configuration.
config = None
def load(paths):
    """Load rules from the first usable path and cache them module-wide.

    Each entry in `paths` is a URL (bare paths default to the file://
    scheme). The first one that fetches and parses as YAML becomes the new
    global config. If every path fails, the errors are logged and the
    previously loaded config (None on first call) is returned, so a
    transient outage of an external source keeps the last good ruleset.
    """
    global config
    errors = []
    for path in paths:
        try:
            path = parse.urlparse(path, scheme="file")
            data = request.urlopen(path.geturl())
            config = Rule(yaml.safe_load(data))
            return config
        except Exception as err:
            errors.append("%s: %s" % (path.geturl(), err))
    # Fix: Logger.warn is a deprecated alias of Logger.warning.
    log.warning("load: no valid rules found: %s", errors)
    # Unable to load a new config, so return the current one.
    return config
class Rule:
    """The full ruleset: a name -> Role mapping built from the YAML config."""
    def __init__(self, config):
        self.roles = {}
        # Although YAML provides a mechanism for referencing one object from
        # another, it doesn't support deep merging, so we handle that manually:
        # each entry listed in a role's "base" is deep-merged into the role.
        for name, role in config.items():
            for base in role.get("base", []):
                deepmerge.always_merger.merge(role, config[base])
            # Ignore template roles (entries without "groups" exist only to
            # be inherited via "base").
            if "groups" in role:
                self.roles[name] = Role(name, **role)
    # Given the user's roles, the requested permission, and the object, returns
    # whether or not the operation is allowed.
    def has_perm(self, roles, perm, obj):
        for role in roles:
            role = self.roles.get(role)
            # Permission is granted when:
            # * The role is valid (defined in the configuration).
            # * The requested permission is granted by the role.
            # * The rule evaluates to True on the object.
            if role and role.has_perm(perm) and role.eval(obj):
                return True
        return False
class Role:
    """One named role: groups, permissions and an optional Python rule expression."""
    def __init__(self, name, **kwargs):
        self.name = name
        self.context = kwargs.get("context", {})
        self.groups = kwargs.get("groups", [])
        self.imports = kwargs.get("imports", [])
        self.perms = kwargs.get("perms", [])
        self.rule = kwargs.get("rule")
        # Pre-compile the rule expression once; eval() reuses the code object.
        if self.rule:
            self.code = compile(self.rule, "<string>", "eval")
        else:
            self.code = None
    # Returns the result of evaluating the rule on the object, if both are
    # defined. Returns True otherwise.
    def eval(self, obj):
        # NOTE(security): this eval()s an expression taken from the rule
        # configuration -- the rule files must only come from trusted sources.
        if self.code and obj:
            # Evaluation context: role-defined constants, the object itself,
            # requested imports, macros partially applied to obj, plus
            # fnmatch and the current request.
            context = {**self.context, "obj": obj}
            for name in self.imports:
                context[name] = importlib.import_module(name)
            for name, fn in functions:
                context[name] = functools.partial(fn, obj)
            context.update(
                {"fnmatch": fnmatch.fnmatch, "request": middleware.request(),}
            )
            return eval(self.code, context)
        return True
    # Returns whether or not this role provides the requested permission.
    def has_perm(self, perm):
        # wcmatch's fnmatch accepts a list of patterns, so self.perms (a
        # list of glob patterns) can be passed directly.
        return fnmatch.fnmatch(perm, self.perms)
|
nilq/baby-python
|
python
|
from django.core.paginator import Paginator
from django.shortcuts import redirect, render, get_object_or_404
from comps.models.comp import Comp
from comps.models.heat import Heat
from comps.models.heatlist_error import Heatlist_Error
def delete_heatlist_error(request, error_id):
    """Delete one heatlist error, then return to its comp's error list."""
    error = get_object_or_404(Heatlist_Error, pk=error_id)
    # Capture the parent comp before deleting so the redirect can be built.
    parent_comp = error.comp
    error.delete()
    return redirect("comps:show_heatlist_errors", parent_comp.id)
def check_heatlist_error(request, error_id):
    """Re-check one heatlist error and drop it if its heat has been fixed."""
    error = get_object_or_404(Heatlist_Error, pk=error_id)
    heat = error.heat
    # An UNKNOWN_LEVEL error is resolved once the heat has a positive base
    # value; an UNKNOWN_STYLE error once the style is no longer UNKNOWN.
    resolved = (
        (error.error == Heatlist_Error.UNKNOWN_LEVEL and heat.base_value > 0) or
        (error.error == Heatlist_Error.UNKNOWN_STYLE and heat.style != Heat.UNKNOWN)
    )
    if resolved:
        error.delete()
    return redirect('comps:heat', heat.id)
def show_heatlist_errors(request, comp_id):
    """Render a paginated (16 per page) list of heatlist errors for one comp."""
    comp = get_object_or_404(Comp, pk=comp_id)
    errors = Heatlist_Error.objects.filter(comp=comp).order_by(
        'error', 'dancer', 'heat__heat_number')
    page = Paginator(errors, 16).get_page(request.GET.get('page'))
    return render(request, "comps/show_heatlist_errors.html",
                  {'comp': comp, 'page_obj': page, })
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.6 on 2020-06-12 16:27
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the reaction-count fields
    # (Likes -> Amazing_Num, Loves -> Good_Num) on both sharing_image and
    # sharing_post. Avoid hand-editing generated migrations beyond comments.
    dependencies = [
        ('My_Account', '0017_auto_20200612_1522'),
    ]
    operations = [
        migrations.RenameField(
            model_name='sharing_image',
            old_name='Likes',
            new_name='Amazing_Num',
        ),
        migrations.RenameField(
            model_name='sharing_image',
            old_name='Loves',
            new_name='Good_Num',
        ),
        migrations.RenameField(
            model_name='sharing_post',
            old_name='Likes',
            new_name='Amazing_Num',
        ),
        migrations.RenameField(
            model_name='sharing_post',
            old_name='Loves',
            new_name='Good_Num',
        ),
    ]
|
nilq/baby-python
|
python
|
import pygame
from pygame.locals import *
from GUI.Colours import Colours
from GUI.Measures import Measures
from GUI.Point import Point
from GUI.BarPoint import BarPoint
from GUI.BearoffPoint import BearoffPoint
from GUI.Checker import Checker
class GUIBoard:
    """Draws the backgammon board surface: quadrants, triangles, points,
    checkers in their opening layout, and the point number labels."""
    def __init__(self):
        pass
    def generate_board_surface(self):
        """Build and return the static board surface (background + triangles)."""
        boardSurf = pygame.Surface((Measures.BOARDWIDTH.value, Measures.BOARDHEIGHT.value))
        boardSurf.fill(Colours.SOFT_TEAL.value)
        # quadrants
        leftHalf = Rect(Measures.BORDERSIZE.value, Measures.BORDERSIZE.value, Measures.QUADRANTWIDTH.value,
                        Measures.QUADRANTHEIGHT.value)
        rightHalf = Rect(Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value,
                         Measures.BORDERSIZE.value, Measures.QUADRANTWIDTH.value, Measures.QUADRANTHEIGHT.value)
        pygame.draw.rect(boardSurf, Colours.TEAL.value, leftHalf)
        pygame.draw.rect(boardSurf, Colours.TEAL.value, rightHalf)
        self.draw_board_triangles(boardSurf)
        # NOTE(review): the created points list is discarded here -- callers
        # apparently call create_points() themselves; confirm intent.
        self.create_points()
        return boardSurf
    def draw_board_triangles(self, surface):
        """Draw the six triangles of each of the four board quadrants."""
        width = ['left', 'right']
        height = ['top', 'bottom']
        for i in range(2):
            for j in range(2):
                self.draw_triangles(surface, width[i], height[j])
    @staticmethod
    def draw_triangles(surface, width, height):
        """Draw one quadrant's six triangles, alternating filled and outlined."""
        # Quadrant origin depends on which half (bar offset) we are in.
        if width == 'left':
            x = Measures.BORDERSIZE.value
        else:
            x = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value
        # Triangles point down from the top edge, up from the bottom edge.
        if height == 'top':
            y = Measures.BORDERSIZE.value
            tip = y + Measures.TRIANGLEHEIGHT.value
        else:
            y = Measures.BORDERSIZE.value + Measures.QUADRANTHEIGHT.value
            tip = y - Measures.TRIANGLEHEIGHT.value
        left_point = (x, y)
        right_point = (x + Measures.TRIANGLEWIDTH.value, y)
        tip_point = (x + (Measures.TRIANGLEWIDTH.value / 2), tip)
        for i in range(6):
            points = [left_point, right_point, tip_point]
            # Alternate fill/outline; the parity flips between top and bottom
            # rows so colours line up across the board.
            if i % 2 == 0 and height == 'top' or i % 2 != 0 and height == 'bottom':
                pygame.draw.polygon(surface, Colours.ORANGE.value, points)
            else:
                pygame.draw.polygon(surface, Colours.ORANGE.value, points, 2)
            # Slide one triangle width to the right for the next triangle.
            left_point = right_point
            right_point = (left_point[0] + Measures.TRIANGLEWIDTH.value, y)
            tip_point = (tip_point[0] + Measures.TRIANGLEWIDTH.value, tip)
    @staticmethod
    def create_points():
        """Build the 26 point objects: bear-off (0), points 1-24, bar (25)."""
        points = []
        point = BearoffPoint(((Measures.QUADRANTWIDTH.value * 2) + Measures.BARSIZE.value,
                              Measures.BOTTOMHEIGHT.value - Measures.BORDERSIZE.value),
                             0, (Measures.BORDERSIZE.value, Measures.BOARDHEIGHT.value + Measures.BORDERSIZE.value))
        points.append(point)
        # Points 1-6: bottom-right quadrant, right to left.
        # NOTE(review): (50, 200) appears to be the point's clickable size in
        # pixels -- confirm against the Point constructor.
        x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.TRIANGLEWIDTH.value
        y = Measures.BOARDHEIGHT.value - Measures.BORDERSIZE.value - Measures.TRIANGLEHEIGHT.value
        for i in range(1, 7):
            point = Point((50, 200), i, (x, y))
            points.append(point)
            x = x - Measures.TRIANGLEWIDTH.value
        # Points 7-12: bottom-left quadrant, right to left.
        x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.QUADRANTWIDTH.value \
            - Measures.BARSIZE.value - Measures.TRIANGLEWIDTH.value
        y = Measures.BOARDHEIGHT.value - Measures.BORDERSIZE.value - Measures.TRIANGLEHEIGHT.value
        for i in range(7, 13):
            point = Point((50, 200), i, (x, y))
            points.append(point)
            x = x - Measures.TRIANGLEWIDTH.value
        # Points 13-18: top-left quadrant, left to right.
        x = Measures.BORDERSIZE.value
        y = Measures.BORDERSIZE.value
        for i in range(13, 19):
            point = Point((50, 200), i, (x, y))
            points.append(point)
            x = x + Measures.TRIANGLEWIDTH.value
        # Points 19-24: top-right quadrant, left to right.
        x = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value
        y = Measures.BORDERSIZE.value
        for i in range(19, 25):
            point = Point((50, 200), i, (x, y))
            points.append(point)
            x = x + Measures.TRIANGLEWIDTH.value
        point = BarPoint((Measures.BARSIZE.value, Measures.QUADRANTHEIGHT.value), 25,
                         (Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value, Measures.BORDERSIZE.value))
        points.append(point)
        return points
    @staticmethod
    def create_checkers(points):
        """Place the opening position: 2/3/5/5 checkers per side, mirrored."""
        # Row 0: black counts per point; row 1: white counts (mirrored via
        # the 24-i indexing below) -- confirm colour/orientation mapping.
        board = [[0, 0, 0, 0, 0, 5, 0, 3, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0],
                 [0, 0, 0, 0, 0, 5, 0, 3, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]]
        for i in range(24):
            if board[0][i] > 0:
                for c in range(board[0][i]):
                    checker = Checker(Colours.BLACK.value)
                    points[i+1].checkers.append(checker)
        for i in range(24):
            if board[1][i] > 0:
                for c in range(board[1][i]):
                    checker = Checker(Colours.WHITE.value)
                    points[24-i].checkers.append(checker)
    @staticmethod
    def draw_point_numbers(surface):
        """Render the 1-24 labels beside their triangles (top row then bottom)."""
        font = pygame.font.Font(None, 18)
        # Top-left quadrant labels: 13-18.
        x, y = Measures.BORDERSIZE.value + 20, 15
        for i in range(12, 18):
            number = font.render(str(i + 1), True, Colours.BLACK.value)
            surface.blit(number, (x, y))
            x = x + Measures.TRIANGLEWIDTH.value
        # Top-right quadrant labels: 19-24.
        x, y = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value + 20, 15
        for i in range(18, 24):
            number = font.render(str(i + 1), True, Colours.BLACK.value)
            surface.blit(number, (x, y))
            x = x + Measures.TRIANGLEWIDTH.value
        # Bottom-right quadrant labels: 1-6 (right to left).
        x, y = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - 28, Measures.BOARDHEIGHT.value - 25
        for i in range(0, 6):
            number = font.render(str(i + 1), True, Colours.BLACK.value)
            surface.blit(number, (x, y))
            x = x - Measures.TRIANGLEWIDTH.value
        # Bottom-left quadrant labels: 7-12 (right to left).
        x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.QUADRANTWIDTH.value - \
            Measures.BARSIZE.value - 30
        y = Measures.BOARDHEIGHT.value - 25
        for i in range(6, 12):
            number = font.render(str(i + 1), True, Colours.BLACK.value)
            surface.blit(number, (x, y))
            x = x - Measures.TRIANGLEWIDTH.value
|
nilq/baby-python
|
python
|
from cloud_scanner_azure.config.azure_credential_config import (
AzureCredentialConfig)
class AzureResourceServiceConfig:
    """Configuration required for usage of AzureResourceService.

    Bundles the target subscription id with the credentials used to
    authenticate against it; both are stored verbatim.
    """

    def __init__(self, subscription_id, creds: AzureCredentialConfig):
        self.subscription_id = subscription_id
        self.credentials = creds
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Imports
import sys,re,os
import glob
import numpy as n
# Script information
__author__ = "Sergi Rodà Llordés"
__version__ ="1.0"
__maintainer__="Sergi Rodà Llordés"
__email__="sergi.rodallordes@bsc.es"
class pKa:
    """
    Wrapper around the external propka3.1 tool: computes per-residue pKa
    values and three isoelectric points (pI) for a protein PDB structure —
    folded, unfolded, and around the catalytic Ser active site.
    """

    def __init__(self,PDB,Ser_residue):
        # Name-mangled attributes hold the inputs and computed results.
        self.__PDB = PDB                    # path to the input PDB file
        self.__Ser_residue = Ser_residue    # residue number of the catalytic Ser
        self.__Results = {}                 # "RES_num" -> [residue number, pKa]
        self.__pI_folded = 0
        self.__pI_unfolded = 0
        self.__pI_active_site = 0

    @property
    def PDB(self):
        # Input PDB file path.
        return self.__PDB

    @property
    def Ser_residue(self):
        # Catalytic Ser residue number.
        return self.__Ser_residue

    @property
    def pI(self):
        # [folded, unfolded, active-site] isoelectric points.
        return [self.__pI_folded,self.__pI_unfolded,self.__pI_active_site]

    def propka(self):
        """
        Take the PDB file and calculate the pKa of titrable residues using propka

        PARAMETERS
        ----------
        PDB : string
            PDB file that wants to be added to the analysis

        OUTPUT
        ------
        Results : dict of titrable residues with the calculated pKa
        pI_folded: The isoelectric point of the protein in the folded state
        pI_unfolded: The isoelectric point of the protein in the unfolded state
        """
        # index_pKa1/index_pKa2 are positional flags used while parsing the
        # "SUMMARY OF THIS PREDICTION" section of the .pka output file.
        index_pKa1,index_pKa2 = 0,0
        try:
            os.system("propka31 %s -q"%self.__PDB)
            print("Computing pI values...")
        except:
            # NOTE(review): os.system rarely raises when the binary is
            # missing (it returns a nonzero status) — confirm this fallback
            # can actually trigger.
            print("propka is not installed. To install it git clone the following repository: https://github.com/jensengroup/propka-3.1")
            print("Then: python setup.py install --user")
            exit()
        else:
            os.system("rm *.propka_input")
            # propka writes <basename>.pka into the CWD; strip directory and
            # the ".pdb" extension from the input path to find it.
            pKa_file = open("%s.pka" %self.__PDB[self.__PDB.rindex("/")+1:-4])
            for line in pKa_file:
                if "SUMMARY OF THIS PREDICTION" in line:
                    index_pKa1=1
                    continue
                if index_pKa1!=0:
                    # Skip the header line right after the summary banner.
                    index_pKa2=index_pKa1
                    index_pKa1=0
                    continue
                if index_pKa2!=0:
                    # Fixed-column parse: residue name [3:6], number [7:10],
                    # pKa value [16:21].
                    self.__Results[line[3:6]+"_"+line[7:10]] = [int(line[7:10]),float(line[16:21])]
                if "N+" in line and index_pKa2!=0:
                    # The N-terminus entry closes the summary block.
                    self.__Results[line[3:6]+"_"+line[7:10]] = [int(line[7:10]), float(line[16:21])]
                    index_pKa2=0
                if "The pI is " in line:
                    # Line carries both folded and unfolded pI at fixed columns.
                    self.__pI_folded, self.__pI_unfolded = float(line[10:15]), float(line[29:34])
            os.system("rm *.pka")

    def Neighbouratoms(self):
        """
        Take the atoms near the active site to compute the pI around this area

        PARAMETERS
        ----------
        PDB : string
            PDB file that wants to be added to the analysis
        Ser_residue : int
            Index or number referring to the catalytic Ser residue
        Results : dict
            dict of titrable residues with the calculated pKa

        OUTPUT
        ------
        pI_active_site : pI of the active site and surroundings (10 Å)
        """
        Aux_results,values = {},[]
        # Get the coordinates of the Ser residue to look for the neighbour titrable residues
        PDB_file = open(self.__PDB, "rt")
        for line in PDB_file:
            if line[17:20]=="SER" and int(self.__Ser_residue)==int(line[23:26]) and "OG" in line:
                # OG is the Ser side-chain oxygen; use it as the active-site origin.
                x,y,z = float(line[30:38]),float(line[38:46]),float(line[46:54])
        # Get the neighbour residues and store them with the pKa value
        PDB_file = open(self.__PDB, "rt")
        for line in PDB_file:
            if "TER" in line:
                pass
            elif "ATOM" in line:
                x_aux, y_aux, z_aux = float(line[30:38].strip()), float(line[38:46].strip()), float(line[46:54].strip())
                # Keep titrable residues with any atom within 10 Å of the OG atom.
                if n.sqrt((x-x_aux)**2+(y-y_aux)**2+(z-z_aux)**2)<=float(10):
                    if line[17:20]+"_"+line[23:26] in self.__Results:
                        Aux_results[line[17:20]+"_"+line[23:26]] = self.__Results[line[17:20]+"_"+line[23:26]]
                    else:
                        pass
        self.__Results = Aux_results
        for value in list(Aux_results.values()):
            values.append(value[1])
        # Active-site pI is approximated as the mean pKa of the nearby residues.
        self.__pI_active_site = n.mean(values)

    def computepI(self):
        """
        It executes the methods of the class sequentially.
        The three computed pI values are then available via the ``pI`` property
        (this method itself returns None).
        """
        self.propka()
        self.Neighbouratoms()
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from products.models import Product, Category
class CategorySerializer(serializers.ModelSerializer):
    """Serializes a Category as its ``id`` and ``name``."""

    # NOTE(review): declaring id explicitly makes it writable/required on
    # input (DRF would otherwise treat the PK as read-only) — presumably for
    # referencing existing categories; confirm against the callers.
    id = serializers.IntegerField()

    class Meta:
        model = Category
        fields = ("id", "name")
class ProductSerializer(serializers.ModelSerializer):
    """Serializes the catalogue-facing fields of a Product."""

    class Meta:
        model = Product
        fields = (
            "id",
            "name",
            "price",
            "quantity",
            "featured",
            "description",
            "picture",
            "slug",
        )
        # Products are looked up by slug rather than primary key.
        lookup_field = "slug"
|
nilq/baby-python
|
python
|
import datetime
import json
import discord
from discord.ext import commands
import requests
'''
スプラトゥーン2絡みのコマンド
'''
class Splatoon2(commands.Cog, name='スプラトゥーン2'):
    """Discord cog exposing Splatoon 2 related commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='バイトシフト')
    async def say_salmon_schedule(self, ctx):
        '''サーモンランのスケジュールを出す'''
        url = 'https://spla2.yuu26.com/coop/schedule'
        ua = 'ShiroBot/1.0 (@frfr@mstdn.f72u.net)'
        headers = {'User-Agent': ua}
        template = '''
直近:
・ステージ: {0}
・時間: {1}~{2}
・ブキ: {3}
次:
・ステージ: {4}
・時間: {5}~{6}
・ブキ: {7}
'''

        def weapon_names(shift):
            # Previously hard-coded indices 0-3; join however many weapons
            # the API returns so a short/long list no longer raises.
            return ','.join(w['name'] for w in shift['weapons'])

        ret = requests.get(url, headers=headers)
        if ret.status_code == 200:
            # OK: report the current and the next salmon-run shift.
            data = ret.json()['result']
            await ctx.send(template.format(
                data[0]['stage']['name'],
                data[0]['start'],
                data[0]['end'],
                weapon_names(data[0]),
                data[1]['stage']['name'],
                data[1]['start'],
                data[1]['end'],
                weapon_names(data[1])
            ))
        else:
            # NG: non-200 response from the schedule API.
            await ctx.send('バイトデータの取得に失敗しました')
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
from django_countries.fields import CountryField
from django.contrib.gis.db import models
from django.db.models import Manager as GeoManager
class Site(models.Model):
    """An archaeological site with an optional map location and related dates.

    ``min_date``/``max_date`` previously duplicated the same query-and-filter
    logic; it is now shared via ``_date_values``.
    """
    id = models.AutoField(primary_key=True)
    site = models.CharField('Site name', max_length=255, blank=False, null=True)
    country = CountryField('Country', blank=True, null=True)
    data_source = models.CharField('Data Source', max_length=50, blank=True, null=True)
    #latitude = models.FloatField('Latitude', blank=True, null=True)
    #longitude = models.FloatField('Longitude', blank=True, null=True)
    altitude = models.FloatField('Altitude', blank=True, null=True)
    site_types = (('Shelter', 'Shelter'), ('Cave', 'Cave'), ('Open-air', 'Open-air'), ('Unknown', 'Unknown'))
    site_type = models.CharField('Site type', max_length=20, blank=True, null=True, choices=site_types)
    display = models.BooleanField('Flagged', blank=True, null=True)
    map_location = models.PointField(dim=2, blank=True, null=True)
    objects = GeoManager()
    notes = models.TextField(blank=True, null=True)

    def longitude(self):
        """X coordinate of map_location, or None when no location is set."""
        if self.map_location:
            return self.map_location.x
        else:
            return None

    def latitude(self):
        """Y coordinate of map_location, or None when no location is set."""
        if self.map_location:
            return self.map_location.y
        else:
            return None

    def _date_values(self):
        """Truthy ages of all Date records for this site.

        Matches the old ``filter(None, ...)`` semantics: None AND zero-valued
        ages are dropped.
        """
        return [d.date for d in Date.objects.filter(site=self) if d.date]

    def min_date(self):
        """Smallest recorded age for this site, or None if none exist."""
        date_list = self._date_values()
        return min(date_list) if date_list else None

    def max_date(self):
        """Largest recorded age for this site, or None if none exist."""
        date_list = self._date_values()
        return max(date_list) if date_list else None

    class Meta:
        managed = True
        #db_table = 'sites'

    def __unicode__(self):
        return u'%s, %s' % (self.site, self.country)

    def __str__(self):
        return f'[{self.id}] {self.site}, {self.country}'
class Date(models.Model):
    """A single dating measurement attached to a Site."""

    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    layer = models.CharField('Layer', max_length=300, blank=True, null=True)
    # Up to three industry attributions share the same verbose name.
    industry = models.CharField('Industry', max_length=100, blank=True, null=True)
    industry_2 = models.CharField('Industry', max_length=100, blank=True, null=True)
    industry_3 = models.CharField('Industry', max_length=100, blank=True, null=True)
    cat_no = models.CharField('Catalog Number', max_length=100, blank=True, null=True)
    date = models.FloatField('Age', blank=True, null=True)
    sd_plus = models.FloatField('SD Plus', blank=True, null=True)
    sd_minus = models.FloatField('SD Minus', blank=True, null=True)
    sample = models.CharField('Sample', max_length=100, blank=True, null=True)
    technique = models.CharField('Method', max_length=100, blank=True, null=True)
    corrected_date_BP = models.FloatField('Cal. Age BP', blank=True, null=True)
    plus = models.FloatField('Cal. Plus', blank=True, null=True)
    minus = models.FloatField('Cal. Minus', blank=True, null=True)
    hominid_remains = models.TextField('Hominins', blank=True, null=True)
    bibliography = models.TextField('Bibliography', blank=True, null=True)
    period = models.CharField('Period', max_length=100, blank=True, null=True)
    notes = models.TextField('Notes', blank=True, null=True)
    intcal09_max = models.FloatField('IntCal09 Max. Age', blank=True, null=True)
    intcal09_min = models.FloatField('IntCal09 Min. Age', blank=True, null=True)

    class Meta:
        managed = True
        #db_table = 'dates'

    def __unicode__(self):
        return u'%s %s %s' % (self.site,self.layer,self.industry)
class Site_plus_dates(Site):
    """Proxy model of Site used to present sites together with their dates
    (e.g. as a second admin listing); adds no fields or behavior."""

    class Meta:
        proxy = True
        managed = True
        verbose_name = "Sites and dates"
        verbose_name_plural = "Sites and dates"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Pull out yearly precipitation
# Daryl Herzmann 26 Jul 2004
import pg, dbflib, mx.DateTime, shutil, shapelib
from pyIEM import wellknowntext
# Python 2 script: export weekly soil-moisture averages per township
# as shapefiles (one per 7-day window over the 2005 season).
mydb = pg.connect('wepp','iemdb')

# Iterate 1 Mar 2005 .. 1 Nov 2005 in 7-day steps.
sts = mx.DateTime.DateTime(2005,3,1)
ets = mx.DateTime.DateTime(2005,11,1)
interval = mx.DateTime.RelativeDateTime(days=+7)
now = sts

# Map township id -> WKT polygon (reprojected to EPSG:4326).
twp = {}
rs = mydb.query("SELECT astext(transform(the_geom,4326)) as t, model_twp from iatwp ORDER by model_twp ASC").dictresult()
for i in range(len(rs)):
    twp[ rs[i]["model_twp"] ] = rs[i]["t"]

while (now < ets):
    print "Hello Heather, I am here ", now
    # One shapefile + dbf per weekly window, named weeklysm/YYYYMMDDsm.
    shp = shapelib.create("weeklysm/%ssm" % (now.strftime("%Y%m%d"), ), shapelib.SHPT_POLYGON)
    dbf = dbflib.create("weeklysm/%ssm" % (now.strftime("%Y%m%d"), ) )
    dbf.add_field("S0-10CM", dbflib.FTDouble, 8, 2)
    dbf.add_field("S10-20CM", dbflib.FTDouble, 8, 2)
    dbf.add_field("VSM", dbflib.FTDouble, 8, 2)
    # Weekly per-township averages of volumetric soil moisture and the
    # 10cm/20cm layers.
    rs = mydb.query("select model_twp, avg(vsm) as v, \
      avg(s10cm) as s10, avg(s20cm) as s20 from \
      waterbalance_by_twp WHERE valid >= '%s' and valid < '%s' \
      GROUP by model_twp ORDER by model_twp ASC" % ( \
      now.strftime("%Y-%m-%d"), (now+interval).strftime("%Y-%m-%d")\
      ) ).dictresult()
    for i in range(len(rs)):
        m = rs[i]['model_twp']
        # Convert the cached WKT geometry into a shapelib polygon.
        f = wellknowntext.convert_well_known_text( twp[m] )
        obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
        shp.write_object(-1, obj)
        dbf.write_record(i, (rs[i]['s10'],rs[i]['s20'],rs[i]['v']) )
    # Deleting the handles flushes and closes the shapefile pair.
    del dbf
    del shp
    # Ship a matching .prj so GIS tools know the projection.
    shutil.copy("static/hrap_point_4326.prj", "weeklysm/%ssm.prj" % (now.strftime("%Y%m%d"), ) )
    now += interval
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Simple websocket server to perform signaling.
#
import asyncio
import binascii
import os
import websockets
# Connected peers: random connection id -> websocket.
clients = {}


async def echo(websocket, path):
    """Relay every message from one client to all other connected clients."""
    connection_id = binascii.hexlify(os.urandom(8))
    clients[connection_id] = websocket
    try:
        async for payload in websocket:
            for peer in clients.values():
                if peer != websocket:
                    await peer.send(payload)
    finally:
        # Always deregister, even if the connection dies mid-message.
        clients.pop(connection_id)
# Start the signaling server on all interfaces, port 8765, and serve forever.
asyncio.get_event_loop().run_until_complete(
    websockets.serve(echo, '0.0.0.0', 8765))
asyncio.get_event_loop().run_forever()
|
nilq/baby-python
|
python
|
import sys
import traceback
import logging
from glass import http
from . import highlight
# Minimal HTML page rendered for non-debug error responses.
ERROR_TEMPLATE = '''
<title>{code} {title}</title>
<h1>{title}</h1>
<p>{description}</p>
'''

logger = logging.getLogger('glass.app')
class HTTPError(Exception):
    """Base class for HTTP error responses.

    Subclasses override ``code`` and ``description``. ``get_response``
    renders either the plain error page or, in debug mode with an active
    exception, the formatted traceback.
    """

    code = 500
    description = "Internal Server Error"

    def __init__(self, description='', code=None):
        # Fall back to the class-level defaults when not overridden.
        self.description = description or self.description
        self.code = code or self.code
        super().__init__(self.description)

    def get_response(self, debug=False):
        """Return the HTML body for this error."""
        if debug and None not in sys.exc_info():
            return self._format_tb(traceback.format_exc())
        return self._format_response()

    def _format_response(self):
        """Render the plain error page from ERROR_TEMPLATE."""
        status_title = http.HTTP_STATUS_CODES.get(self.code, "Error")
        return ERROR_TEMPLATE.format(code=self.code,
                                     title=status_title,
                                     description=self.description)

    def headers(self):
        """Response headers to send alongside the error page."""
        return [('Content-Type', 'text/html; charset=utf-8')]

    def _format_tb(self, tb):
        """Render the debug traceback page, syntax-highlighted if possible."""
        parts = ['<html><body> <h1> Server Error</h1>']
        try:
            parts.append(highlight.highlight(tb, 'python'))
        except Exception as e:
            # Highlighting is best-effort; fall back to the raw traceback.
            logger.info('Failed to highlight traceback [%s]' % e)
            parts.append(tb)
        parts.append('''
<h3>Note: You are seeing this traceback because
<b>Debug</b> is set to True.</h3>''')
        parts.append('</body></html>')
        return ''.join(parts)
class HTTP404(HTTPError):
    # 404: the requested path has no handler/resource.
    code = 404
    description = 'The requested url not found on this server'
class MethodNotAllow(HTTPError):
    # 405: the path exists but not for this HTTP method.
    code = 405
    # Fixed broken-English user-facing message ("The method not allow ...").
    description = 'The method is not allowed for the requested path'
class InternalServerError(HTTPError):
    # 500: unexpected failure while handling a request.
    code = 500
    description = """
Internal Server Error. An error occurs
while processing request
"""
class BadRequest(HTTPError):
    # Fixed: "Bad Request" is HTTP 400; 403 is "Forbidden" (RFC 9110).
    code = 400
    description = '''Bad Request'''
class RequestTooLarge(HTTPError):
    # 413: request body exceeded the configured size limit.
    code = 413
    description = 'Payload Too Large'
|
nilq/baby-python
|
python
|
"""
An evolving population of genotypes(Ideally for optimizing network
hyperparameters).
See genotype constraint docs in spikey/meta/series.
Examples
--------
.. code-block:: python
metagame = EvolveNetwork(GenericLoop(network, game, **params), **metagame_config,)
population = Population(metagame, **pop_config)
while not population.terminated:
fitness = population.evaluate()
population.update(fitness)
print(f"{population.epoch} - Max fitness: {max(fitness)}")
"""
import os
from copy import copy, deepcopy
import numpy as np
from spikey.module import Module, Key
from spikey.meta.backends.default import MultiprocessBackend
from spikey.logging import log, MultiLogger
class GenotypeMapping(Module):
    """
    Cache genotype-fitness matchings.

    Parameters
    ----------
    n_storing: int
        Number of genotypes to store

    Examples
    --------
    .. code-block:: python
        cache = GenotypeCache(256)
        cache.update({'a': 1}, 24)
        fitness = cache[{'a': 1}]
        print(fitness)  # -> 24
    """

    def __init__(self, n_storing: int):
        self.n_storing = n_storing
        self.genotypes = []
        self.fitnesses = []

    @staticmethod
    def _without_age(genotype: dict) -> dict:
        """Shallow copy of genotype with the bookkeeping '_age' key removed."""
        stripped = copy(genotype)
        stripped.pop("_age", None)
        return stripped

    def __getitem__(self, genotype: dict) -> float:
        """
        Return the cached fitness for genotype, or None on a miss.

        A hit also re-records the entry so recently used genotypes survive
        cache trimming.
        """
        key = self._without_age(genotype)
        try:
            position = self.genotypes.index(key)
        except ValueError:
            return None
        cached_fitness = self.fitnesses[position]
        self.update(genotype, cached_fitness)
        return cached_fitness

    def update(self, genotype: dict, fitness: float):
        """
        Record a (genotype, fitness) pair, trimming the cache to n_storing.

        No-op when n_storing is falsy. Only a shallow copy of the genotype is
        kept — callers mutate via deepcopy, so this is safe here.
        """
        if not self.n_storing:
            return
        self.genotypes.append(self._without_age(genotype))
        self.fitnesses.append(fitness)
        assert len(self.genotypes) == len(self.fitnesses), "Cache broken!"
        if len(self.genotypes) >= self.n_storing:
            self.genotypes = self.genotypes[-self.n_storing :]
            self.fitnesses = self.fitnesses[-self.n_storing :]
def run(
    fitness_func: callable,
    cache: GenotypeMapping,
    genotype: dict,
    log_fn: callable,
    filename: str,
) -> (float, bool):
    """
    Evaluate one genotype, consulting and updating the fitness cache.

    Parameters
    ----------
    fitness_func: callable
        Function to determine fitness of genotype.
    cache: GenotypeMapping
        Genotype-fitness cache.
    genotype: dict
        Current genotype to test.
    log_fn: callable
        Logging function; invoked only when filename is truthy.
    filename: str
        Destination log file, or falsy to skip logging.

    Returns
    -------
    fitness: float, terminate: bool
    """
    cached_fitness = cache[genotype]
    if cached_fitness is None:
        fitness, terminate = fitness_func(genotype)
    else:
        # Cache hits never request termination.
        fitness, terminate = cached_fitness, False
    if filename:
        # Logged on hits as well, so every evaluation leaves a record.
        log_fn(
            None,
            None,
            results={"fitness": fitness, "filename": filename},
            info=genotype,
            filename=filename,
        )
    cache.update(genotype, fitness)
    return fitness, terminate
def checkpoint_population(population: object, folder: str = "."):
    """
    Checkpoint current epoch of population in file.

    Parameters
    ----------
    population: Population
        Population to checkpoint.
    folder: str
        Folder to store checkpoint file.
    """
    import pickle

    if folder:
        try:
            os.makedirs(folder)
            print(f"Created directory {folder}!")
        except FileExistsError:
            pass
    # Prefix the checkpoint with the logger prefix when one exists.
    prefix = population.multilogger.prefix if hasattr(population, "multilogger") else ""
    checkpoint_name = f"{prefix}~EPOCH-({population.epoch:03d}).obj"
    with open(os.path.join(folder, checkpoint_name), "wb") as out_file:
        pickle.dump(population, out_file)
def read_population(folder: str = ".") -> list:
    """
    Load the population saved by the most recent epoch checkpoint.

    Parameters
    ----------
    folder: path
        Folder to find most recent checkpoint from.

    Returns
    -------
    Population Saved population object.

    Raises
    ------
    ValueError if no checkpoint files exist in folder.
    """
    import pickle

    # Checkpoint names embed a zero-padded epoch, so a lexicographic sort
    # puts the newest one last.
    checkpoints = sorted(name for name in os.listdir(folder) if "EPOCH" in name)
    if not checkpoints:
        raise ValueError(f"Could not find an previous EPOCH data in {folder}!")
    with open(os.path.join(folder, checkpoints[-1]), "rb") as in_file:
        return pickle.load(in_file)
class Population(Module):
    """
    An evolving population.
    See genotype constraint docs in spikey/meta/series.

    Parameters
    ----------
    game: MetaRL
        MetaRL game to evolve agents for.
    backend: MetaBackend, default=MultiprocessBackend(max_process)
        Backend to execute experiments with.
    max_process: int, default=16
        Number of separate processes to run experiments for
        default backend.
    kwargs: dict, default=None
        Any configuration, required keys listed in NECESSARY_KEYS.

    Examples
    --------
    .. code-block:: python
        metagame = EvolveNetwork(GenericLoop(network, game, **params), **metagame_config,)
        population = Population(metagame, **pop_config)
        while not population.terminated:
            fitness = population.evaluate()
            population.update(fitness)
            print(f"{population.epoch} - Max fitness: {max(fitness)}")
    """

    # Config keys consumed by the Module base; each is exposed as self._<key>.
    NECESSARY_KEYS = [
        Key("n_storing", "Number of genotypes to store in cache.", int),
        Key(
            "n_agents",
            "Number of agents in population per epoch.",
            (int, list, tuple, np.ndarray),
        ),
        Key(
            "n_epoch",
            "Number of epochs -- unused if n_agents is iterable.",
            int,
            default=9999,
        ),
        Key(
            "mutate_eligable_pct",
            "(0, 1] Pct of prev agents eligable to be mutated.",
            float,
        ),
        Key(
            "max_age",
            "Max age agent can reach before being removed from mutation/crossover/survivor pools.",
            int,
        ),
        Key(
            "random_rate",
            "(0, 1) Percent agents in population to generate randomly.",
            float,
        ),
        Key(
            "survivor_rate",
            "(0, 1) Percent(new generation) previous generation preserved/turn.",
            float,
        ),
        Key(
            "mutation_rate",
            "(0, 1) Percent(new generation) previous generation mutated/turn.",
            float,
        ),
        Key(
            "crossover_rate",
            "(0, 1) Percent(new generation) previous generation crossed over/turn.",
            float,
        ),
        Key("logging", "Whether to log or not.", bool, default=True),
        Key("log_fn", "f(n, g, r, i, filename) Logging function.", default=log),
        Key("folder", "Folder to save logs to.", str, default="log"),
    ]

    def __init__(
        self,
        game: object,
        backend: object = None,
        max_process: int = 16,
        **config,
    ):
        super().__init__(**config)
        self.genotype_constraints = game.GENOTYPE_CONSTRAINTS
        self.get_fitness = game.get_fitness
        self.backend = backend or MultiprocessBackend(max_process)
        # n_agents may be a per-epoch schedule or a single constant count.
        if isinstance(self._n_agents, (list, tuple, np.ndarray)):
            self.n_agents = list(self._n_agents)
        else:
            self.n_agents = [self._n_agents for _ in range(self._n_epoch)]
        self.epoch = 0  # For summaries
        self.terminated = False
        self.cache = GenotypeMapping(self._n_storing)
        # Initial generation is fully random.
        self.population = [self._random() for _ in range(self.n_agents[self.epoch])]
        if self._mutate_eligable_pct == 0:
            raise ValueError("mutate_eligable pct cannot be 0!")
        self._normalize_rates()
        if self._logging:
            self._setup_logging(config, game.params)

    def _normalize_rates(self):
        """
        Normalize pertinent algorithm rates to 1.
        """
        total = (
            self._random_rate
            + self._survivor_rate
            + self._mutation_rate
            + self._crossover_rate
        )
        if not total:
            raise ValueError(
                "Need nonzero value for the survivor, mutation or crossover rate."
            )
        self._random_rate /= total
        self._survivor_rate /= total
        self._mutation_rate /= total
        self._crossover_rate /= total

    def _setup_logging(self, pop_params, game_params):
        # Summarize both the population and metagame configs up front.
        self.multilogger = MultiLogger(folder=self._folder)
        info = {"population_config": pop_params}
        info.update({"metagame_info": game_params})
        self.multilogger.summarize(results=None, info=info)

    def __len__(self) -> int:
        # Current number of agents in the population.
        return len(self.population)

    def _genotype_dist(self, genotype1: dict, genotype2: dict) -> float:
        """
        Euclidean distance between two genotypes over the constrained keys.

        Parameters
        ----------
        genotype1: genotype
            Genotypes to find the distance between.
        genotype2: genotype
            Genotypes to find the distance between.

        Returns
        -------
        Euclidean distance between the two genotypes.
        """
        total = 0
        for key in self.genotype_constraints.keys():
            # Sequence-valued genes contribute element-wise.
            if isinstance(genotype1[key], (list, tuple)):
                for i in range(len(genotype1[key])):
                    total += (genotype1[key][i] - genotype2[key][i]) ** 2
                continue
            total += (genotype1[key] - genotype2[key]) ** 2
        return total ** 0.5

    def _random(self) -> dict:
        """
        Randomly generate a genotype given constraints.
        """
        # Tuple constraints are (low, high) ranges; any other sequence is a
        # discrete choice set.
        eval_constraint = (
            lambda cons: np.random.uniform(*cons)
            if isinstance(cons, tuple)
            else cons[np.random.choice(len(cons))]
        )
        genotype = {
            key: eval_constraint(constraint)
            for key, constraint in self.genotype_constraints.items()
        }
        genotype["_age"] = 0
        return genotype

    def _mutate(self, genotypes: list) -> list:
        """
        Mutate a random key of each genotype given.
        """
        if not isinstance(genotypes, (list, np.ndarray)):
            genotypes = [genotypes]
        new_genotypes = []
        for genotype in genotypes:
            new_genotype = deepcopy(genotype)  ## prevent edit of original!
            key = np.random.choice(list(self.genotype_constraints.keys()))
            cons = self.genotype_constraints[key]
            if isinstance(cons, tuple):
                new_genotype[key] = np.random.uniform(*cons)
            else:
                new_genotype[key] = cons[np.random.choice(len(cons))]
            # Mutants start over as age-0 individuals.
            new_genotype["_age"] = 0
            new_genotypes.append(new_genotype)
        return new_genotypes

    def _crossover(self, genotype1: dict, genotype2: dict) -> [dict, dict]:
        """
        Crossover two different genotypes.

        Single-point crossover over a shuffled key order: keys before the
        switch key come from one parent, keys after from the other.

        Parameters
        ----------
        genotype: dict, str: float
            Genotype.

        Returns
        -------
        2 new genotypes.
        """
        offspring1, offspring2 = {}, {}
        switch = False
        switch_key = np.random.choice(list(self.genotype_constraints.keys()))
        keys = list(self.genotype_constraints.keys())
        np.random.shuffle(keys)  # Prevent bias
        for key in keys:
            if key == switch_key:
                switch = True
            offspring1[key] = genotype1[key] if switch else genotype2[key]
            offspring2[key] = genotype2[key] if switch else genotype1[key]
        offspring1["_age"] = 0
        offspring2["_age"] = 0
        return [offspring1, offspring2]

    def update(self, f: list):
        """
        Update the population based on each agents fitness.

        Parameters
        ----------
        f: list of float
            Fitness values for each agent.
        """
        self.epoch += 1
        try:
            n_agents = self.n_agents[self.epoch]
        except (StopIteration, IndexError):
            # Schedule exhausted: evolution is finished.
            self.terminated = True
            return
        # Sort the previous generation by fitness (ascending) and drop
        # individuals that exceeded the maximum age.
        prev_gen = [(self.population[i], f[i]) for i in range(len(f))]
        prev_gen = sorted(prev_gen, key=lambda x: x[1])
        prev_gen = [value[0] for value in prev_gen if value[0]["_age"] < self._max_age]
        self.population = []
        # Fresh random individuals.
        self.population += [
            self._random() for _ in range(int(n_agents * self._random_rate))
        ]
        if int(n_agents * self._survivor_rate):  # -0 returns whole list!!
            # Fittest individuals survive unchanged, but age by one.
            survivors = [
                deepcopy(genotype)
                for genotype in prev_gen[-int(n_agents * self._survivor_rate) :]
            ]
            for genotype in survivors:
                genotype["_age"] += 1
            self.population += survivors
        # Only the top mutate_eligable_pct of the previous generation may mutate.
        mutate_candidates = prev_gen[-int(self._mutate_eligable_pct * len(prev_gen)) :]
        self.population += self._mutate(
            [
                deepcopy(genotype)
                for genotype in np.random.choice(
                    mutate_candidates, size=int(n_agents * self._mutation_rate)
                )
            ]
        )
        # Crossover produces two offspring per pair, hence // 2 iterations.
        for _ in range(int(n_agents * self._crossover_rate) // 2):
            genotype1 = np.random.choice(prev_gen)
            genotype2 = np.random.choice(prev_gen)
            self.population += self._crossover(deepcopy(genotype1), deepcopy(genotype2))
        # Rounding above may leave the population short; top up with mutants.
        if len(self) < n_agents:
            diff = n_agents - len(self)
            self.population += self._mutate(np.random.choice(prev_gen, size=diff))

    def evaluate(self) -> list:
        """
        Evaluate each agent on the fitness function.

        Returns
        -------
        Fitness values for each agent.
        """
        # One parameter tuple per agent, dispatched to the backend's run().
        params = [
            (
                self.get_fitness,
                self.cache,
                genotype,
                self._log_fn,
                next(self.multilogger.filename_generator) if self._logging else None,
            )
            for genotype in self.population
        ]
        results = self.backend.distribute(run, params)
        fitnesses = [result[0] for result in results]
        terminated = [result[1] for result in results]
        # Any agent may request termination of the whole evolution.
        if any(terminated):
            self.terminated = True
        return fitnesses
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Discover subpackages under eda/ (excluding tests) and include the root
# "eda" package itself.
packages = ['eda.' + p for p in find_packages('eda', exclude=['test', 'test*', '*.t'])]
packages.append('eda')
#packages=['eda', 'eda.components', 'eda.components.ST', 'eda.circuits'],
setup(
    name='EDA',
    version='1.0.1',
    author='Paweł Wodnicki',
    author_email='pawel@32bitmicro.com',
    url='https://github.com/32bitmicro/EDA/',
    license='BSD 3-clause',
    description='EDA for generative design.',
    test_suite='eda.tests.gen_test',
    packages=packages
)
|
nilq/baby-python
|
python
|
import gym
import retro
import os
import numpy as np
from PIL import Image
from gym import spaces
from collections import deque
import cv2
SCRIPT_DIR = os.getcwd() #os.path.dirname(os.path.abspath(__file__))
# Taken from: https://gitlab.cs.duke.edu/mark-nemecek/vel/-/blob/cfa17ddd8c328331076b3992449665ccd2471bd3/vel/openai/baselines/common/atari_wrappers.py
class WarpFrame(gym.ObservationWrapper):
    """Warp frames to 84x84 grayscale as done in the Nature DQN paper.

    Fix: the declared observation_space previously advertised uint8 values
    in [0, 255] with shape (H, W, 1), but observation() actually returns
    float32 values in [0, 1] with shape (1, H, W). The space now matches
    the returned observations.
    """

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        # Matches observation(): channel-first, normalized float frames.
        self.observation_space = spaces.Box(
            low=0.0, high=1.0,
            shape=(1, self.height, self.width), dtype=np.float32)

    def observation(self, frame):
        """Grayscale + resize via PIL, then normalize to [0, 1] float32."""
        frame = Image.fromarray(frame).convert('L').resize((self.width, self.height))
        # width == height == 84, so (1, width, height) equals (1, H, W) here.
        frame = np.array(frame).astype(np.float32).reshape(1, self.width, self.height) / 255
        return frame

    def _display_last_frame(self, img):
        # Debug helper: pop up the PIL image in an external viewer.
        img.show()
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        # Ring buffer holding the k most recent observations.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
        # NOTE(review): the space stacks on the LAST axis, but LazyFrames
        # concatenates on axis 0 — confirm which layout downstream expects.
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)

    def reset(self):
        # Duplicate the first observation so the stack is full immediately.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        # Wrap in LazyFrames to avoid copying shared frames.
        return LazyFrames(list(self.frames))
class LazyFrames(object):
    """Memory-saving wrapper around a list of observation frames.

    Common frames between observations are stored only once; the frames are
    concatenated (axis 0) lazily, the first time array access is needed.
    Convert to a numpy array only right before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Concatenate once, then drop the frame list so shared frames can
        # be garbage-collected when no longer referenced elsewhere.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        result = self._force()
        return result if dtype is None else result.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]
# Taken from: https://github.com/openai/retro/blob/master/retro/examples/discretizer.py
class Discretizer(gym.ActionWrapper):
    """
    Wrap a gym environment and make it use discrete actions.
    Args:
        combos: ordered list of lists of valid button combinations
    """
    def __init__(self, env, combos):
        super().__init__(env)
        assert isinstance(env.action_space, gym.spaces.MultiBinary)
        buttons = env.unwrapped.buttons
        # Precompute one boolean button mask per allowed combo.
        self._decode_discrete_action = []
        for combo in combos:
            arr = [False] * env.action_space.n  # np.array([False] * env.action_space.n)
            for button in combo:
                arr[buttons.index(button)] = True
            self._decode_discrete_action.append(arr)
        self.action_space = gym.spaces.Discrete(len(self._decode_discrete_action))

    def action(self, act):
        """Translate a discrete action (or list of them) to a button mask."""
        if type(act) is list:
            # Merge several discrete actions into one mask; for bool arrays
            # numpy's '+' acts as a logical OR.
            out = np.zeros((self.unwrapped.action_space.n,), dtype=bool)  # [0] * self.unwrapped.action_space.n
            for a in act:
                dec_act = self._decode_discrete_action[a].copy()
                out += dec_act
        else:
            # Copy so callers cannot mutate the precomputed mask.
            out = self._decode_discrete_action[act].copy()
        return out
# Define classes per game per buttons combo
class MarioDiscretizerSimple(Discretizer):
    """
    Use Mario Bros specific discrete actions (minimal 5-action set)
    based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
    Buttons: ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
    """
    def __init__(self, env):
        # No-op, run (B), jump (A), and the two horizontal directions.
        combo_list = [[None], ['B'], ['A'], ['LEFT'], ['RIGHT']]
        super().__init__(env=env, combos=combo_list)
class MarioDiscretizerComplex(Discretizer):
    """
    Use Mario Bros specific discrete actions (right-moving combo set)
    based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
    Buttons: ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
    """
    def __init__(self, env):
        # Full bidirectional set kept for reference:
        # combo_list = [[None],['RIGHT'],['RIGHT', 'A'],['RIGHT', 'B'],['RIGHT', 'A', 'B'],['A'], ['LEFT'],['LEFT', 'A'],['LEFT', 'B'],['LEFT', 'A', 'B'],['DOWN'],['UP']]
        combo_list = [[None],['RIGHT'],['RIGHT', 'A'],['RIGHT', 'B'],['RIGHT', 'A', 'B'],['A']]
        super().__init__(env=env, combos=combo_list)
def setup_env(env_id, level_id):
    """Create a retro env for a custom-integrated game, wrapped with WarpFrame.

    env_id/level_id must exist under the local "retro_integration" folder.
    """
    retro.data.Integrations.add_custom_path(os.path.join(SCRIPT_DIR, "retro_integration"))
    # Debug output: list custom games and confirm env_id is among them.
    print(retro.data.list_games(inttype=retro.data.Integrations.CUSTOM_ONLY))
    print(env_id in retro.data.list_games(inttype=retro.data.Integrations.CUSTOM_ONLY))
    obs_type = retro.Observations.IMAGE  # or retro.Observations.RAM
    env = retro.make(env_id, level_id, record=False, inttype=retro.data.Integrations.CUSTOM_ONLY, obs_type=obs_type)
    env = WarpFrame(env)
    # env = FrameStack(env, 4)
    return env
# x=setup_env("SMB-JU", "Level1-1")
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Demo: draw the same world map in two projections, one per subplot.
fig, axes = plt.subplots(2, 1)

axes[0].set_title("Hammer projection")
# Renamed from "map": that name shadows the Python builtin.
world_map = Basemap(projection='hammer', lon_0 = 10, lat_0 = 50, ax=axes[0])
world_map.drawmapboundary(fill_color='aqua')
world_map.fillcontinents(color='coral',lake_color='aqua')
world_map.drawcoastlines()

axes[1].set_title("Robinson projection")
world_map = Basemap(projection='robin', lon_0 = 10, lat_0 = 50, ax=axes[1])
world_map.drawmapboundary(fill_color='aqua')
world_map.fillcontinents(color='coral',lake_color='aqua')
world_map.drawcoastlines()

plt.show()
|
nilq/baby-python
|
python
|
"""
Forward-ports of types from Python 2 for use with Python 3:
- ``basestring``: equivalent to ``(str, bytes)`` in ``isinstance`` checks
- ``dict``: with list-producing .keys() etc. methods
- ``str``: bytes-like, but iterating over them doesn't product integers
- ``long``: alias of Py3 int with ``L`` suffix in the ``repr``
- ``unicode``: alias of Py3 str with ``u`` prefix in the ``repr``
"""
from past import utils
if utils.PY2:
import __builtin__
basestring = __builtin__.basestring
dict = __builtin__.dict
str = __builtin__.str
long = __builtin__.long
unicode = __builtin__.unicode
__all__ = []
else:
from .basestring import basestring
from .olddict import olddict
from .oldstr import oldstr
long = int
unicode = str
# from .unicode import unicode
__all__ = ['basestring', 'olddict', 'oldstr', 'long', 'unicode']
|
nilq/baby-python
|
python
|
from pycocotools.coco import COCO
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import random
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
from skimage import io,transform
import matplotlib.pyplot as plt
import os
import torch
from torchvision import transforms
import numpy as np
import PIL.Image as Image
class CocoValPerson(Dataset):  # inherits from torch.utils.data.Dataset
    """COCO 'person' instance dataset.

    Keeps images from a COCO split that contain at least one person and no
    crowd annotations, and returns per-image fixed-capacity tensors
    (up to 14 instances, unused slots padded with -1).
    """
    def __init__(self, cocoRoot="/disk2/mycode/common_data/coco", dataType="val2017", num_use=None):
        # cocoRoot: COCO dataset root; dataType: split name (e.g. "val2017");
        # num_use: if given, keep only the first num_use image ids.
        self.cocoRoot = cocoRoot
        self.dataType = dataType
        annFile = os.path.join(self.cocoRoot, f'annotations/instances_{self.dataType}.json')
        print(f'Annotation file: {annFile}')
        self.coco=COCO(annFile)
        # getCatIds maps a class name to its category id;
        # see the pycocotools documentation for its full capabilities.
        person_id = self.coco.getCatIds('person')[0]
        print(f'"person" 对应的序号: {person_id}')
        # loadCats maps a category id back to its metadata.
        cats = self.coco.loadCats(1)
        print(f'"1" 对应的类别名称: {cats}')
        self.imgIds = self.coco.getImgIds(catIds=[1])
        print(f'包含person的图片共有:{len(self.imgIds)}张')
        # Drop every image that has at least one crowd ("iscrowd") annotation.
        new_imgIds = []
        for i in range(len(self.imgIds)):
            imgId = self.imgIds[i]
            annIds = self.coco.getAnnIds(imgIds=imgId, catIds=[1], iscrowd=True)
            if len(annIds) == 0:
                new_imgIds.append(imgId)
        self.imgIds = new_imgIds
        print(f'筛选掉crowds mask 的图片后,剩余:{len(self.imgIds)}张')
        if num_use != None:
            self.imgIds = self.imgIds[:num_use]
            print(f'Only use {num_use} images')
    def __len__(self):
        # Number of usable images after the crowd filter.
        return len(self.imgIds)
    def __getitem__(self, index):
        # Returns (img, masks_tensor, box_tesnor, h_w_r_tensor) for one image.
        imgId = self.imgIds[index]
        imgInfo = self.coco.loadImgs(imgId)[0]
        imPath = os.path.join(self.cocoRoot, self.dataType, imgInfo['file_name'])
        img = Image.open(imPath).convert('RGB')
        img = transforms.Resize((500, 500))(img)
        img = transforms.ToTensor()(img)
        annIds = self.coco.getAnnIds(imgIds=imgId, catIds=[1])
        anns = self.coco.loadAnns(annIds)
        # Fixed-capacity outputs: 14 instance slots, empty slots stay -1.
        masks_tensor = torch.Tensor(14,500,500).fill_(-1)
        box_tesnor = torch.Tensor(14,4).fill_(-1)   # [x, y, w, h] scaled to 500x500
        h_w_r_tensor = torch.Tensor(14).fill_(-1)   # per-instance height/width ratio
        one_layer = torch.ones(1,500,500)
        zero_layer = torch.zeros(1,500,500)
        if len(annIds) >= 14:
            # NOTE(review): only logs; with 15+ annotations the loop below
            # indexes masks_tensor[14] and raises IndexError -- confirm this
            # never happens for the filtered val split.
            print(imgInfo['file_name'])
        # print(len(annIds))
        for i in range(len(annIds)):
            if anns[i]['iscrowd'] == 1:
                # Crowd anns should have been filtered in __init__;
                # log and skip if one slips through.
                print(imgInfo['file_name'])
                print(len(annIds))
                continue
            mask = self.coco.annToMask(anns[i])
            mask = torch.from_numpy(mask).float()
            mask = transforms.ToPILImage()(mask)
            mask = transforms.Resize((500, 500))(mask)
            mask = transforms.ToTensor()(mask)
            # Re-binarize after the interpolation introduced by Resize.
            mask = torch.where(mask>0.5, one_layer, zero_layer)
            masks_tensor[i] = mask
            box = anns[i]['bbox']
            h_w_r = box[3]/box[2]
            box_trans = box.copy()
            # Scale the bbox from original image coordinates to the 500x500 frame.
            box_trans[0] = box[0]/imgInfo['width'] * 500
            box_trans[1] = box[1]/imgInfo['height'] * 500
            box_trans[2] = box[2]/imgInfo['width'] * 500
            box_trans[3] = box[3]/imgInfo['height'] * 500
            box_tesnor[i] = torch.Tensor(box_trans)
            h_w_r_tensor[i] = h_w_r
        # masks_area_sort_index = torch.sort(masks_area_tensor, descending=True)[1]
        # masks_tensor_sort = masks_tensor[masks_area_sort_index]
        # vali = torch.sum(torch.sum(masks_tensor_sort, dim=-1), dim=-1)
        # masks_tensor_sort_top = masks_tensor_sort[:14]
        # masks_tensor_sort_top_len = masks_tensor_sort_top.shape[0]
        # masks_tensor_return = torch.Tensor(14,1,500,500).fill_(-1)
        # masks_tensor_return[:masks_tensor_sort_top_len] = masks_tensor_sort[:masks_tensor_sort_top_len]
        # if len(annIds) >= 14:
        #     mask = masks_tensor_return[0]
        #     mask = transforms.ToPILImage()(mask)
        #     mask.show()
        return img, masks_tensor, box_tesnor, h_w_r_tensor
if __name__=='__main__':
    # Smoke test: iterate the first 10 person images and, for every 50th
    # batch, visualize the first instance's bbox overlaid on its mask.
    data = CocoValPerson(dataType="val2017", num_use=10)
    dataloader = DataLoader(data, batch_size=1,shuffle=False)  # load samples with DataLoader
    max_len = 0  # NOTE(review): never updated below -- apparently leftover
    for epoch in range(10):
        for i_batch,batch_data in enumerate(dataloader):
            if i_batch % 50 ==0:
                img, masks, bboxes, h_w_r = batch_data
                # masks_pil = transforms.ToPILImage()(masks[0,0])
                # masks_pil.show()
                bbox = bboxes[0,0]
                cccc = masks[0,0].clone()
                # Paint the bbox region solid, then add the mask so the
                # mask/bbox overlap shows brighter after normalization.
                cccc[int(bbox[1]):int(bbox[1]+bbox[3]),int(bbox[0]):int(bbox[0]+bbox[2])] = 1
                cccc_p = cccc+masks[0,0]
                cccc_p = cccc_p/torch.max(cccc_p)
                cccc_p_pil = transforms.ToPILImage()(cccc_p)
                cccc_p_pil.show()
            print(i_batch)
|
nilq/baby-python
|
python
|
import LAMMPyS as lp
# Parse every snapshot in the LAMMPS dump file.
steps = lp.Steps('test.dump')
# Take the last snapshot and its per-atom data.
step = steps[-1]
atoms = step.atoms
|
nilq/baby-python
|
python
|
def do_training(train, train_labels, test, test_labels, num_classes):
    """Train and evaluate a 2-hidden-layer (10, 10) DNN classifier.

    train/test: dataframe-like objects whose keys() become numeric feature
    columns (assumes all-numeric columns -- TODO confirm against callers).
    Returns a tuple (classifier, eval_result).
    Relies on module-level train_input_fn / eval_input_fn helpers and
    TF1-style tf.logging / tf.estimator APIs (not defined in this snippet).
    """
    #set TensorFlow logging level to INFO
    tf.logging.set_verbosity(tf.logging.INFO)
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        # Compute feature_columns from dataframe keys using list comprehension
        feature_columns =
            [tf.feature_column.numeric_column(key=key) for key in train.keys()],
        hidden_units=[10, 10],
        n_classes=num_classes)
    # Train the Model (batch size 100, 1000 steps)
    classifier.train(
        input_fn=lambda:train_input_fn(train, train_labels,100),
        steps=1000
    )
    # Evaluate the model
    eval_result = classifier.evaluate(
        input_fn=lambda:eval_input_fn(test, test_labels,100)
    )
    return (classifier, eval_result)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import abc
# Cross-version ABC base: the old `__metaclass__ = abc.ABCMeta` attribute is
# only honored by Python 2; on Python 3 it is an ordinary class attribute and
# the abstract methods were NOT enforced (Writer could be instantiated).
# Creating the base class through ABCMeta directly works on both versions.
class Writer(abc.ABCMeta(str('_WriterBase'), (object,), {})):
    """Abstract interface for incrementally building HTML/XML output.

    Concrete subclasses must implement all methods below; instantiating a
    class that leaves any of them abstract raises TypeError.
    """

    @abc.abstractmethod
    def text(self, text):
        """Append escaped character data."""

    @abc.abstractmethod
    def start(self, name, attributes=None):
        """Open an element `name` with optional attribute mapping."""

    @abc.abstractmethod
    def end(self, name):
        """Close the element `name`."""

    @abc.abstractmethod
    def self_closing(self, name, attributes=None):
        """Emit a self-closing element `name`."""

    @abc.abstractmethod
    def append(self, html):
        """Append raw, already-serialized markup."""

    @abc.abstractmethod
    def as_string(self):
        """Return the accumulated output as a string."""
|
nilq/baby-python
|
python
|
import utime
import machine
from machine import Timer, Pin, RTC
import time
import ntptime
import micropython
import config
import control
import mqtt_reporting
def set_ntp_time(timer):
    """Timer callback: synchronize the RTC from an NTP server.

    Retries up to 9 times on OSError (transient network failures); if every
    attempt fails, the RTC is silently left unchanged.  The `timer` argument
    is unused -- it exists only to satisfy the Timer callback signature.
    """
    ntptime.host = "tempus1.gum.gov.pl"
    for i in range(1,10):
        try:
            t = ntptime.time()
            tm = utime.localtime(t)
            # RTC.datetime expects (y, mo, d, weekday, h, mi, s, subsec);
            # localtime yields (y, mo, d, h, mi, s, wd, yd), so re-pack
            # with zeroed weekday and subseconds.
            tm = tm[0:3] + (0,) + tm[3:6] + (0,)
            RTC().datetime(tm)
            break
        except OSError:
            continue
# def init_modules():
#     config.init()
# Reserve a buffer so exceptions raised inside ISRs can still be reported.
micropython.alloc_emergency_exception_buf(500)
machine.freq(160000000)  # run the MCU at 160 MHz
# Re-sync the clock from NTP every 5 minutes; also sync once at boot.
timer = Timer(-1)
timer.init(period=300*1000, mode=Timer.PERIODIC, callback=set_ntp_time)
set_ntp_time(None)
control.start(timer)
mqtt_reporting.setup()
print("\nGROWBOX INIT COMPLETE!")
# Main loop: publish MQTT status once a minute.
while True:
    mqtt_reporting.run()
    time.sleep_ms(60*1000)
|
nilq/baby-python
|
python
|
import pytest
from pathlib import Path
from yalul.lex.scanners.grouping import GroupingScanner
from yalul.lex.token_type import TokenType
@pytest.fixture(scope='function')
def open_file(request):
    """Open the lex example file named by the test's indirect parameter.

    Yields the open file object and closes it after the test finishes --
    the plain-return version leaked the file handle.
    """
    f = open(str(Path.cwd()) + "/tests/lex_examples/" + request.param)
    yield f
    f.close()
class TestShouldLex:
    """GroupingScanner.should_lex accepts only parenthesis characters."""
    def test_when_is_left_paren(self):
        assert GroupingScanner.should_lex('(')
    def test_when_is_right_paren(self):
        assert GroupingScanner.should_lex(')')
    def test_when_isnt_paren(self):
        assert not GroupingScanner.should_lex('a')
class TestCreateToken:
    """GroupingScanner.create_token yields paren tokens from the example file."""
    @pytest.mark.parametrize('open_file', ['grouping_example.yalul'], indirect=['open_file'])
    def test_create_token(self, open_file):
        # The example file is expected to start with '(' followed by ')'.
        char = open_file.read(1)
        scanner = GroupingScanner(char, open_file)
        assert scanner.create_token().type == TokenType.LEFT_PAREN
        assert scanner.create_token().type == TokenType.RIGHT_PAREN
|
nilq/baby-python
|
python
|
{"filter":false,"title":"bot.py","tooltip":"/bot.py","undoManager":{"mark":53,"position":53,"stack":[[{"start":{"row":163,"column":18},"end":{"row":163,"column":35},"action":"remove","lines":["BOT_USERNAME_HERE"],"id":2},{"start":{"row":163,"column":18},"end":{"row":163,"column":19},"action":"insert","lines":["k"]}],[{"start":{"row":163,"column":19},"end":{"row":163,"column":20},"action":"insert","lines":["r"],"id":3}],[{"start":{"row":163,"column":20},"end":{"row":163,"column":21},"action":"insert","lines":["i"],"id":4}],[{"start":{"row":163,"column":21},"end":{"row":163,"column":22},"action":"insert","lines":["t"],"id":5}],[{"start":{"row":163,"column":22},"end":{"row":163,"column":23},"action":"insert","lines":["i"],"id":6}],[{"start":{"row":163,"column":23},"end":{"row":163,"column":24},"action":"insert","lines":["b"],"id":7}],[{"start":{"row":163,"column":24},"end":{"row":163,"column":25},"action":"insert","lines":["o"],"id":8}],[{"start":{"row":163,"column":25},"end":{"row":163,"column":26},"action":"insert","lines":["t"],"id":9}],[{"start":{"row":167,"column":49},"end":{"row":167,"column":62},"action":"remove","lines":["WEBHOOK_HERE'"],"id":10},{"start":{"row":167,"column":49},"end":{"row":167,"column":96},"action":"insert","lines":["https://kik-botinteractive-k27mehta.c9users.io."]}],[{"start":{"row":167,"column":95},"end":{"row":167,"column":96},"action":"remove","lines":["."],"id":11}],[{"start":{"row":167,"column":95},"end":{"row":167,"column":96},"action":"insert","lines":["/"],"id":12}],[{"start":{"row":167,"column":96},"end":{"row":167,"column":97},"action":"insert","lines":["i"],"id":13}],[{"start":{"row":167,"column":97},"end":{"row":167,"column":98},"action":"insert","lines":["n"],"id":14}],[{"start":{"row":167,"column":98},"end":{"row":167,"column":99},"action":"insert","lines":["c"],"id":15}],[{"start":{"row":167,"column":99},"end":{"row":167,"column":100},"action":"insert","lines":["o"],"id":16}],[{"start":{"row":167,"column":100},"end":{"row":16
7,"column":101},"action":"insert","lines":["m"],"id":17}],[{"start":{"row":167,"column":101},"end":{"row":167,"column":102},"action":"insert","lines":["i"],"id":18}],[{"start":{"row":167,"column":102},"end":{"row":167,"column":103},"action":"insert","lines":["n"],"id":19}],[{"start":{"row":167,"column":103},"end":{"row":167,"column":104},"action":"insert","lines":["g"],"id":20}],[{"start":{"row":169,"column":37},"end":{"row":169,"column":38},"action":"remove","lines":["1"],"id":21}],[{"start":{"row":169,"column":37},"end":{"row":169,"column":38},"action":"insert","lines":["0"],"id":22}],[{"start":{"row":169,"column":31},"end":{"row":169,"column":32},"action":"remove","lines":["7"],"id":23}],[{"start":{"row":169,"column":30},"end":{"row":169,"column":31},"action":"remove","lines":["2"],"id":24}],[{"start":{"row":169,"column":29},"end":{"row":169,"column":30},"action":"remove","lines":["1"],"id":25}],[{"start":{"row":169,"column":29},"end":{"row":169,"column":30},"action":"insert","lines":["0"],"id":26}],[{"start":{"row":23,"column":0},"end":{"row":24,"column":0},"action":"insert","lines":["",""],"id":27}],[{"start":{"row":24,"column":0},"end":{"row":24,"column":1},"action":"insert","lines":["i"],"id":28}],[{"start":{"row":24,"column":1},"end":{"row":24,"column":2},"action":"insert","lines":["m"],"id":29}],[{"start":{"row":24,"column":2},"end":{"row":24,"column":3},"action":"insert","lines":["p"],"id":30}],[{"start":{"row":24,"column":3},"end":{"row":24,"column":4},"action":"insert","lines":["o"],"id":31}],[{"start":{"row":24,"column":4},"end":{"row":24,"column":5},"action":"insert","lines":["r"],"id":32}],[{"start":{"row":24,"column":5},"end":{"row":24,"column":6},"action":"insert","lines":["t"],"id":33}],[{"start":{"row":24,"column":6},"end":{"row":24,"column":7},"action":"insert","lines":[" 
"],"id":34}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"insert","lines":["8"],"id":35}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"insert","lines":["8"],"id":36}],[{"start":{"row":24,"column":9},"end":{"row":25,"column":0},"action":"insert","lines":["",""],"id":37}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":9},"action":"remove","lines":["88"],"id":38},{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"insert","lines":["o"]}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"insert","lines":["s"],"id":39}],[{"start":{"row":171,"column":29},"end":{"row":171,"column":30},"action":"remove","lines":["0"],"id":40}],[{"start":{"row":171,"column":29},"end":{"row":171,"column":30},"action":"insert","lines":["2"],"id":41}],[{"start":{"row":171,"column":30},"end":{"row":171,"column":31},"action":"insert","lines":["7"],"id":42}],[{"start":{"row":171,"column":31},"end":{"row":171,"column":32},"action":"insert","lines":["0"],"id":43}],[{"start":{"row":171,"column":29},"end":{"row":171,"column":30},"action":"insert","lines":["1"],"id":44}],[{"start":{"row":171,"column":32},"end":{"row":171,"column":33},"action":"remove","lines":["0"],"id":45}],[{"start":{"row":171,"column":37},"end":{"row":171,"column":38},"action":"remove","lines":["0"],"id":46}],[{"start":{"row":171,"column":37},"end":{"row":171,"column":38},"action":"insert","lines":["1"],"id":47}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":9},"action":"remove","lines":["os"],"id":48},{"start":{"row":24,"column":7},"end":{"row":24,"column":9},"action":"insert","lines":["88"]},{"start":{"row":171,"column":29},"end":{"row":171,"column":32},"action":"remove","lines":["127"]},{"start":{"row":171,"column":29},"end":{"row":171,"column":30},"action":"insert","lines":["0"]},{"start":{"row":171,"column":35},"end":{"row":171,"column":36},"action":"remove","lines":["1"]},{"start":{"row":171,"column":35},"end":{"row":171
,"column":36},"action":"insert","lines":["0"]}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"remove","lines":["8"],"id":49}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"remove","lines":["8"],"id":50}],[{"start":{"row":24,"column":7},"end":{"row":24,"column":8},"action":"insert","lines":["o"],"id":51}],[{"start":{"row":24,"column":8},"end":{"row":24,"column":9},"action":"insert","lines":["s"],"id":52}],[{"start":{"row":169,"column":104},"end":{"row":169,"column":105},"action":"insert","lines":["'"],"id":53}],[{"start":{"row":165,"column":30},"end":{"row":165,"column":32},"action":"remove","lines":["BO"],"id":54},{"start":{"row":165,"column":30},"end":{"row":166,"column":0},"action":"insert","lines":["89cd34b3-1467-400e-8036-f97bb6fb7650",""]}],[{"start":{"row":165,"column":66},"end":{"row":166,"column":14},"action":"remove","lines":["","T_API_KEY_HERE"],"id":55}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":2,"column":14},"end":{"row":2,"column":44},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1502566388668,"hash":"99ee4478e169cb6fff453b2637074a178be66821"}
|
nilq/baby-python
|
python
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List, Literal, Optional, TypedDict
from typing_extensions import NotRequired, Required
from .activity import PartialPresenceUpdate
from .voice import GuildVoiceState
from .integration import BaseIntegration, IntegrationApplication
from .role import Role
from .channel import ChannelType, StageInstance
from .interactions import Interaction
from .invite import InviteTargetType
from .emoji import Emoji, PartialEmoji
from .member import MemberWithUser
from .snowflake import Snowflake
from .message import Message
from .sticker import GuildSticker
from .appinfo import GatewayAppInfo, PartialAppInfo
from .guild import Guild, UnavailableGuild
from .user import User
from .threads import Thread, ThreadMember
from .scheduled_event import GuildScheduledEvent
# --- Connection / handshake payloads -------------------------------------

class SessionStartLimit(TypedDict):
    total: int
    remaining: int
    reset_after: int
    max_concurrency: int

class Gateway(TypedDict):
    url: str

class GatewayBot(Gateway):
    shards: int
    session_start_limit: SessionStartLimit

class ReadyEvent(TypedDict):
    v: int
    user: User
    guilds: List[UnavailableGuild]
    session_id: str
    shard: List[int]  # shard_id, num_shards
    application: GatewayAppInfo

ResumedEvent = Literal[None]

# --- Message events ------------------------------------------------------

MessageCreateEvent = Message

class MessageDeleteEvent(TypedDict):
    id: Snowflake
    channel_id: Snowflake
    guild_id: NotRequired[Snowflake]

class MessageDeleteBulkEvent(TypedDict):
    ids: List[Snowflake]
    channel_id: Snowflake
    guild_id: NotRequired[Snowflake]

class MessageUpdateEvent(Message):
    channel_id: Snowflake

class MessageReactionAddEvent(TypedDict):
    user_id: Snowflake
    channel_id: Snowflake
    message_id: Snowflake
    emoji: PartialEmoji
    member: NotRequired[MemberWithUser]
    guild_id: NotRequired[Snowflake]

class MessageReactionRemoveEvent(TypedDict):
    user_id: Snowflake
    channel_id: Snowflake
    message_id: Snowflake
    emoji: PartialEmoji
    guild_id: NotRequired[Snowflake]

class MessageReactionRemoveAllEvent(TypedDict):
    message_id: Snowflake
    channel_id: Snowflake
    guild_id: NotRequired[Snowflake]

class MessageReactionRemoveEmojiEvent(TypedDict):
    emoji: PartialEmoji
    message_id: Snowflake
    channel_id: Snowflake
    guild_id: NotRequired[Snowflake]

# --- Aliases for events whose payload is an existing type ----------------

InteractionCreateEvent = Interaction
PresenceUpdateEvent = PartialPresenceUpdate
UserUpdateEvent = User

# --- Invite events -------------------------------------------------------

class InviteCreateEvent(TypedDict):
    channel_id: Snowflake
    code: str
    created_at: str
    max_age: int
    max_uses: int
    temporary: bool
    uses: Literal[0]
    guild_id: NotRequired[Snowflake]
    inviter: NotRequired[User]
    target_type: NotRequired[InviteTargetType]
    target_user: NotRequired[User]
    target_application: NotRequired[PartialAppInfo]

class InviteDeleteEvent(TypedDict):
    channel_id: Snowflake
    code: str
    guild_id: NotRequired[Snowflake]

# --- Channel and thread events -------------------------------------------

class _ChannelEvent(TypedDict):
    id: Snowflake
    type: ChannelType

ChannelCreateEvent = ChannelUpdateEvent = ChannelDeleteEvent = _ChannelEvent

class ChannelPinsUpdateEvent(TypedDict):
    channel_id: Snowflake
    guild_id: NotRequired[Snowflake]
    last_pin_timestamp: NotRequired[Optional[str]]

class ThreadCreateEvent(Thread, total=False):
    newly_created: bool
    members: List[ThreadMember]

ThreadUpdateEvent = Thread

class ThreadDeleteEvent(TypedDict):
    id: Snowflake
    guild_id: Snowflake
    parent_id: Snowflake
    type: ChannelType

class ThreadListSyncEvent(TypedDict):
    guild_id: Snowflake
    threads: List[Thread]
    members: List[ThreadMember]
    channel_ids: NotRequired[List[Snowflake]]

class ThreadMemberUpdate(ThreadMember):
    guild_id: Snowflake

class ThreadMembersUpdate(TypedDict):
    id: Snowflake
    guild_id: Snowflake
    member_count: int
    added_members: NotRequired[List[ThreadMember]]
    removed_member_ids: NotRequired[List[Snowflake]]

# --- Guild membership and guild state events -----------------------------

class GuildMemberAddEvent(MemberWithUser):
    guild_id: Snowflake

class GuildMemberRemoveEvent(TypedDict):
    guild_id: Snowflake
    user: User

class GuildMemberUpdateEvent(TypedDict):
    guild_id: Snowflake
    roles: List[Snowflake]
    user: User
    avatar: Optional[str]
    joined_at: Optional[str]
    nick: NotRequired[str]
    premium_since: NotRequired[Optional[str]]
    deaf: NotRequired[bool]
    mute: NotRequired[bool]
    pending: NotRequired[bool]
    communication_disabled_until: NotRequired[str]

class GuildEmojisUpdateEvent(TypedDict):
    guild_id: Snowflake
    emojis: List[Emoji]

class GuildStickersUpdateEvent(TypedDict):
    guild_id: Snowflake
    stickers: List[GuildSticker]

GuildCreateEvent = GuildUpdateEvent = Guild
GuildDeleteEvent = UnavailableGuild

class _GuildBanEvent(TypedDict):
    guild_id: Snowflake
    user: User

GuildBanAddEvent = GuildBanRemoveEvent = _GuildBanEvent

class _GuildRoleEvent(TypedDict):
    guild_id: Snowflake
    role: Role

class GuildRoleDeleteEvent(TypedDict):
    guild_id: Snowflake
    role_id: Snowflake

GuildRoleCreateEvent = GuildRoleUpdateEvent = _GuildRoleEvent

class GuildMembersChunkEvent(TypedDict):
    guild_id: Snowflake
    members: List[MemberWithUser]
    chunk_index: int
    chunk_count: int
    not_found: NotRequired[List[Snowflake]]
    presences: NotRequired[List[PresenceUpdateEvent]]
    nonce: NotRequired[str]

# --- Integration and webhook events --------------------------------------

class GuildIntegrationsUpdateEvent(TypedDict):
    guild_id: Snowflake

class _IntegrationEvent(BaseIntegration, total=False):
    guild_id: Required[Snowflake]
    role_id: Optional[Snowflake]
    enable_emoticons: bool
    subscriber_count: int
    revoked: bool
    application: IntegrationApplication

IntegrationCreateEvent = IntegrationUpdateEvent = _IntegrationEvent

class IntegrationDeleteEvent(TypedDict):
    id: Snowflake
    guild_id: Snowflake
    application_id: NotRequired[Snowflake]

class WebhooksUpdateEvent(TypedDict):
    guild_id: Snowflake
    channel_id: Snowflake

# --- Stage / scheduled-event / voice / typing events ---------------------

StageInstanceCreateEvent = StageInstanceUpdateEvent = StageInstanceDeleteEvent = StageInstance
GuildScheduledEventCreateEvent = GuildScheduledEventUpdateEvent = GuildScheduledEventDeleteEvent = GuildScheduledEvent

class _GuildScheduledEventUsersEvent(TypedDict):
    guild_scheduled_event_id: Snowflake
    user_id: Snowflake
    guild_id: Snowflake

GuildScheduledEventUserAdd = GuildScheduledEventUserRemove = _GuildScheduledEventUsersEvent

VoiceStateUpdateEvent = GuildVoiceState

class VoiceServerUpdateEvent(TypedDict):
    token: str
    guild_id: Snowflake
    endpoint: Optional[str]

class TypingStartEvent(TypedDict):
    channel_id: Snowflake
    user_id: Snowflake
    timestamp: int
    guild_id: NotRequired[Snowflake]
    member: NotRequired[MemberWithUser]
|
nilq/baby-python
|
python
|
class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
        """Return how many columns of the equal-length strings in A
        are not already sorted top-to-bottom (and must be deleted)."""
        unsorted_columns = 0
        for column in zip(*A):
            column_chars = list(column)
            if column_chars != sorted(column_chars):
                unsorted_columns += 1
        return unsorted_columns
|
nilq/baby-python
|
python
|
import sys
import os
import os.path
import re
import shutil
from setuptools import setup
from setuptools.command.install_lib import install_lib
from setuptools.command.install import install
import setuptools.command.bdist_egg
import distutils.spawn
import subprocess
import sys
import glob
# Helper: drop directories from a glob result, keeping plain files only.
exclude_directories = lambda files: [ file for file in files if not os.path.isdir(file) ]
# Data files to be installed under prefix/share/limatix (see note below).
share_checklist_files=glob.glob("checklists/*")
pt_steps_files=glob.glob("pt_steps/*")
conf_files=glob.glob("conf/*")
doc_files=exclude_directories(glob.glob("doc/*"))
doc_pte_files=exclude_directories(glob.glob("doc/processtrak_example/*"))
doc_pte_creston_files=exclude_directories(glob.glob("doc/processtrak_example/creston_jan2016/*"))
xslt_files=glob.glob("xslt/*")
root_files=["README.txt","INSTALL.txt"]
#limatix_widgets_glade_catalogs_package_files=["*.xml"]
# package_data glob patterns, relative to each package's directory.
limatix_widgets_package_files=["*.glade","glade_catalogs/*"]
limatix_checklist_steps_package_files=["*.glade"]
limatix_package_files=["pt_steps/*.py","*.glade","limatix_checklists/*","limatix_conf/*", "limatix_plans/*"]
# Command-line scripts; each name maps to limatix.bin.<name>:main below
# (dashes in script names become underscores in module names).
console_scripts=["datacollect2",
                 "dc_checklist",
                 "pt_checkprovenance",
                 "dc_chx2chf",
                 "dc_glade",
                 "dc_gui",
                 "dc_paramdb2",
                 "thermal2limatix",
                 "processtrak",
                 "dc_ricohphoto",
                 "dc_xlg2dpd",
                 "pt_cleanup",
                 "limatix-git"]
gui_scripts = [] # Could move graphical scrips into here to eliminate stdio window on Windows (where would error messages go?)
console_scripts_entrypoints = [ "%s = limatix.bin.%s:main" % (script,script.replace("-","_")) for script in console_scripts ]
gui_scripts_entrypoints = [ "%s = limatix.bin.%s:main" % (script,script.replace("-","_")) for script in gui_scripts ]
# canonicalize_path configuration: examples installed to etc/, live copies shipped as package data.
canonicalize_path_config_files=["limatix/canonicalize_path/canonical_paths.conf.example","limatix/canonicalize_path/tag_index_paths.conf.example"]
canonicalize_path_package_files=["canonical_paths.conf","tag_index_paths.conf"]
# Discover checklist-step and widget modules and expose each as an entry point.
limatix_checklist_step_paths=glob.glob("limatix/steps/*.py")
limatix_checklist_step_names=[ os.path.splitext(os.path.split(path)[1])[0] for path in limatix_checklist_step_paths if not path.endswith("__init__.py")]
limatix_checklist_step_entrypoints = [ '%s = limatix.steps.%s' % (stepname,stepname) for stepname in limatix_checklist_step_names]
limatix_widget_paths=glob.glob("limatix/widgets/*.py")
limatix_widget_names=[ os.path.splitext(os.path.split(path)[1])[0] for path in limatix_widget_paths if not path.endswith("__init__.py")]
limatix_widget_entrypoints = [ '%s = limatix.widgets.%s' % (widgetname,widgetname) for widgetname in limatix_widget_names]
#package_files=["canonical_paths.conf","tag_index_paths.conf"]
# NOTE ***: share files will be installed to prefix/share/limatix
# By default, prefix is /usr so share_files to be found in
# /usr/share/limatix
# Apply hotfix to setuptools issue #130, from
# https://bitbucket.org/pypa/setuptools/issues/130/install_data-doesnt-respect-prefix
# hotfix applies at least to all setuptools versions prior to 20.2
def setuptools_command_bdist_egg_call_command_hotfix(self, cmdname, **kw):
    """Invoke reinitialized command `cmdname` with keyword args.

    Replacement for setuptools.command.bdist_egg.call_command that does NOT
    force the bdist directories onto the install_data command, so data_files
    respect the configured prefix (setuptools issue #130).  `self` is the
    bdist_egg command instance; returns the finalized command object after
    running it.
    """
    if cmdname != 'install_data':
        # Fix: qualify INSTALL_DIRECTORY_ATTRS with its module -- the bare
        # name is undefined here and raised NameError for every command
        # other than install_data.
        for dirname in setuptools.command.bdist_egg.INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname, self.bdist_dir)
    kw.setdefault('skip_build', self.skip_build)
    kw.setdefault('dry_run', self.dry_run)
    cmd = self.reinitialize_command(cmdname, **kw)
    self.run_command(cmdname)
    return cmd
# Parse the installed setuptools version into an int tuple, e.g. (20, 2, 0).
# NOTE(review): int() would fail on pre-release parts like "2b1" -- confirm
# acceptable for the setuptools versions this is deployed against.
setuptools_version=tuple([int(versionpart) for versionpart in setuptools.__version__.split(".")[:3]])
# Apply hotfix to all versions prior to 20.2
if setuptools_version < (20,2):
    setuptools.command.bdist_egg.call_command=setuptools_command_bdist_egg_call_command_hotfix
    pass
class install_lib_save_prefix_and_version(install_lib):
    """install_lib variant that also writes install_prefix.txt and
    version.txt into every installed package directory, so installed code
    can locate its share/ data and report its own version at runtime."""
    def run(self):
        install_lib.run(self)
        #sys.stderr.write("\nprefix:" + str((self.distribution.command_obj["install"].prefix))+"\n\n\n")
        #sys.stderr.write("\ninstall_dir:" + self.install_dir+"\n\n\n")
        #sys.stderr.write("\npackages:" + str(self.distribution.command_obj["build_py"].packages)+"\n\n\n")
        for package in self.distribution.command_obj["build_py"].packages:
            # e.g. "limatix.steps" -> <install_dir>/limatix/steps
            install_dir=os.path.join(*([self.install_dir] + package.split('.')))
            fh=open(os.path.join(install_dir,"install_prefix.txt"),"w")
            #fh.write(self.distribution.command_obj["install"].prefix)
            # Fix for Ubuntu: install_data seems to be the prefix
            # for where stuff is installed (?)
            fh.write(self.distribution.command_obj["install"].install_data)
            fh.close()
            fh=open(os.path.join(install_dir,"version.txt"),"w")
            fh.write("%s\n" % (version)) # version global, as created below
            fh.close()
            pass
        pass
    pass
# Extract GIT version
if os.path.exists(".git") and distutils.spawn.find_executable("git") is not None:
    # Check if tree has been modified
    modified = subprocess.call(["git","diff-index","--quiet","HEAD","--"]) != 0
    gitrev = subprocess.check_output(["git","rev-parse","HEAD"]).strip().decode('utf-8')
    # Fallback version string if "git describe" below fails.
    version = "git-%s" % (gitrev)
    # See if we can get a more meaningful description from "git describe"
    try:
        versionraw=subprocess.check_output(["git","describe","--tags","--match=v*"],stderr=subprocess.STDOUT).decode('utf-8').strip()
        # versionraw is like v0.1.0-50-g434343
        # for compatibility with PEP 440, change it to
        # something like 0.1.0+50.g434343
        matchobj=re.match(r"""v([^.]+[.][^.]+[.][^-.]+)(-.*)?""",versionraw)
        version=matchobj.group(1)
        if matchobj.group(2) is not None:
            #version += '+'+matchobj.group(2)[1:].replace("-",".")
            version += '.'+matchobj.group(2)[1:].replace("-",".")
            pass
        pass
    except subprocess.CalledProcessError:
        # Ignore error, falling back to above version string
        pass
    # NOTE(review): since the '+' separator above is commented out, the
    # find('+') branch can never trigger; both branches append ".modified".
    if modified and version.find('+') >= 0:
        version += ".modified"
        pass
    elif modified:
        #version += "+modified"
        version += ".modified"
        pass
    pass
else:
    # Not a git checkout (e.g. sdist) and/or git unavailable.
    version = "UNKNOWN"
    pass
print("version = %s" % (version))
# NOTE: data_files paths are relative to the install prefix, so the share
# files land under prefix/share/limatix (see comment block above).
setup(name="limatix",
      description="Automated data collection",
      author="Stephen D. Holland",
      version=version,
      # url="http://limatix.org/dataguzzler",
      zip_safe=False,
      packages=["limatix",
                "limatix.steps",
                "limatix.bin",
                "limatix.widgets",
                "limatix.canonicalize_path",
                "limatix.dc_lxml_treesync"],
      # canonicalize_path sources live in a nested checkout directory.
      package_dir={"limatix.canonicalize_path": "limatix/canonicalize_path/canonicalize_path"},
      # Custom install_lib also writes install_prefix.txt / version.txt.
      cmdclass={"install_lib": install_lib_save_prefix_and_version},
      data_files=[ ("share/limatix/checklists",share_checklist_files),
                   ("share/limatix/pt_steps",pt_steps_files),
                   ("share/limatix/conf",conf_files),
                   ("share/limatix/doc",doc_files),
                   ("share/limatix/doc/processtrak_example",doc_pte_files),
                   ("share/limatix/doc/processtrak_example/creston_jan2016",doc_pte_creston_files),
                   ("share/limatix/xslt",xslt_files),
                   ("share/limatix",root_files),
                   ("etc/canonicalize_path",canonicalize_path_config_files)],
      package_data={"limatix.canonicalize_path": canonicalize_path_package_files,
                    "limatix.widgets": limatix_widgets_package_files,
                    "limatix.steps": limatix_checklist_steps_package_files,
                    "limatix": limatix_package_files},
      entry_points={
          "limatix.checklist_search_path": [ "limatix.checklist_search_path_entry=limatix:getchecklisturlpath"],
          "limatix.checklist.step": limatix_checklist_step_entrypoints,
          "limatix.widget": limatix_widget_entrypoints,
          "limatix.datacollect2.config_url_search_path": [ "limatix.share.conf = limatix:getconfigurlpath" ],
          "limatix.processtrak.step_url_search_path": [ "limatix.share.pt_steps = limatix:getptstepurlpath" ],
          "console_scripts": console_scripts_entrypoints,
          "gui_scripts": gui_scripts_entrypoints })
# scripts=["bin/datacollect2",
# "bin/dc_checklist",
# "bin/pt_checkprovenance",
# "bin/dc_chx2chf",
# "bin/dc_glade",
# "bin/dc_gui",
# "bin/dc_paramdb2",
# "bin/thermal2limatix",
# "bin/processtrak",
# "bin/dc_ricohphoto",
# "bin/dc_xlg2dpd",
# "bin/pt_cleanup"],
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from bindings.csw.graph_style_type import GraphStyleType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class GraphStyle1(GraphStyleType):
    """The style descriptor for a graph consisting of a number of features.
    Describes graph-specific style attributes.
    """
    class Meta:
        # Binding metadata (presumably consumed by the XML binding
        # generator): serialized element name and its GML namespace.
        name = "GraphStyle"
        namespace = "http://www.opengis.net/gml"
|
nilq/baby-python
|
python
|
#####################################################
##              librealsense streams test          ##
#####################################################
# This assumes .so file is found on the same directory
import pyrealsense2 as rs
# Prettier prints for reverse-engineering
from pprint import pprint
# Get realsense pipeline handle
pipe = rs.pipeline()
# Print all connected devices and find the T265
devices = rs.context().devices
for i in range(len(devices)):
    print('---------------------------')
    # Other fields of camera_info: https://intelrealsense.github.io/librealsense/python_docs/_generated/pyrealsense2.camera_info.html
    print('Found connected device #', i + 1, ':', devices[i].get_info(rs.camera_info.name), ', serial no: ', devices[i].get_info(rs.camera_info.serial_number))
    print('Available streams for this device:')
    # NOTE(review): rs.stream is the module-wide stream enum, not
    # device-specific -- the same list prints for every device.
    pprint(dir(rs.stream))
|
nilq/baby-python
|
python
|
# ██████ ██▓ ▄▄▄ ██▒ █▓ ██▓ ▄████▄ ██▓███ ██▓▒██ ██▒▓█████ ██▓
# ▒██ ▒ ▓██▒ ▒████▄ ▓██░ █▒▓██▒▒██▀ ▀█ ▓██░ ██▒▓██▒▒▒ █ █ ▒░▓█ ▀ ▓██▒
# ░ ▓██▄ ▒██░ ▒██ ▀█▄▓██ █▒░▒██▒▒▓█ ▄ ▓██░ ██▓▒▒██▒░░ █ ░▒███ ▒██░
# ▒ ██▒▒██░ ░██▄▄▄▄██▒██ █░░░██░▒▓▓▄ ▄██▒ ▒██▄█▓▒ ▒░██░ ░ █ █ ▒ ▒▓█ ▄ ▒██░
# ▒██████▒▒░██████▒▓█ ▓██▒▒▀█░ ░██░▒ ▓███▀ ░ ▒██▒ ░ ░░██░▒██▒ ▒██▒░▒████▒░██████▒
# ▒ ▒▓▒ ▒ ░░ ▒░▓ ░▒▒ ▓▒█░░ ▐░ ░▓ ░ ░▒ ▒ ░ ▒▓▒░ ░ ░░▓ ▒▒ ░ ░▓ ░░░ ▒░ ░░ ▒░▓ ░
# ░ ░▒ ░ ░░ ░ ▒ ░ ▒ ▒▒ ░░ ░░ ▒ ░ ░ ▒ ░▒ ░ ▒ ░░░ ░▒ ░ ░ ░ ░░ ░ ▒ ░
# ░ ░ ░ ░ ░ ░ ▒ ░░ ▒ ░░ ░░ ▒ ░ ░ ░ ░ ░ ░
# ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
# ░ ░
import os
import re
import socket
import subprocess
from libqtile import qtile
from libqtile.config import Click, Drag, Group, KeyChord, Key, Match, Screen
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile.lazy import lazy
from typing import List # noqa: F401
from libqtile.widget import Spacer
from libqtile.utils import guess_terminal
from nic import get_nic_name
from datetime import datetime
# Modifier aliases: "mod4" is the Super/Windows key.  mod1/mod2 hold the
# X11 modifier *names* ("alt", "control") used in the bindings below.
mod = "mod4"
mod1 = "alt"
mod2 = "control"
# Paths and program choices shared by the key bindings and bar widgets.
home = os.path.expanduser('~')
terminal = guess_terminal()
myTerm="alacritty"
interface_name = get_nic_name() # set get_nic_name(wired) if using a wired connection
# Cached once at startup; used by the Clock widget's calendar callback.
current_year = datetime.now().year
@lazy.function
def window_to_prev_group(qtile):
    """Move the focused window to the previous group.

    Fix: the original used the camelCase attributes qtile.currentWindow /
    qtile.currentGroup, which were removed from libqtile in favour of the
    snake_case properties and raise AttributeError on modern versions.
    """
    if qtile.current_window is not None:
        i = qtile.groups.index(qtile.current_group)
        # Index -1 naturally wraps from the first group to the last.
        qtile.current_window.togroup(qtile.groups[i - 1].name)
@lazy.function
def window_to_next_group(qtile):
    """Move the focused window to the next group, wrapping at the end.

    Fixes two defects: the removed camelCase API (currentWindow /
    currentGroup) and an IndexError when the current group was the last
    one (i + 1 ran past the end of qtile.groups; the modulo wraps it).
    """
    if qtile.current_window is not None:
        i = qtile.groups.index(qtile.current_group)
        qtile.current_window.togroup(qtile.groups[(i + 1) % len(qtile.groups)].name)
# Global key bindings, grouped by modifier combination.  Per-group
# switch/move bindings are appended later from group_names.
keys = [

    # Function keys
    Key([], "F12", lazy.spawn('xfce4-terminal --drop-down')),

    # Super + key
    Key([mod], "f", lazy.window.toggle_fullscreen()),
    Key([mod], "q", lazy.window.kill()),
    Key([mod], "x", lazy.spawn('arcolinux-logout')),
    Key([mod], "Escape", lazy.spawn('xkill')),
    Key([mod], "Return", lazy.spawn(myTerm)),

    # Super + shift + key
    Key([mod, "shift"], "Return", lazy.spawn('thunar')),
    #Key([mod, "shift"], "d", lazy.spawn("dmenu_run -i -nb '#191919' -nf '#fea63c' -sb '#fea63c' -sf '#191919' -fn 'NotoMonoRegular:bold:pixelsize=14'")),
    Key([mod, "shift"], "d", lazy.spawn("dmenu_run -i -h 38 -fn 'UbuntuMono:bold:pixelsize=22'")),
    Key([mod, "shift"], "q", lazy.window.kill()),
    Key([mod, "shift"], "r", lazy.restart()),
    Key([mod, "control"], "r", lazy.restart()),
    # Key([mod, "shift"], "x", lazy.shutdown()),

    # Control + alt application launchers
    # Key(["mod1", "control"], "e", lazy.spawn('arcolinux-tweak-tool')),
    Key(["mod1", "control"], "e", lazy.spawn("emacsclient -c -a 'emacs'")),
    Key(["mod1", "control"], "f", lazy.spawn('firefox')),
    Key(["mod1", "control"], "c", lazy.spawn('code')),
    Key(["mod1", "control"], "i", lazy.spawn('nitrogen')),
    Key(["mod1", "control"], "p", lazy.spawn('pavucontrol')),
    Key(["mod1", "control"], "v", lazy.spawn('virt-manager')),
    Key(["mod1", "control"], "b", lazy.spawn('brave')),
    Key(["mod1", "control"], "s", lazy.spawn('steam')),
    Key(["mod1", "control"], "t", lazy.spawn('thunderbird')),
    Key(["mod1", "control"], "q", lazy.spawn(myTerm + ' -e nvim /home/pixel/.config/qtile/config.py')),

    # Control + shift
    Key([mod2, "shift"], "Escape", lazy.spawn('xfce4-taskmanager')),

    # Screenshots
    #Key([], "Print", lazy.spawn("scrot 'ArcoLinux-%Y-%m-%d-%s_screenshot_$wx$h.jpg' -e 'mv $f $$(xdg-user-dir PICTURES)'")),
    Key([], "Print", lazy.spawn('xfce4-screenshooter')),
    Key([mod], "Print", lazy.spawn('thunar /home/pixel/Pictures/Screenshots')),
    Key([mod2, "shift"], "Print", lazy.spawn('gnome-screenshot -i')),

    # Multimedia keys
    Key([], "XF86Calculator", lazy.spawn("qalculate-gtk")),

    # Volume: mute / lower / raise
    Key([], "XF86AudioMute", lazy.spawn("amixer -q set Master toggle")),
    Key([], "XF86AudioLowerVolume", lazy.spawn("amixer -q set Master 5%-")),
    Key([], "XF86AudioRaiseVolume", lazy.spawn("amixer -q set Master 5%+")),

    # Layout management
    Key([mod], "n", lazy.layout.normalize()),
    Key([mod], "space", lazy.next_layout()),

    # Change focus (arrows and vim-style keys)
    Key([mod], "Up", lazy.layout.up()),
    Key([mod], "Down", lazy.layout.down()),
    Key([mod], "Left", lazy.layout.left()),
    Key([mod], "Right", lazy.layout.right()),
    Key([mod], "k", lazy.layout.up()),
    Key([mod], "j", lazy.layout.down()),
    Key([mod], "h", lazy.layout.left()),
    Key([mod], "l", lazy.layout.right()),

    # Resize: each binding lists the equivalent command for the various
    # layouts; only the commands a layout implements actually fire.
    Key([mod, "control"], "l",
        lazy.layout.grow_right(), lazy.layout.grow(),
        lazy.layout.increase_ratio(), lazy.layout.delete()),
    Key([mod, "control"], "Right",
        lazy.layout.grow_right(), lazy.layout.grow(),
        lazy.layout.increase_ratio(), lazy.layout.delete()),
    Key([mod, "control"], "h",
        lazy.layout.grow_left(), lazy.layout.shrink(),
        lazy.layout.decrease_ratio(), lazy.layout.add()),
    Key([mod, "control"], "Left",
        lazy.layout.grow_left(), lazy.layout.shrink(),
        lazy.layout.decrease_ratio(), lazy.layout.add()),
    Key([mod, "control"], "k",
        lazy.layout.grow_up(), lazy.layout.grow(),
        lazy.layout.decrease_nmaster()),
    Key([mod, "control"], "Up",
        lazy.layout.grow_up(), lazy.layout.grow(),
        lazy.layout.decrease_nmaster()),
    Key([mod, "control"], "j",
        lazy.layout.grow_down(), lazy.layout.shrink(),
        lazy.layout.increase_nmaster()),
    Key([mod, "control"], "Down",
        lazy.layout.grow_down(), lazy.layout.shrink(),
        lazy.layout.increase_nmaster()),

    # Flip layout (MonadTall / MonadWide)
    Key([mod, "shift"], "f", lazy.layout.flip()),

    # Flip layout (BSP)
    Key([mod, "mod1"], "k", lazy.layout.flip_up()),
    Key([mod, "mod1"], "j", lazy.layout.flip_down()),
    Key([mod, "mod1"], "l", lazy.layout.flip_right()),
    Key([mod, "mod1"], "h", lazy.layout.flip_left()),

    # Move windows (BSP)
    Key([mod, "shift"], "k", lazy.layout.shuffle_up()),
    Key([mod, "shift"], "j", lazy.layout.shuffle_down()),
    Key([mod, "shift"], "h", lazy.layout.shuffle_left()),
    Key([mod, "shift"], "l", lazy.layout.shuffle_right()),

    # Move windows (MonadTall / MonadWide)
    Key([mod, "shift"], "Up", lazy.layout.shuffle_up()),
    Key([mod, "shift"], "Down", lazy.layout.shuffle_down()),
    Key([mod, "shift"], "Left", lazy.layout.swap_left()),
    Key([mod, "shift"], "Right", lazy.layout.swap_right()),

    # Toggle floating for the focused window
    Key([mod, "shift"], "space", lazy.window.toggle_floating()),
]
# Workspace definitions: (label, Group kwargs).  Labels carry Nerd Font
# glyphs rendered in the GroupBox widget.
group_names = [
    ("1 ", {'layout': 'monadtall'}),
    ("2 ", {'layout': 'monadtall'}),
    ("3 ", {'layout': 'monadtall'}),
    ("4 ", {'layout': 'monadtall'}),
    ("5 ", {'layout': 'monadtall'}),
    ("6 ", {'layout': 'monadtall'}),
    ("7 ", {'layout': 'monadtall'}),
    ("8 λ", {'layout': 'monadtall'}),
    ("9 ", {'layout': 'floating'}),
]
groups = [Group(label, **opts) for label, opts in group_names]

# mod+<n> switches to group n; mod+shift+<n> sends the focused window there.
for index, (label, _opts) in enumerate(group_names, start=1):
    keys.append(Key([mod], str(index), lazy.group[label].toscreen()))
    keys.append(Key([mod, "shift"], str(index), lazy.window.togroup(label)))
# Options shared by every tiling layout below.
layout_theme = {
    "border_width": 2,
    "margin": 8,
    "border_focus": "e1acff",
    "border_normal": "1D2330",
    "single_border_width": 0,
}

# Enabled layouts; the commented entries are ready-made alternatives.
layouts = [
    #layout.MonadWide(**layout_theme),
    #layout.Bsp(**layout_theme),
    #layout.Stack(stacks=2, **layout_theme),
    #layout.Columns(**layout_theme),
    #layout.RatioTile(**layout_theme),
    #layout.VerticalTile(**layout_theme),
    #layout.Matrix(**layout_theme),
    #layout.Zoomy(**layout_theme),
    layout.MonadTall(**layout_theme),
    layout.Max(**layout_theme),
    layout.Tile(shift_windows=True, **layout_theme),
    layout.Stack(num_stacks=2),
    layout.TreeTab(
        font="Ubuntu",
        fontsize=10,
        sections=["FIRST", "SECOND"],
        section_fontsize=11,
        bg_color="141414",
        active_bg="90C435",
        active_fg="000000",
        inactive_bg="384323",
        inactive_fg="a0a0a0",
        padding_y=5,
        section_top=10,
        panel_width=320,
    ),
    layout.Floating(**layout_theme),
]

# Bar colour palette; each pair is a top/bottom gradient.
colors = [
    ["#282c34", "#282c34"],  # 0: panel background
    ["#3d3f4b", "#434758"],  # 1: background for current screen tab
    ["#ffffff", "#ffffff"],  # 2: font color for group names
    ["#ff5555", "#ff5555"],  # 3: border line color for current tab
    ["#74438f", "#74438f"],  # 4: border line color for 'other tabs' and color for 'odd widgets'
    ["#4f76c7", "#4f76c7"],  # 5: color for the 'even widgets'
    ["#e1acff", "#e1acff"],  # 6: window name
    ["#808080", "#808080"],  # 7: vertical line color
]

##### DEFAULT WIDGET SETTINGS #####
widget_defaults = dict(
    font="FiraCode Nerd Font",
    fontsize=21,
    padding=2,
    background=colors[2],
)
extension_defaults = widget_defaults.copy()
def init_widgets_list():
    """Build the ordered, left-to-right list of bar widgets.

    Bug fixed: the tux Image widget was given scale = "False" -- a
    non-empty string, which is truthy, so scaling was actually in
    effect.  The boolean True keeps that effective behaviour while
    removing the string-as-boolean confusion.

    NOTE(review): init_widgets_screen2() removes widgets by index
    (del [34:37], presumably the systray cluster); do not change the
    number or order of entries here without updating that slice.
    """
    prompt = "{0}@{1}: ".format(os.environ["USER"], socket.gethostname())

    def pipe():
        # A fresh '|' divider TextBox per slot -- widget instances
        # cannot be shared between bar positions.
        return widget.TextBox(
            text = '|',
            background = colors[0],
            foreground = colors[7],
            fontsize = 20
        )

    widgets_list = [
        widget.Sep(
            linewidth = 0,
            padding = 6,
            foreground = colors[2],
            background = colors[0]
        ),
        widget.Image(
            filename = "~/.config/qtile/icons/tux.png",
            scale = True,  # was the truthy string "False"; real boolean now
            mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm)},
            background = colors[0]
        ),
        widget.Sep(
            linewidth = 0,
            padding = 6,
            foreground = colors[2],
            background = colors[0]
        ),
        pipe(),
        widget.Sep(
            linewidth = 0,
            padding = 6,
            foreground = colors[2],
            background = colors[0]
        ),
        widget.GroupBox(
            font = "FiraCode Nerd Font",
            fontsize = 18,
            margin_y = 3,
            margin_x = 0,
            padding_y = 5,
            padding_x = 3,
            borderwidth = 3,
            active = "#ff71ce",
            inactive = colors[2],
            rounded = False,
            highlight_color = colors[0],
            highlight_method = "line",
            this_current_screen_border = colors[6],
            this_screen_border = colors[4],
            other_current_screen_border = colors[6],
            other_screen_border = colors[4],
            foreground = colors[2],
            background = colors[0]
        ),
        widget.Prompt(
            prompt = prompt,
            font = "Ubuntu Mono",
            padding = 10,
            foreground = colors[3],
            background = colors[1],
            fontsize = 16
        ),
        widget.Sep(
            linewidth = 0,
            padding = 40,
            foreground = colors[2],
            background = colors[0]
        ),
        widget.WindowName(
            foreground = colors[6],
            background = colors[0],
            padding = 0
        ),
        widget.Sep(
            linewidth = 0,
            padding = 6,
            foreground = colors[0],
            background = colors[0]
        ),
        pipe(),
        widget.CheckUpdates(
            update_interval = 1800,
            distro = "Arch_checkupdates",
            display_format = "⟳{updates} Updates",
            foreground = colors[6],
            # Click to run a full system upgrade in a terminal.
            mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + ' -e sudo pacman -Syu')},
            background = colors[0]
        ),
        pipe(),
        widget.CPU(
            format = '\uf108 cpu: {load_percent}% {freq_current}GHz',
            foreground = '#ecbe7b',
            background = colors[0]
        ),
        pipe(),
        widget.TextBox(
            text = " 🌡",
            padding = 2,
            foreground = '#01cdfe',
            background = colors[0],
            fontsize = 16
        ),
        widget.ThermalSensor(
            foreground = '#01cdfe',
            background = colors[0],
            threshold = 90,
            padding = 5,
            tag_sensor = "Package id 0"
        ),
        pipe(),
        widget.TextBox(
            text = " 🌡",
            padding = 2,
            foreground = '#05ffa1',
            background = colors[0],
            fontsize = 16
        ),
        widget.NvidiaSensors(
            foreground = '#05ffa1',
            background = colors[0],
            format = 'gpu: {temp}°C'
        ),
        pipe(),
        widget.Memory(
            foreground = '#ff6c6b',
            background = colors[0],
            format = '\uf233 {MemUsed: .0f}M/{MemTotal: .0f}M',
            # Click to inspect memory hogs in htop.
            mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + ' -e htop')},
            padding = 5
        ),
        pipe(),
        widget.Net(
            interface = interface_name,
            format = '\uf0ab {down} \uf0aa {up}',
            foreground = '#fffb96',
            background = colors[0],
            padding = 5
        ),
        pipe(),
        widget.TextBox(
            text=" ",
            foreground='#ff71ce',
            background=colors[0],
            font="Font Awesome 5 Free Solid",
            # fontsize=38,
        ),
        widget.Volume(
            #foreground = '#828CF6',
            foreground='#ff71ce',
            background = colors[0],
            padding = 5
        ),
        pipe(),
        widget.CurrentLayoutIcon(
            custom_icon_paths = [os.path.expanduser("~/.config/qtile/icons")],
            foreground = '#c678dd',
            background = colors[0],
            padding = 0,
            scale = 0.7
        ),
        widget.CurrentLayout(
            foreground = '#c678dd',
            background = colors[0],
            padding = 5
        ),
        pipe(),
        widget.TextBox(
            text=" ",
            foreground='#46d9ff',
            background=colors[0],
            font="Font Awesome 5 Free Solid",
            # fontsize=38,
        ),
        widget.Clock(
            foreground = '#46d9ff',
            background = colors[0],
            format = "%A, %B %d - %H:%M:%S",
            # Click to show this year's calendar in a held-open terminal.
            mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + f" --hold -e cal {current_year}")}
        ),
        pipe(),
        widget.Systray(
            background = colors[0],
            icon_size=21,
            padding = 4
        ),
        widget.Sep(
            linewidth = 0,
            padding = 10,
            foreground = colors[0],
            background = colors[0]
        ),
    ]
    return widgets_list
widgets_list = init_widgets_list()

def init_widgets_screen1():
    """The primary monitor gets the full widget list."""
    return init_widgets_list()

def init_widgets_screen2():
    """Secondary monitor: same bar minus the slice at indices 34-36
    (presumably the systray cluster -- only one screen may own a
    systray; confirm indices if the widget list changes)."""
    secondary = init_widgets_list()
    del secondary[34:37]
    return secondary

widgets_screen1 = init_widgets_screen1()
widgets_screen2 = init_widgets_screen2()

def init_screens():
    """One top bar per monitor; the primary bar is taller."""
    return [
        Screen(top=bar.Bar(widgets=init_widgets_screen1(), size=38)),
        Screen(top=bar.Bar(widgets=init_widgets_screen2(), size=26)),
    ]

screens = init_screens()
# Mouse bindings: mod+drag moves / resizes floating windows,
# mod+middle-click raises them.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front()),
]
dgroups_key_binder = None
dgroups_app_rules = []  # type: List
main = None  # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False

# Windows matching any of these rules always start floating.
floating_layout = layout.Floating(
    float_rules=[
        *layout.Floating.default_float_rules,
        Match(wm_class='Arcolinux-welcome-app.py'),
        Match(wm_class='Arcolinux-tweak-tool.py'),
        Match(wm_class='confirm'),
        Match(wm_class='dialog'),
        Match(wm_class='download'),
        Match(wm_class='error'),
        Match(wm_class='file_progress'),
        Match(wm_class='notification'),
        Match(wm_class='splash'),
        Match(wm_class='toolbar'),
        Match(wm_class='confirmreset'),
        Match(wm_class='makebranch'),
        Match(wm_class='maketag'),
        Match(wm_class='Arandr'),
        Match(wm_class='feh'),
        Match(wm_class='Galculator'),
        Match(wm_class='arcolinux-logout'),
        Match(wm_class='xfce4-terminal'),
        Match(wm_class='ssh-askpass'),
        Match(wm_class='mullvad vpn'),
        Match(title='branchdialog'),
        Match(title='Open File'),
        Match(title='pinentry'),
        Match(title='Qalculate!'),
        Match(title='Connman System Tray'),
        Match(title='Steam'),
        Match(title='Steam Login'),
    ],
    fullscreen_border_width=0,
    border_width=0,
)

auto_fullscreen = True
focus_on_window_activation = "smart"
@hook.subscribe.startup_once
def start_once():
    """Run the autostart script on the first qtile startup only."""
    autostart = os.path.expanduser('~') + '/.config/qtile/scripts/autostart.sh'
    subprocess.call([autostart])
@hook.subscribe.startup
def start_always():
    """On every (re)start, reset the X root cursor to a sane shape."""
    subprocess.Popen(['xsetroot', '-cursor_name', 'left_ptr'])
# WM types that should always float.  Defined *before* set_floating()
# references it: the original defined it after the function, which only
# worked because hooks fire after the module is fully imported.
floating_types = ["notification", "toolbar", "splash", "dialog"]

@hook.subscribe.client_new
def set_floating(window):
    """Float transient windows and windows whose WM type is in floating_types."""
    if (window.window.get_wm_transient_for()
            or window.window.get_wm_type() in floating_types):
        window.floating = True
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
# Advertised window-manager name; keep "LG3D" for Java-app compatibility.
wmname = "LG3D"
|
nilq/baby-python
|
python
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/worker_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.protobuf import worker_pb2 as tensorflow_dot_core_dot_protobuf_dot_worker__pb2
# Generated file-level descriptor for worker_service.proto.  The
# serialized_pb bytes are the compiled proto emitted by protoc; never
# edit the blob by hand -- regenerate from the .proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/core/protobuf/worker_service.proto',
  package='tensorflow.grpc',
  syntax='proto3',
  serialized_pb=_b('\n-tensorflow/core/protobuf/worker_service.proto\x12\x0ftensorflow.grpc\x1a%tensorflow/core/protobuf/worker.proto2\x99\x07\n\rWorkerService\x12H\n\tGetStatus\x12\x1c.tensorflow.GetStatusRequest\x1a\x1d.tensorflow.GetStatusResponse\x12\x66\n\x13\x43reateWorkerSession\x12&.tensorflow.CreateWorkerSessionRequest\x1a\'.tensorflow.CreateWorkerSessionResponse\x12\x66\n\x13\x44\x65leteWorkerSession\x12&.tensorflow.DeleteWorkerSessionRequest\x1a\'.tensorflow.DeleteWorkerSessionResponse\x12T\n\rRegisterGraph\x12 .tensorflow.RegisterGraphRequest\x1a!.tensorflow.RegisterGraphResponse\x12Z\n\x0f\x44\x65registerGraph\x12\".tensorflow.DeregisterGraphRequest\x1a#.tensorflow.DeregisterGraphResponse\x12\x45\n\x08RunGraph\x12\x1b.tensorflow.RunGraphRequest\x1a\x1c.tensorflow.RunGraphResponse\x12Q\n\x0c\x43leanupGraph\x12\x1f.tensorflow.CleanupGraphRequest\x1a .tensorflow.CleanupGraphResponse\x12K\n\nCleanupAll\x12\x1d.tensorflow.CleanupAllRequest\x1a\x1e.tensorflow.CleanupAllResponse\x12M\n\nRecvTensor\x12\x1d.tensorflow.RecvTensorRequest\x1a\x1e.tensorflow.RecvTensorResponse\"\x00\x12\x42\n\x07Logging\x12\x1a.tensorflow.LoggingRequest\x1a\x1b.tensorflow.LoggingResponse\x12\x42\n\x07Tracing\x12\x1a.tensorflow.TracingRequest\x1a\x1b.tensorflow.TracingResponseB3\n\x1aorg.tensorflow.distruntimeB\x13WorkerServiceProtosP\x01\x62\x06proto3')
  ,
  dependencies=[tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DESCRIPTOR,])
# Register the descriptor and attach the Java package/outer-class options.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032org.tensorflow.distruntimeB\023WorkerServiceProtosP\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class WorkerServiceStub(object):
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/tensorflow.grpc.WorkerService/GetStatus',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.FromString,
)
self.CreateWorkerSession = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CreateWorkerSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.FromString,
)
self.DeleteWorkerSession = channel.unary_unary(
'/tensorflow.grpc.WorkerService/DeleteWorkerSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.FromString,
)
self.RegisterGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RegisterGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.FromString,
)
self.DeregisterGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/DeregisterGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.FromString,
)
self.RunGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RunGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.FromString,
)
self.CleanupGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CleanupGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.FromString,
)
self.CleanupAll = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CleanupAll',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.FromString,
)
self.RecvTensor = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RecvTensor',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.FromString,
)
self.Logging = channel.unary_unary(
'/tensorflow.grpc.WorkerService/Logging',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.FromString,
)
self.Tracing = channel.unary_unary(
'/tensorflow.grpc.WorkerService/Tracing',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.FromString,
)
class WorkerServiceServicer(object):
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def GetStatus(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RegisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeregisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CleanupGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CleanupAll(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RecvTensor(self, request, context):
"""See worker.proto for details.
RecvTensor Method
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Logging(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Tracing(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WorkerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.SerializeToString,
),
'CreateWorkerSession': grpc.unary_unary_rpc_method_handler(
servicer.CreateWorkerSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.SerializeToString,
),
'DeleteWorkerSession': grpc.unary_unary_rpc_method_handler(
servicer.DeleteWorkerSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.SerializeToString,
),
'RegisterGraph': grpc.unary_unary_rpc_method_handler(
servicer.RegisterGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.SerializeToString,
),
'DeregisterGraph': grpc.unary_unary_rpc_method_handler(
servicer.DeregisterGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.SerializeToString,
),
'RunGraph': grpc.unary_unary_rpc_method_handler(
servicer.RunGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.SerializeToString,
),
'CleanupGraph': grpc.unary_unary_rpc_method_handler(
servicer.CleanupGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.SerializeToString,
),
'CleanupAll': grpc.unary_unary_rpc_method_handler(
servicer.CleanupAll,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.SerializeToString,
),
'RecvTensor': grpc.unary_unary_rpc_method_handler(
servicer.RecvTensor,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.SerializeToString,
),
'Logging': grpc.unary_unary_rpc_method_handler(
servicer.Logging,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.SerializeToString,
),
'Tracing': grpc.unary_unary_rpc_method_handler(
servicer.Tracing,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.grpc.WorkerService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaWorkerServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def GetStatus(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CreateWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RegisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeregisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RunGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CleanupGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CleanupAll(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RecvTensor(self, request, context):
"""See worker.proto for details.
RecvTensor Method
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Logging(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Tracing(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
# NOTE(review): protoc-generated beta-API stub. Every method is an abstract
# placeholder (raises NotImplementedError); the `.future = None` attribute on
# each method mirrors the generated beta stub interface. Do not edit by hand.
class BetaWorkerServiceStub(object):
  """The Beta API is deprecated for 0.15.0 and later.
  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This class was generated
  only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
  """//////////////////////////////////////////////////////////////////////////////
  WorkerService defines a TensorFlow service that executes dataflow
  graphs on a set of local devices, on behalf of a MasterService.
  A worker service keeps track of multiple "registered graphs". Each
  registered graph is a subgraph of a client's graph, corresponding to
  only the nodes that should execute on this worker (and any
  additional nodes necessary for inter-process communication using
  the `RecvTensor` method).
  //////////////////////////////////////////////////////////////////////////////
  """
  def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  GetStatus.future = None
  def CreateWorkerSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  CreateWorkerSession.future = None
  def DeleteWorkerSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  DeleteWorkerSession.future = None
  def RegisterGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  RegisterGraph.future = None
  def DeregisterGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  DeregisterGraph.future = None
  def RunGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  RunGraph.future = None
  def CleanupGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  CleanupGraph.future = None
  def CleanupAll(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  CleanupAll.future = None
  def RecvTensor(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    RecvTensor Method
    """
    raise NotImplementedError()
  RecvTensor.future = None
  def Logging(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  Logging.future = None
  def Tracing(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """See worker.proto for details.
    """
    raise NotImplementedError()
  Tracing.future = None
def beta_create_WorkerService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """The Beta API is deprecated for 0.15.0 and later.
  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This function was
  generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
  pb2 = tensorflow_dot_core_dot_protobuf_dot_worker__pb2
  service = 'tensorflow.grpc.WorkerService'
  # One (request message, response message) pair per unary-unary RPC method.
  messages = {
      'CleanupAll': (pb2.CleanupAllRequest, pb2.CleanupAllResponse),
      'CleanupGraph': (pb2.CleanupGraphRequest, pb2.CleanupGraphResponse),
      'CreateWorkerSession': (pb2.CreateWorkerSessionRequest, pb2.CreateWorkerSessionResponse),
      'DeleteWorkerSession': (pb2.DeleteWorkerSessionRequest, pb2.DeleteWorkerSessionResponse),
      'DeregisterGraph': (pb2.DeregisterGraphRequest, pb2.DeregisterGraphResponse),
      'GetStatus': (pb2.GetStatusRequest, pb2.GetStatusResponse),
      'Logging': (pb2.LoggingRequest, pb2.LoggingResponse),
      'RecvTensor': (pb2.RecvTensorRequest, pb2.RecvTensorResponse),
      'RegisterGraph': (pb2.RegisterGraphRequest, pb2.RegisterGraphResponse),
      'RunGraph': (pb2.RunGraphRequest, pb2.RunGraphResponse),
      'Tracing': (pb2.TracingRequest, pb2.TracingResponse),
  }
  # Wire the deserializers/serializers and the servicer's handlers.
  request_deserializers = {(service, name): req.FromString for name, (req, _) in messages.items()}
  response_serializers = {(service, name): resp.SerializeToString for name, (_, resp) in messages.items()}
  method_implementations = {(service, name): face_utilities.unary_unary_inline(getattr(servicer, name)) for name in messages}
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
def beta_create_WorkerService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """The Beta API is deprecated for 0.15.0 and later.
  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This function was
  generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
  pb2 = tensorflow_dot_core_dot_protobuf_dot_worker__pb2
  service = 'tensorflow.grpc.WorkerService'
  # One (request message, response message) pair per unary-unary RPC method.
  messages = {
      'CleanupAll': (pb2.CleanupAllRequest, pb2.CleanupAllResponse),
      'CleanupGraph': (pb2.CleanupGraphRequest, pb2.CleanupGraphResponse),
      'CreateWorkerSession': (pb2.CreateWorkerSessionRequest, pb2.CreateWorkerSessionResponse),
      'DeleteWorkerSession': (pb2.DeleteWorkerSessionRequest, pb2.DeleteWorkerSessionResponse),
      'DeregisterGraph': (pb2.DeregisterGraphRequest, pb2.DeregisterGraphResponse),
      'GetStatus': (pb2.GetStatusRequest, pb2.GetStatusResponse),
      'Logging': (pb2.LoggingRequest, pb2.LoggingResponse),
      'RecvTensor': (pb2.RecvTensorRequest, pb2.RecvTensorResponse),
      'RegisterGraph': (pb2.RegisterGraphRequest, pb2.RegisterGraphResponse),
      'RunGraph': (pb2.RunGraphRequest, pb2.RunGraphResponse),
      'Tracing': (pb2.TracingRequest, pb2.TracingResponse),
  }
  # Serializers for outgoing requests, deserializers for incoming responses.
  request_serializers = {(service, name): req.SerializeToString for name, (req, _) in messages.items()}
  response_deserializers = {(service, name): resp.FromString for name, (_, resp) in messages.items()}
  cardinalities = {name: cardinality.Cardinality.UNARY_UNARY for name in messages}
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'tensorflow.grpc.WorkerService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
def validate_cell(cell: tuple, size: int = 6) -> tuple:
    """Wrap a (row, col) position around the edges of a size x size field.

    Generalizes the original hard-coded 6x6 wrap-around: any out-of-range
    coordinate (e.g. -1 or ``size``) is wrapped with modular arithmetic, so
    a move off one edge re-enters from the opposite edge. In-range values
    are returned unchanged, matching the original behavior.

    :param cell: (row, col) pair, possibly one step out of bounds.
    :param size: side length of the square field (defaults to 6, as used by
        the script below).
    :return: the wrapped (row, col) pair.
    """
    x, y = cell
    return x % size, y % size
def get_cell(cell: tuple, field: list) -> str:
    """Return the value stored in *field* at the (row, col) position *cell*."""
    return field[cell[0]][cell[1]]
def set_cell(cell: tuple, field: list, value) -> None:
    """Store *value* in *field* at the (row, col) position *cell*.

    Bug fix: the original assigned to the module-level ``matrix`` instead of
    the ``field`` parameter, silently ignoring the argument it was given.
    (The annotation on *value* was also dropped: the script stores strings
    such as '-' and 'E', not ints.)
    """
    row, col = cell
    field[row][col] = value
# Movement deltas: each command maps to a lambda producing the next (row, col).
move = {
    'up': lambda x: (x[0]-1, x[1]),
    'down': lambda x: (x[0]+1, x[1]),
    'left': lambda x: (x[0], x[1]-1),
    'right': lambda x: (x[0], x[1]+1)
}
# Deposit bookkeeping: symbol -> [display name, number of times found].
deposits = {
    'W': ['Water', 0],
    'M': ['Metal', 0],
    'C': ['Concrete', 0]
}
# Read the 6x6 field from stdin, one space-separated row per line;
# 'E' marks the rover's starting cell.
matrix = [[x for x in input().split(' ')] for _ in range(6)]
current_position = next((x, y) for x in range(6) for y in range(6) if matrix[x][y] == 'E')
commands = input().split(', ')
for command in commands:
    next_position = move[command](current_position)
    next_position = validate_cell(next_position)  # wrap around the field edges
    row, col = next_position
    item_at_cell = get_cell(next_position, matrix)
    if item_at_cell in ('W', 'M', 'C'):
        # Found a deposit: count it and move the rover marker onto it.
        deposits[item_at_cell][1] += 1
        print(f"{deposits[item_at_cell][0]} deposit found at ({row}, {col})")
        set_cell(current_position, matrix, '-')
        set_cell(next_position, matrix, 'E')
    elif item_at_cell == 'R':
        # Rough terrain: the rover breaks and the mission ends immediately.
        print(f"Rover got broken at ({row}, {col})")
        break
    current_position = next_position
# The colony needs at least one deposit of every resource type.
if deposits['W'][1] > 0 and deposits['C'][1] > 0 and deposits['M'][1] > 0:
    print("Area suitable to start the colony.")
else:
    print("Area not suitable to start the colony.")
|
nilq/baby-python
|
python
|
# nukedatastore tests
import pytest
import datetime
from nukedatastore import NukeDataStore, NukeDataStoreError
def test_datastore_crud(datastore):
    """Store a value, then read it back and check the key listing."""
    payload = {'id': 1234, 'name': 'project name'}
    datastore['project_data'] = payload
    keys = datastore.list()
    assert len(keys) == 1
    assert keys[0] == 'project_data'
    assert datastore['project_data'] == payload
def test_datastore_crud_invalid_key(datastore):
    """Reading a key that was never stored raises KeyError."""
    with pytest.raises(KeyError):
        _ = datastore['invalid_key']
def test_datastore_crud_invalid_data(datastore):
    """Storing a non-serializable value is rejected."""
    unserializable = datetime.datetime.now()
    with pytest.raises(NukeDataStoreError):
        datastore['data'] = unserializable
def test_datastore_crud_frozen(datastore):
    """Writes are rejected while the store is frozen; unfreeze restores it."""
    datastore.freeze()
    try:
        with pytest.raises(NukeDataStoreError):
            datastore['project_data'] = {}
    finally:
        datastore.unfreeze()
def test_deleted_node(datastore, nuke):
    """Accessing the store after its backing node was deleted fails."""
    nuke.delete(nuke.toNode('data_store'))
    with pytest.raises(NukeDataStoreError):
        _ = datastore.store
def test_existing_node_init(nuke):
    """Constructing a store on an already-existing node succeeds."""
    NukeDataStore('data_store')
    duplicate = NukeDataStore('data_store')
    assert duplicate
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Graphcore Ltd.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore Ltd.
# It has been modified to run the application on IPU hardware.
"""Validate a network.
Usage:
python validate.py tfrecord_dir/ tfrecord_dir2/
"""
import os
from absl import app, flags
from tensorflow import gfile
import dual_net
import preprocessing
import utils
# Command-line flags for this validation run.
flags.DEFINE_integer('examples_to_validate', 50 * 2048,
                     'Number of examples to run validation on.')
flags.DEFINE_string('validate_name', 'selfplay',
                    'Name of validation set (i.e. selfplay or human).')
flags.DEFINE_bool('expand_validation_dirs', True,
                  'Whether to expand the input paths by globbing. If false, '
                  'directly read and validate on the given files.')
# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('num_tpu_cores')
FLAGS = flags.FLAGS
def validate(*tf_records):
    """Validate a model's performance on a set of holdout data.

    Args:
        tf_records: paths of TFRecord files holding the holdout examples.
    """
    # TPU input pipelines receive the batch size via params; IPU pipelines
    # read it from FLAGS and disable shuffling for deterministic evaluation.
    if FLAGS.use_tpu:
        def _input_fn(params):
            return preprocessing.get_tpu_input_tensors(
                params['batch_size'], tf_records, filter_amount=1.0)
    else:
        def _input_fn():
            return preprocessing.get_ipu_input_tensors(
                FLAGS.train_batch_size, tf_records, filter_amount=1.0,
                shuffle_examples=False)
    steps = FLAGS.examples_to_validate // FLAGS.train_batch_size
    if FLAGS.use_tpu:
        steps //= FLAGS.num_tpu_cores
    # NOTE(review): an IPU estimator is built even on the use_tpu path —
    # presumably intentional for this Graphcore port; confirm.
    estimator = dual_net._get_ipu_estimator(num_replicas=1, iterations_per_loop=steps)
    with utils.logged_timer("Validating"):
        estimator.evaluate(_input_fn, steps=steps, name=FLAGS.validate_name)
def main(argv):
    """Validate a model's performance on a set of holdout data."""
    _, *validation_paths = argv
    if not FLAGS.expand_validation_dirs:
        # Validate directly on the files given on the command line.
        tf_records = validation_paths
    else:
        # Expand each directory argument into its *.zz record files.
        tf_records = []
        with utils.logged_timer("Building lists of holdout files"):
            for record_dir in validation_paths:
                tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))
    if not tf_records:
        raise RuntimeError("Did not find any holdout files for validating!")
    validate(*tf_records)
if __name__ == "__main__":
    app.run(main)
|
nilq/baby-python
|
python
|
import torch
import warprnnt_pytorch as warp_rnnt
from torch.autograd import Function
from torch.nn import Module
from .warp_rnnt import *
__all__ = ['rnnt_loss', 'RNNTLoss']
class _RNNT(Function):
    """Autograd binding for the warp-rnnt transducer loss kernels."""
    @staticmethod
    def forward(ctx, acts, labels, act_lens, label_lens, blank, reduction):
        """
        acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
        labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
        act_lens: Tensor of size (batch) containing size of each output sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        """
        is_cuda = acts.is_cuda
        certify_inputs(acts, labels, act_lens, label_lens)
        # Dispatch to the matching warp-rnnt kernel for the input's device.
        loss_func = warp_rnnt.gpu_rnnt if is_cuda else warp_rnnt.cpu_rnnt
        # The kernel writes gradients in place; pass an empty tensor when no
        # gradient is required so it skips that work.
        grads = torch.zeros_like(acts) if acts.requires_grad else torch.zeros(0).to(acts)
        minibatch_size = acts.size(0)
        # Per-example costs, filled in by the kernel (allocated on CPU).
        costs = torch.zeros(minibatch_size, dtype=acts.dtype)
        loss_func(acts,
                  labels,
                  act_lens,
                  label_lens,
                  costs,
                  grads,
                  blank,
                  0)
        # Apply the requested reduction to both costs and cached gradients.
        if reduction in ['sum', 'mean']:
            costs = costs.sum().unsqueeze_(-1)
            if reduction == 'mean':
                costs /= minibatch_size
                grads /= minibatch_size
        costs = costs.to(acts.device)
        # Stash gradients for backward (computed eagerly by the kernel).
        ctx.grads = grads
        return costs
    @staticmethod
    def backward(ctx, grad_output):
        # Scale the cached per-element gradients by the incoming gradient;
        # None for the five non-tensor forward arguments.
        grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
        return ctx.grads.mul_(grad_output), None, None, None, None, None
def rnnt_loss(acts, labels, act_lens, label_lens, blank=0, reduction='mean'):
    """ RNN Transducer Loss
    Args:
        acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
        labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
        act_lens: Tensor of size (batch) containing size of each output sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        blank (int, optional): blank label. Default: 0.
        reduction (string, optional): Specifies the reduction to apply to the output:
        'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
        'mean': the output losses will be divided by the target lengths and
        then the mean over the batch is taken. Default: 'mean'
    """
    # The CPU kernel expects log-probabilities; the GPU kernel applies
    # log_softmax internally, so only normalize here on the CPU path.
    if not acts.is_cuda:
        acts = torch.nn.functional.log_softmax(acts, -1)
    return _RNNT.apply(acts, labels, act_lens, label_lens, blank, reduction)
class RNNTLoss(Module):
    """
    Parameters:
        blank (int, optional): blank label. Default: 0.
        reduction (string, optional): Specifies the reduction to apply to the output:
        'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
        'mean': the output losses will be divided by the target lengths and
        then the mean over the batch is taken. Default: 'mean'
    """
    def __init__(self, blank=0, reduction='mean'):
        super(RNNTLoss, self).__init__()
        self.blank = blank
        self.reduction = reduction
        self.loss = _RNNT.apply
    def forward(self, acts, labels, act_lens, label_lens):
        """
        acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
        labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
        act_lens: Tensor of size (batch) containing size of each output sequence from the network
        label_lens: Tensor of (batch) containing label length of each example
        """
        if not acts.is_cuda:
            # NOTE manually done log_softmax for CPU version,
            # log_softmax is computed within GPU version.
            acts = torch.nn.functional.log_softmax(acts, -1)
        return self.loss(acts, labels, act_lens, label_lens, self.blank, self.reduction)
def check_type(var, t, name):
    """Raise TypeError unless tensor *var* has exactly dtype *t*."""
    if var.dtype is t:
        return
    raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
    """Raise ValueError unless tensor *var* is contiguous in memory."""
    if var.is_contiguous():
        return
    raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
    """Raise ValueError unless tensor *var* is exactly *dim*-dimensional."""
    if len(var.shape) == dim:
        return
    raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
    """Validate the transducer-loss inputs before handing them to the kernel.

    Checks dtypes, contiguity, dimensionality, per-example length counts, and
    that the activation tensor's time/label axes match the declared maxima.

    Args:
        log_probs: (batch, T, U, vocab) activations.
        labels: (batch, max_label_len) int32 targets, zero padded.
        lengths: (batch,) int32 activation lengths.
        label_lengths: (batch,) int32 label lengths.

    Raises:
        TypeError: on a wrong dtype.
        ValueError: on non-contiguous input, wrong rank, or length mismatch.
    """
    # check_type(log_probs, torch.float32, "log_probs")  # dtype check left disabled on purpose
    check_type(labels, torch.int32, "labels")
    check_type(label_lengths, torch.int32, "label_lengths")
    check_type(lengths, torch.int32, "lengths")
    check_contiguous(log_probs, "log_probs")
    check_contiguous(labels, "labels")
    check_contiguous(label_lengths, "label_lengths")
    check_contiguous(lengths, "lengths")
    if lengths.shape[0] != log_probs.shape[0]:
        raise ValueError("must have a length per example.")
    if label_lengths.shape[0] != log_probs.shape[0]:
        raise ValueError("must have a label length per example.")
    check_dim(log_probs, 4, "log_probs")
    check_dim(labels, 2, "labels")
    # Bug fix: diagnostic names were misspelled ("lenghts", "label_lenghts"),
    # which leaked into the ValueError messages users see.
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
    max_T = torch.max(lengths)
    max_U = torch.max(label_lengths)
    T, U = log_probs.shape[1:3]
    if T != max_T:
        raise ValueError("Input length mismatch")
    # The label axis carries one extra slot for the blank-prefixed sequence.
    if U != max_U + 1:
        raise ValueError("Output length mismatch")
|
nilq/baby-python
|
python
|
from utils import color
from browser import help
import random
def get(cmds, typ, add_attr=None):
    '''
    USE:
        error.get(cmds, type, [optional:add_attr]) where add_attr must be < 3
    Description:
        Returns a correctly colored message according to declared "typ".

    :param cmds: parsed command tokens; cmds[0] is the command name.
    :param typ: message category, e.g. 'success', 'warning', 'error'.
    :param add_attr: up to three values interpolated into the message;
        padded with None so every template can index three slots.
    :raises KeyError: if the command or typ has no entry in the table.

    Bug fixes relative to the original: user-visible message typos corrected
    ("sucessfully" -> "successfully", "doen's" -> "doesn't",
    "does not exists" -> "does not exist", "you moving" -> "you are moving").
    '''
    #---------------------------------------------------------------
    # Normalize add_attr to exactly three entries so the format calls below
    # can always reference indices 0..2.
    if not add_attr:
        add_attr = [None,None,None]
    elif len(add_attr)<3:
        for i in range(3-len(add_attr)):
            add_attr.append(None)
    if len(cmds) < 2:
        cmds.append(None)
    operator = help.spfc_opr(cmds[0],True)
    names=[None,None,None]
    # Only the quit message needs the (randomly colored) author names.
    if operator == 'q':
        names = ['Ken Rotaris', 'Tunahan Erbay', 'Leonardo Salsi']
        random.shuffle(names) #names in random order
        names[0] = color.bold(color.red(names[0]))
        names[1] = color.bold(color.greenDark(names[1]))
        names[2] = color.bold(color.yellow(names[2]))
        random.shuffle(names)
    # NOTE(review): the whole table is rebuilt (and every template formatted)
    # on each call; only dictionary[operator][typ] is returned.
    dictionary = {
        #command | messages #TODO: blank messages are not being used yet/ have not ben set yet.
        'cd' : {'success': color.greenDark(''),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('The directory does not exist')
                },
        'open': {'success': color.greenDark(''),
                 'warning': color.yellow('wrong argument format'),
                 'error': color.red('unable to open file')
                 },
        'ls' : {'success': color.greenDark(''),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('given directory doesn\'t exist'),
                'unknown': color.red('Unknown option \'{}\''.format(cmds[1]))
                },
        'cat' : {'success': color.greenDark(''),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('file doesn\'t exist at \'{1}\''.format(cmds[0], add_attr)),
                 'nt_supp': color.red('file type currently not supported by \'{}\' command'.format(cmds[0])),
                 'hint' : color.grey('tip: use \'{}\' followed by an integer to display a range.'.format(cmds[0]))
                 },
        'mk' : {'success': color.greenDark('folder {0} created'.format(add_attr[0])), #add_attr = [name, path]
                'warning': color.yellow('wrong argument format'),
                'file_error' : color.red('name cannot contain a dot'), #add_attr = [name, typ, path]
                'format_error' : color.red('please use command as follows: mk <dir_name>'),
                'path_error': color.red('the path the folder is to be created in does not exist'.format(add_attr))
                },
        'add' : {'success': color.greenDark('File added to the filesystem.'),
                 # add_attr = [name, path]
                 'warning': color.yellow('wrong argument format'),
                 'error': color.red('{0} "{1}" already exists at {2}'.format(add_attr[1], add_attr[0], add_attr[2])),
                 # add_attr = [name, typ, path]
                 'path_error': color.red('The source does not exist'.format(add_attr)),
                 'format_error': color.red('\'{}\' either outside of the filesystem or not an existing directory'.format(add_attr[2])),
                 'nodstdir': color.red('Destination folder does not exist.'),
                 'fs_error': color.red('Cannot add files from within the filesystem.')
                 },
        'rm' : {'success': color.greenDark('deleted {0} from {1}'.format(add_attr[0], add_attr[1])), #add_attr = [name, path]
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('{0} "{1}" does not exist at {2}'.format(add_attr[1], add_attr[0], add_attr[2])), #add_attr = [name, typ, path]
                'path_error' : color.red('\'{}\' doesn\'t exist'.format(add_attr))
                },
        'mount' : {'success': color.greenDark('Filesystem mounted successfully.'),
                   'warning': color.yellow('Mount a filesystem of an other user with mnt <user> <filesystem_name> [<path>]'),
                   'error' : color.red('Unable to mount filesystem.'),
                   'nodst': color.red('Destination path does not exist.')
                   },
        'umt' : {'success': color.greenDark(''),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('')
                 },
        'exp' : {'success': color.greenDark('Filesystem has been successfully exported!'),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('No root_mockup folder found at current location or its super folders \'{}\'.'.format(add_attr[0]))
                 },
        'mkp' : {'success': color.greenDark(''),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('folder \'{0}\' already exists at \'{1}\''.format(add_attr[0], add_attr[1]))
                 },
        'pwd' : {'success': color.greenDark(''),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('')
                 },
        'img' : {'success': color.greenDark('successfully created image \'{0}\' at \'{1}\''.format(add_attr[0], add_attr[1])),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('')
                 },
        'txt' : {'success': color.greenDark('successfully created text \'{0}\' at \'{1}\''.format(add_attr[0], add_attr[1])),
                 'warning': color.yellow('wrong argument format'),
                 'error' : color.red('')
                 },
        'mv' : {'success': color.greenDark('successfully moved file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('the {0} path \'{1}\' doesn\'t exist'.format(add_attr[0], add_attr[1])),
                'sameDir': color.grey('Information: you are moving a file/folder within the same directory.'),
                'nodstdir': color.red('The destination directory does not exist.'),
                'nosrcdir': color.red('The source file or directory does not exist.')
                },
        'cp' : {'success': color.greenDark('successfully copied file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('the {0} path \'{1}\' doesn\'t exist'.format(add_attr[0], add_attr[1]))
                },
        'rn' : {'success' : color.greenDark('successfully renamed file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning' : color.yellow('wrong argument format'),
                'error' : color.red('the given path \'{0}\' doesn\'t exist'.format(add_attr[0])),
                'nosrcdir': color.red('The source file or directory does not exist.')
                },
        'f' : {'success': color.greenDark('\'{0}\' found at {1}'.format(add_attr[0], add_attr[1])),
               'warning': color.yellow('wrong argument format'),
               'error' : color.red('\'{0}\' not found in \'{1}\''.format(add_attr[0], add_attr[1]))
               },
        '--help' : {'success': color.greenDark(''),
                    'warning': color.yellow('wrong argument format'),
                    'error' : color.red('')
                    },
        'quit' : {'success': '\n Thanks for using our Application!\n Made with ' + color.bold(
                      color.redLight('<3')) + ' by: {0}, {1}, {2}\n'.format(names[0], names[1], names[2]),
                  'warning': color.yellow('If you want to terminate program, enter q without further arguments.'),
                  'error' : color.red('If you want to terminate the program, enter q without further arguments.')
                  },
        'clear' : {'success': color.greenDark(''),
                   'warning': color.yellow('wrong argument format'),
                   'error' : color.red('')
                   }
    }
    return dictionary[operator][typ]
|
nilq/baby-python
|
python
|
from brownie import accounts, PassiveStrategy
from brownie.network.gas.strategies import ExponentialScalingStrategy
import os
# PassiveStrategy contract addresses to rebalance, one per pool.
# Commented-out entries are retired deployments kept for reference.
STRATEGIES = [
    # "0x40C36799490042b31Efc4D3A7F8BDe5D3cB03526", # V0 ETH/USDT
    # "0xA6803E6164EE978d8C511AfB23BA49AE0ae0C1C3", # old V1 ETH/USDC
    # "0x5503bB32a0E37A1F0B8F8FE2006abC33C779a6FD", # old V1 ETH/USDT
    "0x1cEA471aab8c57118d187315f3d6Ae1834cCD836", # V1 ETH/USDC
    "0x4e03028626aa5e5d5e4CFeF2970231b0D6c5d5Ed", # V1 ETH/USDT
    "0x8209df5A847C321d26eCb155CA76f95224c5DCd9", # V1 WBTC/USDC
]
def getAccount(account, pw):
    """Decrypt a web3 keystore file and register it with brownie's accounts."""
    from web3.auto import w3
    with open(account, "r") as keyfile:
        encrypted = keyfile.read()
    return accounts.add(w3.eth.account.decrypt(encrypted, pw))
def main():
    """Rebalance every configured PassiveStrategy with the keeper account."""
    # Keystore path and password come from the environment (CI/cron friendly).
    keeper = getAccount(os.environ["KEEPER_ACCOUNT"], os.environ["KEEPER_PW"])
    # keeper = accounts.load(input("Brownie account: "))
    # Remember the starting balance so gas spend can be reported at the end.
    balance = keeper.balance()
    gas_strategy = ExponentialScalingStrategy("50 gwei", "1000 gwei")
    for address in STRATEGIES:
        print(f"Running for strategy: {address}")
        strategy = PassiveStrategy.at(address)
        try:
            strategy.rebalance({"from": keeper, "gas_price": gas_strategy})
            print("Rebalanced!")
        except ValueError as e:
            # A revert (e.g. rebalance not yet due) surfaces as ValueError;
            # log it and continue with the remaining strategies.
            print(e)
        print()
    print(f"Gas used: {(balance - keeper.balance()) / 1e18:.4f} ETH")
    print(f"New balance: {keeper.balance() / 1e18:.4f} ETH")
|
nilq/baby-python
|
python
|
from typing import Any
import pandas as pd
from anubis.models import Submission, Assignment
from anubis.utils.cache import cache
def get_submissions(course_id: str) -> pd.DataFrame:
    """
    Get all submissions from visible assignments, and put them in a dataframe

    :param course_id: id of the course whose submissions are collected
    :return: DataFrame with columns id, owner_id, assignment_id, processed,
        created (timestamps rounded to the hour)
    """
    # Get the submission sqlalchemy objects
    raw_submissions = (
        Submission.query.join(Assignment)
        .filter(
            # `== False` is intentional: SQLAlchemy column comparison, not a
            # Python identity check — do not "fix" to `is False`.
            Assignment.hidden == False,
            Assignment.course_id == course_id,
        )
        .all()
    )
    # Specify which columns we want
    columns = ["id", "owner_id", "assignment_id", "processed", "created"]
    # Build a dataframe of from the columns we pull out of each submission object
    submissions = pd.DataFrame(
        data=list(
            map(
                lambda x: ({column: getattr(x, column) for column in columns}),
                raw_submissions,
            )
        ),
        columns=columns,
    )
    # Round the submission timestamps to the nearest hour
    submissions["created"] = submissions["created"].apply(lambda date: pd.to_datetime(date).round("H"))
    return submissions
@cache.memoize(timeout=360)
def get_raw_submissions(course_id: str = None) -> list[dict[str, Any]]:
    """Build per-assignment hourly submission-count series for charting.

    Bug fix: the original called get_submissions() with no arguments, but
    get_submissions requires a course_id, so every call raised TypeError.
    The id is now accepted (defaulting to None so existing zero-argument
    callers keep working) and forwarded; cache.memoize keys on it.

    :param course_id: course whose visible assignments are aggregated.
    :return: one dict per assignment with its name, release/due dates, and
        a list of {x, y, label} chart points (hour bucket, count).
    """
    submissions_df = get_submissions(course_id)
    # Count submissions grouped by (assignment, hour bucket).
    data = (
        submissions_df.groupby(["assignment_id", "created"])["id"]
        .count()
        .reset_index()
        .rename(columns={"id": "count"})
        .to_dict()
    )
    # Timestamps must be plain strings for JSON serialization.
    data["created"] = {k: str(v) for k, v in data["created"].items()}
    assignment_ids = list(set(data["assignment_id"].values()))
    response = {}
    # One response entry per assignment, with metadata from the DB.
    for assignment_id in assignment_ids:
        assignment = Assignment.query.filter(Assignment.id == assignment_id).first()
        response[assignment_id] = {
            "data": [],
            "name": assignment.name,
            "release_date": str(assignment.release_date),
            "due_date": str(assignment.due_date),
        }
    # One chart point per (assignment, hour) row of the grouped frame.
    for index, assignment_id in data["assignment_id"].items():
        response[assignment_id]["data"].append(
            {
                "x": data["created"][index],
                "y": data["count"][index],
                "label": f"{data['created'][index]} {data['count'][index]}",
            }
        )
    return list(response.values())
|
nilq/baby-python
|
python
|
# jsb/socklib/partyline.py
#
#
""" provide partyline functionality .. manage dcc sockets. """
__copyright__ = 'this file is in the public domain'
__author__ = 'Aim'
## jsb imports
from jsb.lib.fleet import getfleet
from jsb.utils.exception import handle_exception
from jsb.lib.threads import start_new_thread
from jsb.imports import getjson
json = getjson()
## basic imports
import thread
import pickle
import socket
import logging
## classes
class PartyLine(object):
    """ partyline can be used to talk through dcc chat connections. """

    # NOTE(review): Python 2 module — uses `thread` (renamed `_thread` in
    # Python 3) and implicit byte strings on the sockets.

    def __init__(self):
        # Each entry is a dict with keys:
        # 'bot', 'sock', 'nick', 'userhost', 'channel', 'silent'.
        self.socks = [] # partyline sockets list
        self.jids = []  # presumably XMPP JIDs; not used by the methods visible here — verify
        self.lock = thread.allocate_lock()  # guards self.socks mutation in del_party()
    def size(self):
        """ return the number of users currently on the partyline. """
        return len(self.socks)
    def resume(self, sessionfile):
        """ resume bot from session file. """
        try:
            # NOTE(review): the file handle is never closed, and the bare
            # except hides all failures behind handle_exception().
            session = json.load(open(sessionfile, 'r'))
            self._resume(session)
        except: handle_exception()
    def _resume(self, data, reto=None):
        """ resume a party line connection after reboot.

        data is the session dict produced by _resumedata(); each entry's
        'fileno' is re-wrapped into a blocking socket and handed back to
        the owning bot via _dccresume().
        """
        fleet = getfleet()
        for i in data['partyline']:
            logging.warn("partyline - resuming %s" % i)
            bot = fleet.byname(i['botname'])
            if not bot: logging.error("partyline - can't find bot") ; continue
            # Recreate a socket object from the inherited file descriptor.
            sock = socket.fromfd(i['fileno'], socket.AF_INET, socket.SOCK_STREAM)
            sock.setblocking(1)
            nick = i['nick']
            userhost = i['userhost']
            channel = i['channel']
            # NOTE(review): unreachable — the identical check above already
            # `continue`d when bot was falsy.
            if not bot:
                logging.error("partyline - can't find %s bot in fleet" % i['botname'])
                continue
            self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, 'userhost': userhost, 'channel': channel, 'silent': i['silent']})
            bot._dccresume(sock, nick, userhost, channel)
            if reto: self.say_nick(nick, 'rebooting done')
def _resumedata(self):
""" return data used for resume. """
result = []
for i in self.socks: result.append({'botname': i['bot'].cfg.name, 'fileno': i['sock'].fileno(), 'nick': i['nick'], 'userhost': i['userhost'], 'channel': i['channel'], 'silent': i['silent']})
return result
def stop(self, bot):
""" stop all users on bot. """
for i in self.socks:
if i['bot'] == bot:
try:
i['sock'].shutdown(2)
i['sock'].close()
except: pass
def stop_all(self):
""" stop every user on partyline. """
for i in self.socks:
try:
i['sock'].shutdown(2)
i['sock'].close()
except:
pass
def loud(self, nick):
""" enable broadcasting of txt for nick. """
for i in self.socks:
if i['nick'] == nick: i['silent'] = False
def silent(self, nick):
""" disable broadcasting txt from/to nick. """
for i in self.socks:
if i['nick'] == nick: i['silent'] = True
def add_party(self, bot, sock, nick, userhost, channel):
''' add a socket with nick to the list. '''
for i in self.socks:
if i['sock'] == sock: return
self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, 'userhost': userhost, 'channel': channel, 'silent': False})
logging.warn("partyline - added user %s" % nick)
def del_party(self, nick):
''' remove a socket with nick from the list. '''
nick = nick.lower()
self.lock.acquire()
try:
for socknr in range(len(self.socks)-1, -1, -1):
if self.socks[socknr]['nick'].lower() == nick: del self.socks[socknr]
logging.debug('partyline - removed user %s' % nick)
finally: self.lock.release()
def list_nicks(self):
''' list all connected nicks. '''
result = []
for item in self.socks: result.append(item['nick'])
return result
def say_broadcast(self, txt):
''' broadcast a message to all ppl on partyline. '''
for item in self.socks:
if not item['silent']: item['sock'].send("%s\n" % txt)
def say_broadcast_notself(self, nick, txt):
''' broadcast a message to all ppl on partyline, except the sender. '''
nick = nick.lower()
for item in self.socks:
if item['nick'] == nick: continue
if not item['silent']: item['sock'].send("%s\n" % txt)
def say_nick(self, nickto, msg):
''' say a message on the partyline to an user. '''
nickto = nickto.lower()
for item in self.socks:
if item['nick'].lower() == nickto:
if not '\n' in msg: msg += "\n"
item['sock'].send("%s" % msg)
return
def is_on(self, nick):
''' checks if user an is on the partyline. '''
nick = nick.lower()
for item in self.socks:
if item['nick'].lower() == nick: return True
return False
## global partyline object
# module-level singleton .. other modules use this instance directly
partyline = PartyLine()
def size():
    """ return the number of users on the global partyline. """
    return partyline.size()
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from builtins import str
import six
@six.python_2_unicode_compatible
class TokenSet:
    """
    A token set is used to store the unique list of all tokens
    within an index. Token sets are also used to represent an
    incoming query to the index, this query token set and index
    token set are then intersected to find which tokens to look
    up in the inverted index.

    A token set can hold multiple tokens, as in the case of the
    index token set, or it can hold a single token as in the
    case of a simple query token set.

    Additionally token sets are used to perform wildcard matching.
    Leading, contained and trailing wildcards are supported, and
    from this edit distance matching can also be provided.

    Token sets are implemented as a minimal finite state automata,
    where both common prefixes and suffixes are shared between tokens.
    This helps to reduce the space used for storing the token set.

    Each node carries `final` (accepting state), `edges` (char -> node)
    and a monotonically increasing class-wide `id`.

    TODO: consider https://github.com/glyph/automat
    """

    _next_id = 1

    def __init__(self):
        self.final = False
        self.edges = {}
        self.id = self._next_id
        # NOTE: incremented on the concrete class of the instance; a
        # subclass would fork its own counter -- presumably never subclassed
        self.__class__._next_id += 1

    def __str__(self):
        """Return a string keyed on final flag, edge labels and child ids.

        Used as a signature for automaton minimization; the `_string`
        cache is assigned externally (not in this class) -- when present
        it is returned as-is.
        """
        try:
            return self._string
        except AttributeError:
            pass
        string = "1" if self.final else "0"
        for label in sorted(list(self.edges.keys())):
            node = self.edges[label]
            try:
                node_id = str(node.id)
            except AttributeError:
                # TODO: JS seems to rely on undefined for the id attribute?
                node_id = ""
            string = string + label + node_id
        return string

    def __repr__(self):
        return '<TokenSet "{}">'.format(str(self))

    @classmethod
    def from_string(cls, string):
        """Creates a TokenSet from a string.

        The string may contain one or more wildcard characters (*) that will
        allow wildcard matching when intersecting with another TokenSet.
        """
        # FIX: first parameter renamed from `self` to `cls` -- this is a
        # classmethod, so the first argument is the class, not an instance.
        node = TokenSet()
        root = node
        # Iterates through all characters in the passed string appending
        # a node for each character.
        # When a wildcard character is found then a self referencing edge
        # is introduced to continually match any number of characters
        for i, char in enumerate(string):
            final = i == len(string) - 1
            if char == "*":
                node.edges[char] = node
                node.final = final
            else:
                next_ = TokenSet()
                next_.final = final
                node.edges[char] = next_
                node = next_
        return root

    @classmethod
    def from_fuzzy_string(cls, string, edit_distance):
        """Creates a token set representing a single string with a specified
        edit distance.

        Insertions, deletions, substitutions and transpositions are each
        treated as an edit distance of 1.

        Increasing the allowed edit distance will have a dramatic impact
        on the performance of both creating and intersecting these TokenSets.
        It is advised to keep the edit distance less than 3.
        """
        root = TokenSet()
        # Explicit stack instead of recursion; each frame is the node being
        # extended, the edits still allowed, and the remaining suffix.
        stack = [{"node": root, "edits_remaining": edit_distance, "string": string}]
        while stack:
            frame = stack.pop()
            # no edit
            if len(frame["string"]) > 0:
                char = frame["string"][0]
                no_edit_node = None
                if char in frame["node"].edges:
                    no_edit_node = frame["node"].edges[char]
                else:
                    no_edit_node = TokenSet()
                    frame["node"].edges[char] = no_edit_node
                if len(frame["string"]) == 1:
                    no_edit_node.final = True
                stack.append(
                    {
                        "node": no_edit_node,
                        "edits_remaining": frame["edits_remaining"],
                        "string": frame["string"][1:],
                    }
                )
            if frame["edits_remaining"] == 0:
                continue
            # insertion, can only do insertion if there are edits remaining
            if "*" in frame["node"].edges:
                insertion_node = frame["node"].edges["*"]
            else:
                insertion_node = TokenSet()
                frame["node"].edges["*"] = insertion_node
            if len(frame["string"]) == 0:
                insertion_node.final = True
            stack.append(
                {
                    "node": insertion_node,
                    "edits_remaining": frame["edits_remaining"] - 1,
                    "string": frame["string"],
                }
            )
            # deletion, can only do a deletion if we have enough edits
            # remaining and if there are characters left to delete in the string
            if len(frame["string"]) > 1:
                stack.append(
                    {
                        "node": frame["node"],
                        "edits_remaining": frame["edits_remaining"] - 1,
                        "string": frame["string"][1:],
                    }
                )
            # deletion, just removing the last character of the string
            if len(frame["string"]) == 1:
                frame["node"].final = True
            # substitution, can only do a substitution if we have enough edits
            # remaining and there are characters left to substitute
            if len(frame["string"]) >= 1:
                if "*" in frame["node"].edges:
                    substitution_node = frame["node"].edges["*"]
                else:
                    substitution_node = TokenSet()
                    frame["node"].edges["*"] = substitution_node
                if len(frame["string"]) == 1:
                    substitution_node.final = True
                stack.append(
                    {
                        "node": substitution_node,
                        "edits_remaining": frame["edits_remaining"] - 1,
                        "string": frame["string"][1:],
                    }
                )
            # transposition, can only do a transposition if there are edits
            # remaining and there are enough characters to transpose
            if frame["edits_remaining"] and len(frame["string"]) > 1:
                char_a = frame["string"][0]
                char_b = frame["string"][1]
                transpose_node = None
                if char_b in frame["node"].edges:
                    transpose_node = frame["node"].edges[char_b]
                else:
                    transpose_node = TokenSet()
                    frame["node"].edges[char_b] = transpose_node
                if len(frame["string"]) == 1:
                    transpose_node.final = True
                stack.append(
                    {
                        "node": transpose_node,
                        "edits_remaining": frame["edits_remaining"] - 1,
                        "string": char_a + frame["string"][2:],
                    }
                )
        return root

    @classmethod
    def from_list(cls, list_of_words):
        """Build a minimized TokenSet from an iterable of words via
        TokenSetBuilder (imported lazily to avoid a circular import)."""
        from lunr.token_set_builder import TokenSetBuilder

        builder = TokenSetBuilder()
        for word in list_of_words:
            builder.insert(word)
        builder.finish()
        return builder.root

    @classmethod
    def from_clause(cls, clause):
        """Build a TokenSet for a query clause, fuzzy when the clause
        carries an edit distance."""
        if clause.edit_distance:
            return cls.from_fuzzy_string(clause.term, clause.edit_distance)
        else:
            return cls.from_string(clause.term)

    def to_list(self):
        """Return every word accepted by this automaton (depth-first walk)."""
        words = []
        stack = [{"prefix": "", "node": self}]
        while stack:
            frame = stack.pop()
            if frame["node"].final:
                words.append(frame["prefix"])
            for edge in frame["node"].edges.keys():
                stack.append(
                    {
                        "prefix": frame["prefix"] + str(edge),
                        "node": frame["node"].edges[edge],
                    }
                )
        return words

    def intersect(self, other):
        """Returns a new TokenSet that is the intersection of this TokenSet
        and the passed TokenSet.

        This intersection will take into account any wildcards contained within
        the TokenSet.
        """
        output = TokenSet()
        stack = [{"node": self, "q_node": other, "output": output}]
        while stack:
            frame = stack.pop()
            # pair up edges that agree (or where the query edge is a wildcard)
            for q_edge in frame["q_node"].edges.keys():
                for n_edge in frame["node"].edges.keys():
                    if n_edge == q_edge or q_edge == "*":
                        node = frame["node"].edges[n_edge]
                        q_node = frame["q_node"].edges[q_edge]
                        final = node.final and q_node.final
                        next_ = None
                        if n_edge in frame["output"].edges:
                            # an edge already exists for this character, reuse
                            # it and widen its final flag
                            next_ = frame["output"].edges[n_edge]
                            next_.final = next_.final or final
                        else:
                            next_ = TokenSet()
                            next_.final = final
                            frame["output"].edges[n_edge] = next_
                        stack.append({"node": node, "q_node": q_node, "output": next_})
        return output
|
nilq/baby-python
|
python
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import pandas as pd
class OffsetScaler(object):
    """Column-wise shift-and-scale transformer for pandas DataFrames.

    Applies ``scaled = (data - offset) / factor`` where *offset* and
    *factor* are pandas Series indexed by the expected column labels.
    Instances are usually built via :meth:`create_normalizing_scaler`
    or :meth:`create_from_mean_std`.
    """

    def __init__(self, expected_columns, offset_series, factor_series):
        """
        This scaling object shifts by the offset and then scales the result
        by the factor: ``scaled_data = (data - offset) / factor``.

        Args:
            expected_columns: list of str
                Names of the columns in offset_series, factor_series, and
                any dataframe passed to scale/unscale.
            offset_series: pandas Series
                Per-column offsets used to shift the data.
            factor_series: pandas Series
                Per-column factors used to scale the shifted data.

        Raises:
            ValueError: if either series index does not match
                expected_columns.
        """
        # Validate the label alignment up front so a mismatched scaler is
        # never silently constructed.
        if list(offset_series.index) != expected_columns:
            raise ValueError(
                "OffsetScaler was passed an offset series with an index that"
                " does not match expected_columns. Please make sure these labels match."
            )
        if list(factor_series.index) != expected_columns:
            raise ValueError(
                "OffsetScaler was passed a factor series with an index that"
                " does not match expected_columns. Please make sure these labels match."
            )
        self._expected_columns = expected_columns
        self._offset = offset_series
        self._factor = factor_series

    @staticmethod
    def create_normalizing_scaler(dataframe):
        """
        Creates a scaling object that normalizes the data between 0 and 1.

        Args:
            dataframe: pandas DataFrame
                The data (usually the training data) used to compute the
                scaling factor (max - min) and offset (min).
        """
        columns = list(dataframe.columns)
        lo = dataframe.min()
        span = dataframe.max() - lo
        return OffsetScaler(columns, lo, span)

    @staticmethod
    def create_from_mean_std(dataframe):
        """
        Creates a scaling object using the mean as the offset and the
        standard deviation as the factor.

        Args:
            dataframe: pandas DataFrame
                The data (usually the training data) used to compute the
                mean and standard deviation for the scaler.
        """
        return OffsetScaler(
            list(dataframe.columns), dataframe.mean(), dataframe.std()
        )

    def _verify_columns_match(self, dataframe):
        # Internal guard: reject frames whose columns differ from the ones
        # the scaler was built with.
        if self._expected_columns != list(dataframe.columns):
            raise ValueError(
                "OffsetScaler was passed a dataframe that did not contain"
                " the same column labels as those used to create the scaler."
                " Please make sure the column labels match."
            )

    def scale(self, dataframe):
        """
        Return a new dataframe scaled according to the offset and factor.

        Args:
            dataframe: pandas Dataframe
                The dataframe to be scaled

        Returns: pandas DataFrame
        """
        self._verify_columns_match(dataframe)
        return (dataframe - self._offset) / self._factor

    def unscale(self, dataframe):
        """
        Return a new dataframe unscaled according to the offset and factor.

        Args:
            dataframe: pandas Dataframe
                The dataframe to be unscaled

        Returns: pandas DataFrame
        """
        self._verify_columns_match(dataframe)
        return dataframe * self._factor + self._offset

    def expected_columns(self):
        """Return the expected column names for the scaler series objects."""
        return self._expected_columns

    def offset_series(self):
        """Return the offset for the scaler as a pandas Series object."""
        return self._offset

    def factor_series(self):
        """Return the factors for the scaler as a pandas Series object."""
        return self._factor

    def to_dict(self):
        """Return a dictionary representation of this scaler."""
        return {
            "expected_columns": list(self._expected_columns),
            "offset": self._offset.to_dict(),
            "factor": self._factor.to_dict(),
        }

    @staticmethod
    def from_dict(d):
        """
        Create an instance of this scaler from a dictionary
        (that was created with to_dict).

        Args:
            d : dict
                The dict created with to_dict

        Returns: new OffsetScaler
        """
        return OffsetScaler(
            d["expected_columns"], pd.Series(d["offset"]), pd.Series(d["factor"])
        )
|
nilq/baby-python
|
python
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import random
def busqueda_local(solucion_inicial, evaluacion, obtener_vecinos,
                   T_max, T_min, reduccion):
    """
    Simulated Annealing (maximization).

    Starting from `solucion_inicial`, repeatedly evaluates the neighbors
    produced by `obtener_vecinos`; an improving neighbor is always
    accepted, a worse one is accepted with probability
    exp(delta / T). The temperature T decays geometrically from T_max by
    the `reduccion` factor until it drops below T_min.

    Returns a tuple ``(best_solution, evaluations_performed)``.
    """
    from random import random
    current = solucion_inicial
    current_score = evaluacion(current)
    best, best_score = current, current_score
    evaluated = 1
    temperature = T_max
    while temperature >= T_min:
        for candidate in obtener_vecinos(current):
            candidate_score = evaluacion(candidate)
            evaluated += 1
            # Always move uphill; only roll the dice for non-improving moves
            # (preserves the original short-circuit on random()).
            accept = candidate_score > current_score
            if not accept:
                accept = random() < np.exp((candidate_score - current_score) / temperature)
            if accept:
                current, current_score = candidate, candidate_score
        if best_score < current_score:
            best, best_score = current, current_score
        temperature *= reduccion
    return best, evaluated
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
from threading import Thread
import pymysql
from Sqlcore import Sqlcore
'''
入口功能
'''
# (entry point configuration)
'''vmode打印显示模式full/standard/simple'''
# vmode controls print verbosity: full / standard / simple
# production photo-gallery database (Aliyun RDS)
# NOTE(review): hard-coded credentials; this dict is immediately overridden below
conn = {"hostname":"xxxxxx.mysql.rds.aliyuncs.com","username":"forpython","password":"Ffu398fh_GFUPY","database":"dbuser","hostport":3708}
cfgs = {"table":"bota_photo", "column":"corver,gallery", "offset":0, "vmode":"full", "limit":5, "sleep": 1}
# local photo-gallery database -- the configuration actually in effect
conn = {"hostname":"localhost","username":"website","password":"website123","database":"dbuser","hostport":3306}
cfgs = {"table":"bota_photo", "column":"corver,gallery", "offset":0, "vmode":"full", "limit":5, "sleep": 0}
# Qunar hotel configuration (disabled)
#conn = {"hostname":"192.168.1.11","username":"website","password":"website123","database":"dbuser","hostport":3306}
#cfgs = {"table":"bota_hotel", "column":"cover,photos", "offset":0, "vmode":"simple", "limit":50, "sleep": 5}
'''开始执行核心工作'''
# (run the core worker)
def runcore(uniq, conn, cfgs):
    """Run one Sqlcore worker.

    uniq: label identifying this worker/thread (e.g. "#t0");
    conn: DB connection settings dict; cfgs: job configuration dict.
    """
    sc = Sqlcore(uniq, conn, cfgs)
    sc.run()
# total row count of the source table
def totalcount(conn, table):
    """Return the total number of rows in `table`.

    Args:
        conn: dict with keys 'hostname', 'username', 'password',
            'database' and 'hostport' describing the MySQL server.
        table: table name. NOTE: interpolated into the SQL text; it must
            come from trusted configuration, never from user input.

    Returns:
        int: the row count.
    """
    # Use keyword arguments: PyMySQL >= 1.0 removed the old positional
    # connect() signature the original relied on.
    db = pymysql.connect(host=conn['hostname'], user=conn['username'],
                         password=conn['password'], database=conn['database'],
                         port=conn['hostport'])
    try:
        cursor = db.cursor()
        cursor.execute("SELECT count(*) FROM `{table}`".format(table=table))
        data = cursor.fetchone()
        return int(data[0])
    finally:
        # the original leaked the connection; always close it
        db.close()
if __name__ == "__main__":
    print("welcome")
    # ask the DB how many rows there are to process
    total = totalcount(conn, cfgs["table"])
    print("发现数据库表"+cfgs["table"]+"存在总数:"+str(total)+"条记录")
    # interactively ask for the number of worker threads
    # NOTE(review): the prompt says max 10 but the code allows up to 50
    while True:
        tnum = input("开启线程数,最少1个,最多10个,exit退出:")
        if tnum == "exit":
            exit()
        if tnum.isdigit():
            tnum = int(tnum)
            if tnum <1:
                raise ValueError("输入的必须是正整数啊")
            elif tnum > 50:
                print("线程数不能够超过50啊~")
            elif total < tnum:
                print("结果集还没有线程多~")
            else:
                break
        else:
            print("输入的必须是1以上的数字啊~")
    if tnum == 1:
        # single thread
        runcore("ONETHREAD", conn, cfgs)
    else:
        # multiple threads
        one = total//tnum # starting cursor offset per thread
        tlist = []
        for i in range(tnum):
            # each thread gets its own copy of the config with its own offset
            cfg = cfgs.copy()
            cfg["offset"] = one * i
            #print(cfgs)
            t = Thread(target=runcore, args=("#t"+str(i), conn, cfg))
            tlist.append(t)
            t.start()
            #print("\n" + t.getName())
        for i in tlist:
            i.join() # block the main thread until all workers finish
    print("allDownloaded")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import os
import shutil
def cleanup_dump(dumpstr):
    """Trim the header and trailer fragments from a raw dump string.

    The dump is split on blank lines; the first two fragments and the
    last one are discarded and the middle is rejoined with blank-line
    separators plus a trailing blank line. Returns '' when fewer than
    four fragments are present.
    """
    fragments = dumpstr.split('\n\n')
    if len(fragments) < 4:
        return ''
    kept = fragments[2:-1]
    return '\n\n'.join(kept) + '\n\n'
def identify_checkpoints(basedir, ident):
    """Find RNN dump files in *basedir* and pair them with checkpoints.

    Filenames are expected to look like
    ``lm_lstm_epoch25.00_0.3859.t7.output.1.0.txt`` where the fourth
    dot-separated field equals *ident*. Returns a list of tuples
    ``(dump_path, checkpoint_path, (epoch, vloss, temp))``.
    """
    found = []
    for name in os.listdir(basedir):
        fullpath = os.path.join(basedir, name)
        if not os.path.isfile(fullpath):
            continue
        if not (name.startswith('lm_lstm_epoch') and name.endswith('.txt')):
            continue
        if ident not in name:
            continue
        # attempt super hacky parsing of the middle of the filename
        inner = name[13:-4]
        halves = inner.split('_')
        if len(halves) != 2:
            continue
        parts = halves[1].split('.')
        if len(parts) != 6:
            continue
        if parts[3] != ident:
            continue
        epoch = halves[0]
        vloss = parts[0] + '.' + parts[1]
        temp = parts[4] + '.' + parts[5]
        cpname = 'lm_lstm_epoch' + epoch + '_' + vloss + '.t7'
        found.append((fullpath, os.path.join(basedir, cpname),
                      (epoch, vloss, temp)))
    return found
def process_dir(basedir, targetdir, ident, copy_cp = False, verbose = False):
    """Copy cleaned dump files (and optionally checkpoints) from basedir
    into targetdir, then recurse into every subdirectory.

    Output names are prefixed with the base directory name so files from
    different runs do not collide in targetdir.
    """
    (basepath, basedirname) = os.path.split(basedir)
    # a trailing slash makes the split yield '' -- split again on the parent
    if basedirname == '':
        (basepath, basedirname) = os.path.split(basepath)
    cp_infos = identify_checkpoints(basedir, ident)
    for (dpath, cpath, (epoch, vloss, temp)) in cp_infos:
        if verbose:
            print(('found dumpfile ' + dpath))
        dname = basedirname + '_epoch' + epoch + '_' + \
            vloss + '.' + ident + '.' + temp + '.txt'
        cname = basedirname + '_epoch' + epoch + '_' + vloss + '.t7'
        tdpath = os.path.join(targetdir, dname)
        tcpath = os.path.join(targetdir, cname)
        if verbose:
            print(('  cpx ' + dpath + ' ' + tdpath))
        # rewrite the dump through cleanup_dump() rather than copying it
        with open(dpath, 'rt') as infile:
            with open(tdpath, 'wt') as outfile:
                outfile.write(cleanup_dump(infile.read()))
        if copy_cp:
            if os.path.isfile(cpath):
                if verbose:
                    print(('  cp ' + cpath + ' ' + tcpath))
                shutil.copy(cpath, tcpath)
    # also preserve the command.txt describing how the run was launched
    if copy_cp and len(cp_infos) > 0:
        cmdpath = os.path.join(basedir, 'command.txt')
        tcmdpath = os.path.join(targetdir, basedirname + '.command')
        if os.path.isfile(cmdpath):
            if verbose:
                print(('  cp ' + cmdpath + ' ' + tcmdpath))
            shutil.copy(cmdpath, tcmdpath)
    # recurse into subdirectories
    for path in os.listdir(basedir):
        fullpath = os.path.join(basedir, path)
        if os.path.isdir(fullpath):
            process_dir(fullpath, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
def main(basedir, targetdir, ident = 'output', copy_cp = False, verbose = False):
    """Entry point: recursively process basedir into targetdir."""
    process_dir(basedir, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
if __name__ == '__main__':
    # command-line interface: basedir targetdir [-c] [-i IDENT] [-v]
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('basedir', #nargs='?'. default=None,
                        help='base rnn directory, must contain sample.lua')
    parser.add_argument('targetdir', #nargs='?', default=None,
                        help='checkpoint directory, all subdirectories will be processed')
    parser.add_argument('-c', '--copy_cp', action='store_true',
                        help='copy checkpoints used to generate the output files')
    parser.add_argument('-i', '--ident', action='store', default='output',
                        help='identifier to look for to determine checkpoints')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output')
    args = parser.parse_args()
    main(args.basedir, args.targetdir, ident=args.ident, copy_cp=args.copy_cp, verbose=args.verbose)
    exit(0)
|
nilq/baby-python
|
python
|
# Read two space-separated integers A and B; print their product when both
# fit the 9x9 multiplication table (<= 9), otherwise -1.
# BUG FIX: map(int, input()) iterated the *characters* of the line, which
# crashes on the space-separated "A B" input format -- split first.
a, b = map(int, input().split())
if a <= 9 and b <= 9:
    ans = a * b
else:
    ans = -1
print(ans)
|
nilq/baby-python
|
python
|
# Test the frozen module defined in frozen.c.
# NOTE: Python 2 syntax ("except ImportError, x" / "raise X, msg") -- this
# belongs to the CPython 2 test suite and will not parse under Python 3.
from test.test_support import TestFailed
import sys, os
# the frozen __hello__ / __phello__ modules must be importable
try:
    import __hello__
except ImportError, x:
    raise TestFailed, "import __hello__ failed:" + str(x)
try:
    import __phello__
except ImportError, x:
    raise TestFailed, "import __phello__ failed:" + str(x)
try:
    import __phello__.spam
except ImportError, x:
    raise TestFailed, "import __phello__.spam failed:" + str(x)
# __phello__.foo is deliberately absent and must fail to import
if sys.platform != "mac": # On the Mac this import does succeed.
    try:
        import __phello__.foo
    except ImportError:
        pass
    else:
        raise TestFailed, "import __phello__.foo should have failed"
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.6 on 2021-08-21 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (see header): changes the `choices` of the
    # `icon` CharField on education.MaterialBlocks.
    # NOTE(review): default 'fa-bolt' is not among the declared choice keys
    # ('bolt', 'apple-alt', ...) -- confirm intended.
    dependencies = [
        ('education', '0021_alter_materialblocks_icon'),
    ]
    operations = [
        migrations.AlterField(
            model_name='materialblocks',
            name='icon',
            field=models.CharField(choices=[('bolt', 'Молния'), ('apple-alt', 'Яблоко'), ('balance-scale-left', 'Весы'), ('brain', 'Мозг'), ('check', 'Галочка'), ('cloud', 'Облако'), ('compass', 'Компас'), ('dev', 'dev'), ('git', 'git'), ('lemon', 'Лимон')], default='fa-bolt', max_length=32),
        ),
    ]
|
nilq/baby-python
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING
from openhab_creator import _
from openhab_creator.models.common import MapTransformation
from openhab_creator.models.configuration.equipment.types.sensor import (
Sensor, SensorType)
from openhab_creator.models.items import (DateTime, Group, GroupType, Number,
PointType, ProfileType, PropertyType,
String)
from openhab_creator.output.items import ItemsCreatorPipeline
from openhab_creator.output.items.baseitemscreator import BaseItemsCreator
if TYPE_CHECKING:
from openhab_creator.models.configuration import Configuration
from openhab_creator.models.configuration.location import Location
@ItemsCreatorPipeline(9)
class SensorItemsCreator(BaseItemsCreator):
    """Create openHAB items for all configured sensor equipment.

    Builds per-type/per-area/per-location Group items plus one Number
    item per measured sensor point, and writes them to the 'sensors'
    items file. self.sensors caches which (area, sensortype, location)
    groups were already emitted.
    """
    def __init__(self, outputdir: str):
        super().__init__(outputdir)
        # area -> sensortype -> location -> True (already-created markers)
        self.sensors = {}
    def build(self, configuration: Configuration) -> None:
        """Walk all 'sensor' equipment and emit items, then write the file."""
        self._build_groups()
        for sensor in configuration.equipment.equipment('sensor'):
            location = sensor.location
            area = location.area
            if area not in self.sensors:
                self.sensors[area] = {}
            self.build_sensor(sensor)
            if sensor.has_subequipment:
                for subsensor in sensor.subequipment:
                    # NOTE(review): checks the parent's category, not the
                    # subsensor's -- confirm intended
                    if sensor.category != 'sensor':
                        self.build_sensor(subsensor)
                    self.build_sensortype_area(subsensor)
            else:
                self.build_sensortype_area(sensor)
        self.write_file('sensors')
    def _build_groups(self) -> None:
        """Emit the global helper groups and one 'All' group per sensor type."""
        Group('Trend')\
            .append_to(self)
        Group('Average7d')\
            .append_to(self)
        Group('PressureSealevel')\
            .append_to(self)
        for sensortype in SensorType:
            Group(f'{sensortype}All')\
                .typed(GroupType.NUMBER_AVG)\
                .label(sensortype.labels.page)\
                .format(sensortype.labels.format_str)\
                .icon(f'{sensortype}')\
                .append_to(self)
            # extra group carrying the JS gui transformation, if any
            if sensortype.labels.has_gui_factor:
                Group(f'gui{sensortype}All')\
                    .typed(GroupType.NUMBER_AVG)\
                    .label(sensortype.labels.item)\
                    .transform_js(f'gui{sensortype}')\
                    .icon(f'{sensortype}')\
                    .append_to(self)
    def build_sensor(self, sensor: Sensor) -> None:
        """Emit the equipment Group for one sensor (or sub-sensor)."""
        sensor_equipment = Group(sensor.item_ids.sensor)\
            .semantic('Sensor')
        if sensor.sensor_is_subequipment:
            sensor_equipment\
                .label(_('Sensor'))\
                .equipment(sensor)
        else:
            sensor_equipment\
                .label(sensor.name_with_type)\
                .location(sensor.location)
        sensor_equipment.append_to(self)
    def build_sensortype_area(self, sensor: Sensor) -> None:
        """Emit per-area groups for every sensor type the sensor measures."""
        area = sensor.location.area
        for sensortype in SensorType:
            if sensortype.point in sensor.categories:
                # create the area-level group only once per (area, type)
                if sensortype not in self.sensors[area]:
                    self.sensors[area][sensortype] = {}
                    Group(f'{sensortype}{area}')\
                        .typed(GroupType.NUMBER_AVG)\
                        .label(sensortype.labels.item)\
                        .format(sensortype.labels.format_str)\
                        .icon(f'{sensortype}{area.lower()}')\
                        .groups(f'{sensortype}All')\
                        .append_to(self)
                    if sensortype.labels.has_gui_factor:
                        Group(f'gui{sensortype}{area}')\
                            .typed(GroupType.NUMBER_AVG)\
                            .label(sensortype.labels.item)\
                            .transform_js(f'gui{sensortype}')\
                            .icon(f'{sensortype}{area.lower()}')\
                            .groups(f'gui{sensortype}All')\
                            .append_to(self)
                self.build_sensortype_location(sensortype, sensor)
    def build_sensortype_location(self, sensortype: SensorType, sensor: Sensor) -> None:
        """Emit the location-level group (once) and the actual sensor item,
        with type-specific extras for moisture, pressure, trend and
        7-day-average handling."""
        location = sensor.location
        area = location.area
        # location-level group, created only once per (area, type, location)
        if location not in self.sensors[area][sensortype]:
            self.sensors[area][sensortype][location] = True
            Group(f'{sensortype}{location}')\
                .typed(GroupType.NUMBER_AVG)\
                .label(sensortype.labels.item)\
                .format(sensortype.labels.format_str)\
                .icon(f'{sensortype}{area.lower()}')\
                .groups(f'{sensortype}{area}')\
                .location(location)\
                .semantic(PointType.MEASUREMENT, sensortype.typed.property)\
                .append_to(self)
            if sensortype.labels.has_gui_factor:
                Group(f'gui{sensortype}{location}')\
                    .typed(GroupType.NUMBER_AVG)\
                    .label(sensortype.labels.item)\
                    .transform_js(f'gui{sensortype}')\
                    .icon(f'{sensortype}{area.lower()}')\
                    .groups(f'gui{sensortype}{area}')\
                    .location(location)\
                    .semantic(PointType.MEASUREMENT, sensortype.typed.property)\
                    .append_to(self)
        # the measurement item itself; appended at the end, after the
        # type-specific branches below have added groups/scripting
        sensor_item = Number(f'{sensortype}{sensor.item_ids.merged_sensor}')\
            .typed(sensortype.typed.number)\
            .label(sensortype.labels.item)\
            .format(sensortype.labels.format_str)\
            .icon(f'{sensortype}{area.lower()}')\
            .groups(sensor.item_ids.merged_sensor)\
            .semantic(PointType.MEASUREMENT, sensortype.typed.property)\
            .channel(sensor.points.channel(sensortype.point))\
            .aisensor()
        if sensortype == SensorType.MOISTURE:
            # moisture sensors get watering-reminder scripting + extra items
            sensor_item\
                .scripting({
                    'reminder_item': sensor.item_ids.moisturelastreminder,
                    'watered_item': sensor.item_ids.moisturelastwatered
                })\
                .sensor(sensortype.point, sensor.influxdb_tags)\
                .groups(f'{sensortype}{location}')
            self.moisture_items(sensor)
        elif sensortype == SensorType.PRESSURE and sensor.has_altitude:
            # pressure sensors with a known altitude get an additional
            # sea-level-pressure item
            sensor_item\
                .scripting({
                    'pressure_sealevel_item': sensor.item_ids.pressure_sealevel,
                    'altitude': sensor.altitude
                })\
                .groups('PressureSealevel')
            Number(f'pressureSeaLevel{sensor.item_ids.merged_sensor}')\
                .typed(sensortype.typed.number)\
                .label(sensortype.labels.item)\
                .format(sensortype.labels.format_str)\
                .icon(f'{sensortype}{area.lower()}')\
                .groups(sensor.item_ids.merged_sensor, f'{sensortype}{location}')\
                .semantic(PointType.MEASUREMENT, sensortype.typed.property)\
                .sensor(sensortype.point, sensor.influxdb_tags)\
                .append_to(self)
        else:
            sensor_item\
                .sensor(sensortype.point, sensor.influxdb_tags)\
                .groups(f'{sensortype}{location}')
        # outdoor (and all pressure) measurements get a trend item
        if sensor.location.area == 'Outdoor' or sensortype == SensorType.PRESSURE:
            String(f'trend{sensortype}{sensor.item_ids.merged_sensor}')\
                .label(_('Trend {label}').format(label=sensortype.labels.item))\
                .map(MapTransformation.TREND)\
                .icon(f'trend{sensortype}')\
                .groups(sensor.item_ids.merged_sensor)\
                .semantic(PointType.STATUS)\
                .aisensor()\
                .append_to(self)
            sensor_item\
                .groups('Trend')\
                .scripting({
                    'trend_item': f'trend{sensortype}{sensor.item_ids.merged_sensor}'
                })
        # temperature measurements additionally get a 7-day-average item
        if sensortype == SensorType.TEMPERATURE:
            Number(f'average7d{sensortype}{sensor.item_ids.merged_sensor}')\
                .label(_('7 days average {label}').format(label=sensortype.labels.item))\
                .icon('average7d')\
                .groups(sensor.item_ids.merged_sensor)\
                .semantic(PointType.STATUS)\
                .aisensor()\
                .append_to(self)
            sensor_item\
                .groups('Average7d')\
                .scripting({
                    'average_item': f'average7d{sensortype}{sensor.item_ids.merged_sensor}'
                })
        sensor_item.append_to(self)
        # gui companion item with the JS conversion profile on the channel
        if sensortype.labels.has_gui_factor:
            String(f'gui{sensortype}{sensor.item_ids.sensor}')\
                .label(sensortype.labels.item)\
                .transform_js(f'gui{sensortype}')\
                .icon(f'{sensortype}{area.lower()}')\
                .groups(sensor.item_ids.merged_sensor, f'gui{sensortype}{location}')\
                .semantic(PointType.MEASUREMENT, sensortype.typed.property)\
                .channel(sensor.points.channel(sensortype.point),
                         ProfileType.JS, f'togui{sensortype.labels.gui_factor}.js')\
                .append_to(self)
    def moisture_items(self, sensor: Sensor) -> None:
        """Emit the watering-reminder and last-watered DateTime items for a
        moisture (plant) sensor."""
        DateTime(sensor.item_ids.moisturelastreminder)\
            .label(_('Last watering reminder'))\
            .datetime()\
            .config()\
            .groups(sensor.item_ids.merged_sensor)\
            .semantic(PointType.STATUS, PropertyType.TIMESTAMP)\
            .scripting({
                'message': _('The plant {plant} needs to be watered!')
                .format(plant=sensor.blankname)
            })\
            .append_to(self)
        DateTime(sensor.item_ids.moisturelastwatered)\
            .label(_('Last watered'))\
            .dateonly_weekday()\
            .icon('wateringcan')\
            .config()\
            .groups(sensor.item_ids.merged_sensor)\
            .semantic(PointType.STATUS, PropertyType.TIMESTAMP)\
            .scripting({
                'message': _('The plant {plant} says thank you for watering!')
                .format(plant=sensor.blankname)
            })\
            .append_to(self)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Library to easily manage Host Health"""
from __future__ import division
import logging
import psutil
import threading
import time
# Dashboard status colors (hex): OK / warning / danger.
COLOR_GREEN = '#4caf50'
COLOR_ORANGE = '#ff5722'
COLOR_RED = '#f44336'
# CPU usage thresholds (percent) above which status escalates.
CPU_THRESHOLD_WARNING = 70
CPU_THRESHOLD_DANGER = 85
# Map psutil NIC duplex constants to short human-readable labels.
duplex_map = {
    psutil.NIC_DUPLEX_FULL: "full",
    psutil.NIC_DUPLEX_HALF: "half",
    psutil.NIC_DUPLEX_UNKNOWN: "?",
}
def get_host_health(cpu_percent, cpu_percent_details):
    """Collect a snapshot of host health (CPU, RAM, disk, boot, network).

    Args:
        cpu_percent: overall CPU usage percentage.
        cpu_percent_details: per-CPU usage percentages.

    Returns:
        dict with 'cpu', 'ram' and 'disk' sections (values plus a status
        'color'; 'cpu' also has a bootstrap-style 'label'), 'boot_time',
        and - when the NICs are readable - 'network_stats'/'network_io'.
    """
    status = {}
    cpu = {}
    cpu['percent'] = cpu_percent
    cpu['percpu'] = cpu_percent_details
    cpu['color'] = COLOR_GREEN
    if cpu_percent > CPU_THRESHOLD_WARNING:
        cpu['color'] = COLOR_ORANGE
    if cpu_percent > CPU_THRESHOLD_DANGER:
        cpu['color'] = COLOR_RED
    cpu['label'] = get_host_cpu_label(cpu_percent)
    status['cpu'] = cpu
    ram = {}
    # psutil.virtual_memory() tuple layout (prefix):
    # total, available, percent, used, free, ...
    ram_raw_values = psutil.virtual_memory()
    r_total = ram_raw_values[0]
    r_avail = ram_raw_values[1]
    r_percent = ram_raw_values[2]
    r_used = ram_raw_values[3]
    r_free = ram_raw_values[4]
    # bytes -> GiB
    ram['total'] = r_total / 1024 / 1024 / 1024
    ram['available'] = r_avail / 1024 / 1024 / 1024
    ram['free'] = r_free / 1024 / 1024 / 1024
    ram['used'] = r_used / 1024 / 1024 / 1024
    ram['percent'] = r_percent
    ram['color'] = COLOR_GREEN
    if r_percent > 70:
        ram['color'] = COLOR_ORANGE
    if r_percent > 85:
        ram['color'] = COLOR_RED
    status['ram'] = ram
    disk = {}
    d_total, d_used, d_free, d_percent = psutil.disk_usage('/')
    # bytes -> GiB
    disk['total'] = d_total / 1024 / 1024 / 1024
    disk['used'] = d_used / 1024 / 1024 / 1024
    disk['free'] = d_free / 1024 / 1024 / 1024
    disk['percent'] = d_percent
    disk['color'] = COLOR_GREEN
    if d_percent > 70:
        disk['color'] = COLOR_ORANGE
    if d_percent > 85:
        disk['color'] = COLOR_RED
    status['disk'] = disk
    status['boot_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(psutil.boot_time()))
    try:
        status['network_stats'] = psutil.net_if_stats()
        # replace psutil's duplex constant with a human-readable label
        for (key, item) in status['network_stats'].items():
            status['network_stats'][key] = status['network_stats'][key]._replace(duplex=duplex_map[item.duplex])
        status['network_io'] = psutil.net_io_counters(pernic=True)
    except OSError as err:
        # BUG FIX: the original format string was "{}}" -- the stray '}'
        # raised ValueError here instead of logging the NIC error.
        logging.error("Error while reading NIC status: {}".format(err))
    return status
def get_host_cpu_label(cpu_percent):
    """Map a CPU usage percentage to a bootstrap contextual label.

    Returns 'success' up to the warning threshold, 'warning' above it,
    and 'danger' above the danger threshold.
    """
    if cpu_percent > CPU_THRESHOLD_DANGER:
        return 'danger'
    if cpu_percent > CPU_THRESHOLD_WARNING:
        return 'warning'
    return 'success'
class HostHealth:
    """Background CPU-usage sampler built on psutil.

    start() arms a one-shot timer thread that runs update_stats(); each
    run refreshes the cached percentages and re-arms the timer, so
    sampling repeats every `pool_time` seconds.
    """

    # class-level defaults, returned until the first sample completes
    percent = 'N/A'
    percent_details = 'N/A'
    thread = None
    pool_time = 5
    interval = 2

    def __init__(self, pool_time, interval):
        """pool_time: seconds between samples; interval: blocking window
        handed to psutil.cpu_percent()."""
        self.pool_time, self.interval = pool_time, interval

    def start(self):
        """Arm the timer for the next sample."""
        timer = threading.Timer(self.pool_time, self.update_stats)
        self.thread = timer
        timer.start()

    def update_stats(self):
        """Take a blocking CPU sample, then re-arm the timer."""
        overall = psutil.cpu_percent(interval=self.interval)
        self.percent = overall
        self.percent_details = psutil.cpu_percent(interval=self.interval, percpu=True)
        self.start()

    def get_stats(self):
        """Return (overall_percent, per_cpu_percents)."""
        return self.percent, self.percent_details
|
nilq/baby-python
|
python
|
__author__ = 'Rob Edwards'
import sys
from matrices import blosum62
def score(a, b):
    """score dna as match/mismatch"""
    # +1 for identical bases, -1 otherwise.
    return 1 if a == b else -1
def dna_score_alignment(seq1, seq2, gap_open=11, gap_extend=1):
    """
    Generate a score for an alignment between two sequences. This does not do the alignment!
    :param seq1: The first sequence
    :param seq2: The second sequence
    :param gap_open: The gap opening penalty
    :param gap_extend: The gap extension penalty
    :return: An int for the best score for the alignment
    """
    # Affine-gap DP with three levels: [0] lower (gap running in seq2),
    # [1] middle (match/mismatch), [2] upper (gap running in seq1).
    score_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
    # initially populate the gap open/extension
    for i in range(1, len(seq1)+1):
        score_matrix[0][i][0] = -gap_open - (i-1)*gap_extend
        score_matrix[1][i][0] = -gap_open - (i-1)*gap_extend
        # large penalty: this level should be unreachable along the edge
        score_matrix[2][i][0] = -10*gap_open
    for j in range(1, len(seq2)+1):
        score_matrix[2][0][j] = -gap_open - (j-1)*gap_extend
        score_matrix[1][0][j] = -gap_open - (j-1)*gap_extend
        score_matrix[0][0][j] = -10*gap_open
    for i in range(1, len(seq1)+1):
        for j in range(1, len(seq2)+1):
            # extend an existing gap, or open a new one from the middle level
            lower_scores = [score_matrix[0][i-1][j] - gap_extend, score_matrix[1][i-1][j] - gap_open]
            score_matrix[0][i][j] = max(lower_scores)
            upper_scores = [score_matrix[2][i][j-1] - gap_extend, score_matrix[1][i][j-1] - gap_open]
            score_matrix[2][i][j] = max(upper_scores)
            # middle level: close either gap, or take a (mis)match step
            mid_scores = [score_matrix[0][i][j], score_matrix[1][i-1][j-1] + score(seq1[i-1], seq2[j-1]), score_matrix[2][i][j]]
            score_matrix[1][i][j] = max(mid_scores)
    # best score across the three levels at the bottom-right corner
    max_scores = [score_matrix[0][i][j], score_matrix[1][i][j], score_matrix[2][i][j]]
    return max(max_scores)
def dna_gapped_alignment(seq1, seq2, gap_open=11, gap_extend=1):
    """
    Perform a gapped alignment. This approach uses two, 3 dimensional matrices.
    :param seq1: The first sequence
    :param seq2: The second sequence
    :param gap_open: The gap opening penalty (default = 11)
    :param gap_extend: The gap extension penalty (default = 1)
    :return: The score, and the two sequences with gaps in them
    """
    # Affine-gap DP with three levels: [0] lower (gap running in seq2),
    # [1] middle (match/mismatch), [2] upper (gap running in seq1).
    # backtrack_matrix records which predecessor produced each cell's best score.
    score_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
    backtrack_matrix = [[[0 for j in range(len(seq2)+1)] for i in range(len(seq1)+1)] for k in range(3)]
    # initially populate the gap open/extension
    for i in range(1, len(seq1)+1):
        score_matrix[0][i][0] = -gap_open - (i-1)*gap_extend
        score_matrix[1][i][0] = -gap_open - (i-1)*gap_extend
        # large penalty: this level should be unreachable along the edge
        score_matrix[2][i][0] = -10*gap_open
    for j in range(1, len(seq2)+1):
        score_matrix[2][0][j] = -gap_open - (j-1)*gap_extend
        score_matrix[1][0][j] = -gap_open - (j-1)*gap_extend
        score_matrix[0][0][j] = -10*gap_open
    for i in range(1, len(seq1)+1):
        for j in range(1, len(seq2)+1):
            # extend an existing gap, or open a new one from the middle level
            lower_scores = [score_matrix[0][i-1][j] - gap_extend, score_matrix[1][i-1][j] - gap_open]
            score_matrix[0][i][j] = max(lower_scores)
            backtrack_matrix[0][i][j] = lower_scores.index(score_matrix[0][i][j])
            upper_scores = [score_matrix[2][i][j-1] - gap_extend, score_matrix[1][i][j-1] - gap_open]
            score_matrix[2][i][j] = max(upper_scores)
            backtrack_matrix[2][i][j] = upper_scores.index(score_matrix[2][i][j])
            # middle level: close either gap, or take a (mis)match step
            mid_scores = [score_matrix[0][i][j], score_matrix[1][i-1][j-1] + score(seq1[i-1], seq2[j-1]), score_matrix[2][i][j]]
            score_matrix[1][i][j] = max(mid_scores)
            backtrack_matrix[1][i][j] = mid_scores.index(score_matrix[1][i][j])
    # Trace back from the bottom-right corner, inserting gaps as we go.
    i=len(seq1)
    j=len(seq2)
    output_seq1 = seq1
    output_seq2 = seq2
    max_scores = [score_matrix[0][i][j], score_matrix[1][i][j], score_matrix[2][i][j]]
    max_score = max(max_scores)
    backtrack_level = max_scores.index(max_score)
    # we need this, time and again
    insert_indel = lambda word, i: word[:i] + '-' + word[i:]
    while i*j != 0:
        if backtrack_level == 0:
            if backtrack_matrix[0][i][j] == 1:
                backtrack_level = 1
            i -= 1
            output_seq2 = insert_indel(output_seq2, j)
        elif backtrack_level == 1:
            if backtrack_matrix[1][i][j] == 0:
                backtrack_level = 0
            elif backtrack_matrix[1][i][j] == 2:
                backtrack_level = 2
            else:
                i -= 1
                j -= 1
        else:
            if backtrack_matrix[2][i][j] == 1:
                backtrack_level = 1
            j -= 1
            output_seq1 = insert_indel(output_seq1, i)
    # Pad whichever prefix remains with leading gaps.
    # BUGFIX: xrange is Python-2-only (NameError under Python 3); range
    # behaves identically here on both versions.
    for k in range(i):
        output_seq2 = insert_indel(output_seq2, 0)
    for k in range(j):
        output_seq1 = insert_indel(output_seq1, 0)
    return (max_score, output_seq1, output_seq2)
# Demo entry point: align two example protein-like sequences and print the
# score followed by both gapped sequences.
if __name__ == "__main__":
    #s1 = 'ATGLVRRLGSFLVEDFSRYKLLL'
    #s2 = 'ATGLGLMRRSGSPLVESRYKLL'
    s1 = 'MQMCDRKHECYFEGFICDWHTLLEPHIVAQSEPYPCHKKMTQMPPPCSWFGNDIAEEKPSSIMATPAMPNVEEGM'
    s2 = 'MWMKDRKKNANECDWHPLLEYHIVAQSEPYKCCKKAMLGVKGAGTQMPPPCSWFGNDIAEEKPSSIMATPAMPNWEEGM'
    # BUGFIX: this module defines dna_gapped_alignment; the original called the
    # undefined name gapped_alignment, raising NameError at runtime.
    (score, s1, s2) = dna_gapped_alignment(s1, s2)
    print(str(score) + "\n" + s1 + "\n" + s2)
|
nilq/baby-python
|
python
|
from libs.matrix_client.matrix_client.client import MatrixClient
from libs.matrix_client.matrix_client.api import MatrixRequestError
from libs.matrix_client.matrix_client.user import User
from requests.exceptions import MissingSchema
from helpers import setup_logger
logger = setup_logger(__name__, 'info')
class Client():
    """Thin wrapper around MatrixClient supporting password- or token-based login.

    After construction, `logged_in` reflects whether a usable session exists.
    """

    def __init__(self, username, password=None, token=None, server="matrix.org"):
        """Log in with username/password, or resume a session from a token.

        :param username: Matrix user name (or user_id when using a token)
        :param password: password for a fresh login (ignored if token given)
        :param token: existing access token to resume a session
        :param server: Matrix homeserver host name
        """
        self.username = username
        self.server = server
        self.server_url = "https://{}".format(self.server)
        self.token = None
        # Assume success; any failure path below flips this to False.
        self.logged_in = True
        # Create the matrix client
        if token is None and password is not None:
            self.matrix_client = MatrixClient(self.server_url)
            # Try logging in the user
            try:
                self.token = self.matrix_client.login(username=username, password=password)
                self.user = User(self.matrix_client, self.matrix_client.user_id)
            except MatrixRequestError as e:
                self.logged_in = False
                if e.code == 403:
                    logger.exception("Wrong username or password")
                else:
                    logger.exception("Check server details")
            except MissingSchema as e:
                # BUGFIX: previously logged_in remained True here even though
                # the login never happened (malformed server URL).
                self.logged_in = False
                logger.exception("Bad URL format")
        else:
            # Token path: no login round-trip needed.
            self.matrix_client = MatrixClient(self.server_url, token=token, user_id=username)
            self.user = User(self.matrix_client, self.matrix_client.user_id)

    # Return the user's display name
    def get_user_display_name(self):
        return self.user.get_display_name()

    # Get the rooms of the user
    def get_rooms(self):
        return self.matrix_client.rooms

    def get_user(self):
        return self.user

    def get_token(self):
        return self.token
|
nilq/baby-python
|
python
|
import unittest
import uuid
from unittest.mock import patch
from microsetta_private_api.model.interested_user import InterestedUser
from microsetta_private_api.repo.interested_user_repo import InterestedUserRepo
from microsetta_private_api.repo.transaction import Transaction
from psycopg2.errors import ForeignKeyViolation
ADDRESS_1 = "9500 Gilman Dr"
ADDRESS_2 = ""
CITY = "La Jolla"
STATE = "CA"
POSTAL = "92093"
COUNTRY = "United States"
LATITUDE = "32.88003507430753"
LONGITUDE = "-117.23394724325632"
class InterestedUserRepoTests(unittest.TestCase):
    """Integration tests for InterestedUserRepo against a live database.

    setUp creates a disposable campaign (plus its required campaign/project
    link) that interested users can be attached to; tearDown removes it.
    """

    def setUp(self):
        """Create a test campaign and its campaign/project relationship."""
        self.test_campaign_title_1 = 'Test Campaign'
        with Transaction() as t:
            cur = t.cursor()
            # create a test campaign
            cur.execute(
                "INSERT INTO campaign.campaigns (title) "
                "VALUES (%s) "
                "RETURNING campaign_id",
                (self.test_campaign_title_1, )
            )
            self.test_campaign_id = cur.fetchone()[0]
            # create necessary campaign/project relationship
            cur.execute(
                "INSERT INTO campaign.campaigns_projects "
                "(campaign_id, project_id) "
                "VALUES (%s, 1)",
                (self.test_campaign_id, )
            )
            t.commit()
        # need to create an extra, fake campaign ID
        # (loop guards against the astronomically unlikely uuid4 collision
        # with the real campaign id)
        self.fake_campaign_id = None
        while self.fake_campaign_id is None:
            tmp_fake_campaign_id = uuid.uuid4()
            if tmp_fake_campaign_id != self.test_campaign_id:
                self.fake_campaign_id = str(tmp_fake_campaign_id)

    def tearDown(self):
        """Delete the campaign/project link and the campaign made in setUp."""
        with Transaction() as t:
            cur = t.cursor()
            cur.execute(
                "DELETE FROM campaign.campaigns_projects "
                "WHERE campaign_id = %s",
                (self.test_campaign_id,)
            )
            cur.execute(
                "DELETE FROM campaign.campaigns "
                "WHERE campaign_id = %s",
                (self.test_campaign_id,)
            )
            t.commit()

    def test_create_interested_user_valid(self):
        """Inserting a fully populated interested user yields a non-None id."""
        dummy_user = {
            "campaign_id": self.test_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson",
            "email": "test@testing.com"
        }
        interested_user = InterestedUser.from_dict(dummy_user)
        with Transaction() as t:
            interested_user_repo = InterestedUserRepo(t)
            obs = interested_user_repo.insert_interested_user(interested_user)
            self.assertTrue(obs is not None)

    def test_create_interested_user_invalid(self):
        """Missing email raises KeyError; unknown campaign violates the FK."""
        # test with a required field missing
        dummy_user = {
            "campaign_id": self.test_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson"
        }
        with self.assertRaises(KeyError):
            interested_user = InterestedUser.from_dict(dummy_user)
        # test with invalid campaign ID
        dummy_user = {
            "campaign_id": self.fake_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson",
            "email": "test@testing.com"
        }
        interested_user = InterestedUser.from_dict(dummy_user)
        with Transaction() as t:
            interested_user_repo = InterestedUserRepo(t)
            with self.assertRaises(ForeignKeyViolation):
                interested_user_repo.insert_interested_user(interested_user)

    def test_verify_address_already_verified(self):
        """verify_address returns None (no-op) when address_checked is True."""
        dummy_user = {
            "campaign_id": self.test_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson",
            "email": "test@testing.com",
            "address_checked": True
        }
        interested_user = InterestedUser.from_dict(dummy_user)
        with Transaction() as t:
            interested_user_repo = InterestedUserRepo(t)
            user_id = \
                interested_user_repo.insert_interested_user(interested_user)
            obs = interested_user_repo.verify_address(user_id)
            self.assertTrue(obs is None)

    @patch("microsetta_private_api.repo.interested_user_repo.verify_address")
    def test_verify_address_not_verified_is_valid(self, test_verify_address):
        """verify_address returns True when the (mocked) geocoder says valid."""
        # stub out the external address-verification service
        test_verify_address.return_value = {
            "address_1": ADDRESS_1,
            "address_2": ADDRESS_2,
            "city": CITY,
            "state": STATE,
            "postal": POSTAL,
            "country": COUNTRY,
            "latitude": LATITUDE,
            "longitude": LONGITUDE,
            "valid": True
        }
        dummy_user = {
            "campaign_id": self.test_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson",
            "email": "test@testing.com",
            "address_1": ADDRESS_1,
            "city": CITY,
            "state": STATE,
            "postal_code": POSTAL,
            "country": COUNTRY
        }
        interested_user = InterestedUser.from_dict(dummy_user)
        with Transaction() as t:
            interested_user_repo = InterestedUserRepo(t)
            user_id = \
                interested_user_repo.insert_interested_user(interested_user)
            obs = interested_user_repo.verify_address(user_id)
            self.assertTrue(obs is True)

    @patch("microsetta_private_api.repo.interested_user_repo.verify_address")
    def test_verify_address_not_verified_is_invalid(self, test_verify_address):
        """verify_address returns False when the (mocked) geocoder rejects."""
        # stub out the external address-verification service
        test_verify_address.return_value = {
            "address_1": ADDRESS_1,
            "address_2": ADDRESS_2,
            "city": CITY,
            "state": STATE,
            "postal": POSTAL,
            "country": COUNTRY,
            "latitude": LATITUDE,
            "longitude": LONGITUDE,
            "valid": False
        }
        dummy_user = {
            "campaign_id": self.test_campaign_id,
            "first_name": "Test",
            "last_name": "McTesterson",
            "email": "test@testing.com",
            "address_1": ADDRESS_1,
            "city": CITY,
            "state": STATE,
            "postal_code": POSTAL,
            "country": COUNTRY
        }
        interested_user = InterestedUser.from_dict(dummy_user)
        with Transaction() as t:
            interested_user_repo = InterestedUserRepo(t)
            user_id = \
                interested_user_repo.insert_interested_user(interested_user)
            obs = interested_user_repo.verify_address(user_id)
            self.assertTrue(obs is False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import binascii
import socket
import struct
from twampy.utils import generate_zero_bytes, now
from twampy.constants import TIMEOFFSET, TWAMP_PORT_DEFAULT, TOS_DEFAULT, TIMEOUT_DEFAULT
import logging
logger = logging.getLogger("twampy")
class ControlClient:
    """TWAMP control-protocol client over a TCP connection (RFC 5357 style).

    Only unauthenticated mode is supported.
    """

    def __init__(self, server, port=TWAMP_PORT_DEFAULT, timeout=TIMEOUT_DEFAULT, tos=TOS_DEFAULT, source_address=None):
        """Open the control connection to `server`:`port`.

        :param server: TWAMP server host (IPv4, IPv6 or hostname)
        :param port: TCP control port
        :param timeout: socket timeout in seconds
        :param tos: IP type-of-service byte for connect()
        :param source_address: optional local (host, port) to bind to
        """
        # Remember the connection parameters so connect() can re-establish.
        self.server = server
        self.port = port
        self.timeout = timeout
        self.tos = tos
        self.source_address = source_address
        # No sessions negotiated yet (set properly by connectionSetup()).
        self.nbrSessions = 0
        # BUGFIX: the original referenced the undefined name `tcp_port`,
        # raising NameError on construction; the parameter is `port`.
        self.socket = socket.create_connection((server, port), timeout, source_address)

    def connect(self):
        """(Re)connect using the parameters given at construction time.

        BUGFIX: the original created two sockets (IPv4 then IPv6, discarding
        the first) and referenced undefined names (ip_protocol/tos/server/port).
        """
        family = socket.AF_INET6 if ':' in self.server else socket.AF_INET
        self.socket = socket.socket(family, socket.SOCK_STREAM)
        # NOTE(review): IP_TOS is the IPv4 option; IPv6 uses IPV6_TCLASS —
        # confirm whether TOS marking is required for IPv6 control sessions.
        if family == socket.AF_INET:
            self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
        self.socket.connect((self.server, self.port))

    def send(self, data):
        """Send raw control bytes, logging the hex payload."""
        logger.debug("CTRL.TX %s", binascii.hexlify(data))
        try:
            self.socket.send(data)
        except Exception as e:
            logger.critical('*** Sending data failed: %s', str(e))

    def receive(self):
        """Receive one control message (up to 9216 bytes) and log it."""
        data = self.socket.recv(9216)
        logger.debug("CTRL.RX %s (%d bytes)", binascii.hexlify(data), len(data))
        return data

    def close(self):
        """Close the control connection."""
        self.socket.close()

    def connectionSetup(self):
        """Handle <<Server Greeting>> / <<Setup Response>> / <<Server Start>>."""
        logger.info("CTRL.RX <<Server Greeting>>")
        data = self.receive()
        # Supported-modes bitmask lives at offset 12..16 of the greeting.
        self.smode = struct.unpack('!I', data[12:16])[0]
        logger.info("TWAMP modes supported: %d", self.smode)
        if self.smode & 1 == 0:
            logger.critical('*** TWAMPY only supports unauthenticated mode(1)')
        logger.info("CTRL.TX <<Setup Response>>")
        self.send(struct.pack('!I', 1) + generate_zero_bytes(160))
        logger.info("CTRL.RX <<Server Start>>")
        data = self.receive()
        # BUGFIX: ord(data[15]) fails on Python 3 where indexing bytes yields
        # an int; unpacking a one-byte slice works on both Python 2 and 3.
        rval = struct.unpack('!B', data[15:16])[0]
        if rval != 0:
            # TWAMP setup request not accepted by server
            logger.critical("*** ERROR CODE %d in <<Server Start>>", rval)
        self.nbrSessions = 0

    def reqSession(self, sender="", s_port=20001, receiver="", r_port=20002, startTime=0, timeOut=3, dscp=0, padding=0):
        """Send a <<Request Session>>; return True if the server accepts it.

        Empty sender/"::" select wildcard IPv4/IPv6; otherwise addresses are
        packed explicitly (IPv6 when the sender contains ':').
        """
        typeP = dscp << 24
        if startTime != 0:
            startTime += now() + TIMEOFFSET
        if sender == "":
            request = struct.pack('!4B L L H H 13L 4ILQ4L', 5, 4, 0, 0, 0, 0, s_port, r_port, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
        elif sender == "::":
            request = struct.pack('!4B L L H H 13L 4ILQ4L', 5, 6, 0, 0, 0, 0, s_port, r_port, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
        elif ':' in sender:
            s = socket.inet_pton(socket.AF_INET6, sender)
            r = socket.inet_pton(socket.AF_INET6, receiver)
            request = struct.pack('!4B L L H H 16s 16s 4L L 4ILQ4L', 5, 6, 0, 0, 0, 0, s_port, r_port, s, r, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
        else:
            s = socket.inet_pton(socket.AF_INET, sender)
            r = socket.inet_pton(socket.AF_INET, receiver)
            request = struct.pack('!4B L L H H 16s 16s 4L L 4ILQ4L', 5, 4, 0, 0, 0, 0, s_port, r_port, s, r, 0, 0, 0, 0, padding, startTime, 0, timeOut, 0, typeP, 0, 0, 0, 0, 0)
        logger.info("CTRL.TX <<Request Session>>")
        self.send(request)
        logger.info("CTRL.RX <<Session Accept>>")
        data = self.receive()
        # BUGFIX: portable one-byte read (see connectionSetup).
        rval = struct.unpack('!B', data[0:1])[0]
        if rval != 0:
            logger.critical("ERROR CODE %d in <<Session Accept>>", rval)
            return False
        return True

    def startSessions(self):
        """Send <<Start Sessions>> and wait for the <<Start Accept>>."""
        request = struct.pack('!B', 2) + generate_zero_bytes(31)
        logger.info("CTRL.TX <<Start Sessions>>")
        self.send(request)
        logger.info("CTRL.RX <<Start Accept>>")
        self.receive()

    def stopSessions(self):
        """Send <<Stop Sessions>> for all active sessions and reset the count."""
        request = struct.pack('!BBHLQQQ', 3, 0, 0, self.nbrSessions, 0, 0, 0)
        logger.info("CTRL.TX <<Stop Sessions>>")
        self.send(request)
        self.nbrSessions = 0
|
nilq/baby-python
|
python
|
from helpers import test_tools
from graph import GraphRoutingProblem
from dungeon import DungeonProblem
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import APITestCase
class DeleteUserIdentityTest(APITestCase):
    """API test: DELETE on user-identity-details removes the AuthIdentity."""

    def test_simple(self):
        """Deleting one's own identity returns 204 and removes the DB row."""
        user = self.create_user(email="a@example.com")
        org = self.create_organization(owner=user)
        # The identity is backed by the organization's (dummy) auth provider.
        auth_provider = AuthProvider.objects.create(organization=org, provider="dummy")
        auth_identity = AuthIdentity.objects.create(
            auth_provider=auth_provider, ident=user.email, user=user
        )
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-user-identity-details",
            kwargs={"user_id": user.id, "identity_id": auth_identity.id},
        )
        resp = self.client.delete(url, format="json")
        assert resp.status_code == 204, resp.content
        # The identity row must be gone after the delete.
        assert not AuthIdentity.objects.filter(id=auth_identity.id).exists()
|
nilq/baby-python
|
python
|
#
# build the vocab/dictionary from outside to all related lexicons
from __future__ import print_function
import os
import sys
import argparse
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from neuronlp2 import utils
from neuronlp2.io import get_logger, conllx_stacked_data
#
# Only use for building multi-lingual vocabs, this is only a simple workaround.
# However, we might also want multi-lingual embeddings before training for convenience.
# Usage:
#   python2 examples/vocab/build_vocab.py --word_embedding <embed-type> --word_paths [various languages' embeddings: e1 e2 ...]
# --train <english-train-file> --extra [various languages' test-files: ... ] --model_path <path>
#
def parse_cmd(args):
    """Parse command-line arguments for the vocabulary builder."""
    parser = argparse.ArgumentParser(description='Building the alphabets/vocabularies.')
    parser.add_argument('--word_embedding', type=str, choices=['word2vec', 'glove', 'senna', 'sskip', 'polyglot'],
                        help='Embedding for words', required=True)
    parser.add_argument('--word_paths', type=str, nargs='+', help='path for word embedding dict', required=True)
    parser.add_argument('--train', type=str, help="The main file to build vocab.", required=True)
    parser.add_argument('--extra', type=str, nargs='+', help="Extra files to build vocab, usually dev/tests.",
                        required=True)
    parser.add_argument('--model_path', help='path for saving model file.', required=True)
    return parser.parse_args(args)
def _get_keys(wd):
try:
return wd.keys()
except:
# Word2VecKeyedVectors
return wd.vocab.keys()
# todo(warn): if not care about the specific language of the embeddings
def combine_embeds(word_dicts):
    """Merge several embedding vocabularies into a single key set.

    The first dict to contribute a key wins. Returns the merged dict plus,
    per input dict, how many keys it inserted and how many were repeats.
    """
    merged = dict()
    inserted = [0] * len(word_dicts)
    repeated = [0] * len(word_dicts)
    for idx, vocab in enumerate(word_dicts):
        for key in _get_keys(vocab):
            if key in merged:
                repeated[idx] += 1
            else:
                inserted[idx] += 1
                merged[key] = 0
    return merged, inserted, repeated
def main(a=None):
    """Build combined multi-lingual vocabularies/alphabets and log statistics."""
    opts = parse_cmd(sys.argv[1:] if a is None else a)
    # if output directory doesn't exist, create it
    if not os.path.exists(opts.model_path):
        os.makedirs(opts.model_path)
    logger = get_logger("VocabBuilder", opts.model_path + '/vocab.log.txt')
    logger.info('\ncommand-line params : {0}\n'.format(sys.argv[1:]))
    logger.info('{0}\n'.format(opts))
    # load embeds
    logger.info("Load embeddings")
    word_dicts = []
    word_dim = None
    for emb_path in opts.word_paths:
        emb_dict, emb_dim = utils.load_embedding_dict(opts.word_embedding, emb_path)
        assert word_dim is None or word_dim == emb_dim, "Embedding size not matched!"
        word_dicts.append(emb_dict)
        word_dim = emb_dim
    # combine embeds
    combined_word_dict, count_ins, count_repeats = combine_embeds(word_dicts)
    logger.info("Final embeddings size: %d." % len(combined_word_dict))
    for fname, n_in, n_rep in zip(opts.word_paths, count_ins, count_repeats):
        logger.info(
            "For embed-file %s, count-in: %d, repeat-discard: %d." % (fname, n_in, n_rep))
    # create vocabs
    logger.info("Creating Alphabets")
    alphabet_path = os.path.join(opts.model_path, 'alphabets/')
    assert not os.path.exists(alphabet_path), "Alphabet path exists, please build with a new path."
    word_alphabet, char_alphabet, pos_alphabet, type_alphabet, max_sent_length = conllx_stacked_data.create_alphabets(
        alphabet_path, opts.train, data_paths=opts.extra, max_vocabulary_size=100000, embedd_dict=combined_word_dict)
    # printing info
    logger.info("Word Alphabet Size: %d" % word_alphabet.size())
    logger.info("Character Alphabet Size: %d" % char_alphabet.size())
    logger.info("POS Alphabet Size: %d" % pos_alphabet.size())
    logger.info("Type Alphabet Size: %d" % type_alphabet.size())
# Script entry point: build the vocabularies from command-line arguments.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Entrypoint of the metadata_service package.
"""
from typing import Optional
import typer
from ghga_service_chassis_lib.api import run_server
from metadata_service.config import get_config
from metadata_service.api import app # noqa: F401 pylint: disable=unused-import
def run(
    config: Optional[str] = typer.Option(None, help="Path to config yaml.")
) -> None:
    """
    Start the backend server.
    Args:
        config: The path to the application configuration
    """
    # NOTE(review): the `config` argument is accepted but never used here —
    # get_config() loads configuration from its own source. Confirm whether
    # the supplied path should be forwarded to get_config().
    run_server(app="metadata_service.__main__:app", config=get_config())
def run_cli() -> None:
    """
    Command line interface for running the server.
    """
    # Typer derives the CLI (options, help text) from run()'s signature.
    typer.run(run)
# Allow direct execution of this module.
if __name__ == "__main__":
    run_cli()
|
nilq/baby-python
|
python
|
def ones(l):
    """Return *l* with each entry immediately followed by its '_1' variant.

    E.g. ['FD'] -> ['FD', 'FD_1']; the input order is preserved.
    """
    return [name for base in l for name in (base, base + '_1')]
# The complete primitive sets
# All FD*-family primitives, each also in its '_1' suffixed variant.
ffprims_fall = ones(
    [
        'FD',
        'FDC',
        'FDCE',
        'FDE',
        'FDP',
        'FDPE',
        'FDR',
        'FDRE',
        'FDS',
        'FDSE',
    ])
# All LD*-family primitives, each also in its '_1' suffixed variant.
ffprims_lall = ones([
    'LDC',
    'LDCE',
    'LDE',
    'LDPE',
    'LDP',
])
# Base primitives
# (the subset actually used when building ffprims below)
ffprims_f = [
    'FDRE',
    'FDSE',
    'FDCE',
    'FDPE',
]
ffprims_l = [
    'LDCE',
    'LDPE',
]
# Combined working set of base primitives.
ffprims = ffprims_f + ffprims_l
def isff(prim):
    """True when *prim* names an FD-family primitive."""
    return prim[:2] == "FD"
def isl(prim):
    """True when *prim* names an LD-family primitive."""
    return prim[:2] == "LD"
# BEL site names ending in '5FF'.
ff_bels_5 = [
    'A5FF',
    'B5FF',
    'C5FF',
    'D5FF',
]
# BEL site names ending in 'FF'.
ff_bels_ffl = [
    'AFF',
    'BFF',
    'CFF',
    'DFF',
]
# Complete set of BEL sites considered.
ff_bels = ff_bels_ffl + ff_bels_5
#ff_bels = ff_bels_ffl
|
nilq/baby-python
|
python
|
import os
from setuptools import setup, find_packages
# Long description shown on PyPI is taken verbatim from the README.
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata for django-pimp-my-filter.
setup(
    name = 'django-pimp-my-filter',
    version = '0.1.1',
    packages = find_packages(),
    include_package_data = True,
    license = 'BSD License',
    description = 'An application, that helps you build your own filters for any model and use it.',
    long_description = README,
    url = 'https://github.com/fynjah/django-pimp-my-filter',
    author = 'Anton Ievtushenko',
    author_email = 'fynjah@gmail.com',
    classifiers = [
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Python imports
import matplotlib as plt
import numpy as np
import time
import argparse
# Other imports
import srl_example_setup
from simple_rl.tasks import GridWorldMDP
from simple_rl.planning.ValueIterationClass import ValueIteration
# INSTRUCTIONS FOR USE:
# 1. When you run the program [either with default or supplied arguments], a pygame window should pop up.
# This is the first iteration of running VI on the given MDP.
# 2. Press any button to close this pygame window and wait, another window will pop-up displaying the
# policy from the next time step
# 3. Repeat 1 and 2 until the program terminates
# An input function, creates the MDP object based on user input
def generate_MDP(width, height, init_loc, goal_locs, lava_locs, gamma, walls, slip_prob):
    """ Creates an MDP object based on user input """
    # Lava and step costs are fixed for this demo; everything else is
    # caller-supplied and forwarded unchanged.
    return GridWorldMDP(
        width=width,
        height=height,
        init_loc=init_loc,
        goal_locs=goal_locs,
        lava_locs=lava_locs,
        gamma=gamma,
        walls=walls,
        slip_prob=slip_prob,
        lava_cost=1.0,
        step_cost=0.1,
    )
def main():
    """Parse CLI flags, build the GridWorld MDP, and visualize each VI step."""
    # This accepts arguments from the command line with flags.
    # Example usage: python value_iteration_demo.py -w 4 -H 3 -s 0.05 -g 0.95 -il [(0,0)] -gl [(4,3)] -ll [(4,2)] -W [(2,2)]
    parser = argparse.ArgumentParser(description='Run a demo that shows a visualization of value iteration on a GridWorld MDP')
    # Add the relevant arguments to the argparser
    # NOTE(review): argparse applies `type` to the raw string, so type=tuple /
    # type=list would split a command-line value into individual characters;
    # the defaults below work, but confirm how coordinate arguments are meant
    # to be passed on the command line.
    parser.add_argument('-w', '--width', type=int, nargs="?", const=4, default=4,
                        help='an integer representing the number of cells for the GridWorld width')
    parser.add_argument('-H', '--height', type=int, nargs="?", const=3, default=3,
                        help='an integer representing the number of cells for the GridWorld height')
    parser.add_argument('-s', '--slip', type=float, nargs="?", const=0.05, default=0.05,
                        help='a float representing the probability that the agent will "slip" and not take the intended action')
    parser.add_argument('-g', '--gamma', type=float, nargs="?", const=0.95, default=0.95,
                        help='a float representing the decay probability for Value Iteration')
    parser.add_argument('-il', '--i_loc', type=tuple, nargs="?", const=(0,0), default=(0,0),
                        help='two integers representing the starting cell location of the agent [with zero-indexing]')
    parser.add_argument('-gl', '--g_loc', type=list, nargs="?", const=[(3,3)], default=[(3,3)],
                        help='a sequence of integer-valued coordinates where the agent will receive a large reward and enter a terminal state')
    parser.add_argument('-ll', '--l_loc', type=list, nargs="?", const=[(3,2)], default=[(3,2)],
                        help='a sequence of integer-valued coordinates where the agent will receive a large negative reward and enter a terminal state')
    parser.add_argument('-W', '--Walls', type=list, nargs="?", const=[(2,2)], default=[(2,2)],
                        help='a sequence of integer-valued coordinates representing cells that the agent cannot transition into')
    args = parser.parse_args()
    # Build the MDP from the parsed arguments.
    mdp = generate_MDP(
        args.width,
        args.height,
        args.i_loc,
        args.g_loc,
        args.l_loc,
        args.gamma,
        args.Walls,
        args.slip)
    # Run value iteration on the mdp and save the history of value backups until convergence
    vi = ValueIteration(mdp, max_iterations=50)
    _, _, histories = vi.run_vi_histories()
    # For every value backup, visualize the policy
    for value_dict in histories:
        mdp.visualize_policy(lambda in_state: value_dict[in_state]) # Note: This lambda is necessary because the policy must be a function
        time.sleep(0.5)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import socket
import time
# Raw-HTTP image fetch: request a JPEG over a bare socket, strip the response
# headers, and save the body to disk.
HOST = 'data.pr4e.org'
PORT = 80
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect((HOST, PORT))
mysock.sendall(b'GET http://data.pr4e.org/cover3.jpg HTTP/1.0\r\n\r\n')
count = 0
picture = b""
while True:
    data = mysock.recv(5120)
    if (len(data) < 1):
        break
    # Deliberate delay so the chunked arrival of data is observable.
    time.sleep(0.25)
    count = count + len(data)
    print(len(data), count)
    picture = picture + data
mysock.close()
#Look for the end of the headers
pos = picture.find(b"\r\n\r\n")
print('Header length', pos)
print(picture[:pos].decode())
#skip past the header and save the picture data
picture = picture[pos + 4:]
# BUGFIX: "web" is not a valid file mode and raises ValueError; binary image
# data must be written with mode "wb".
fhand = open("stuff.jpg", "wb")
fhand.write(picture)
fhand.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Tropical Cyclone Risk Model (TCRM) - Version 1.0 (beta release)
Copyright (C) 2011 Commonwealth of Australia (Geoscience Australia)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Title: mslp_seasonal_clim.py
Author: Nicholas Summons, nicholas.summons@ga.gov.au
Last modified: 4 June 2010
Description: Utility for creating Mean Sea Level Pressure (MSLP) seasonal climatology maps.
Uses NCEP-DOE Reanalysis 2 data averaged over date range: 1980-2007.
This script can either be run stand alone to create a NetCDF output file or
the class MSLPGrid can be invoked to return the MSLP seasonal average grid.
Acknowledgements:
NCEP-DOE Reanalysis 2 data provided by the NOAA/OAR/ESRL PSD, Boulder, Colorado, USA,
from their Web site at http://www.esrl.noaa.gov/psd/
Input data: mslp_seasonal_clim.nc (contains monthly means averaged over 28 year period)
"""
import os
import numpy as np
import Utilities.nctools as nctools
from Utilities import pathLocator
class MSLPGrid:
    """Seasonal-average MSLP grid built from the monthly climatology NetCDF."""

    def __init__(self, selected_months, filename=''):
        """Load the monthly MSLP climatology and average the selected months.

        :param selected_months: iterable of month numbers (1-12) to average
        :param filename: optional path to the climatology NetCDF; falls back
                         to <TCRM root>/MSLP/mslp_monthly_clim.nc
        :raises IOError: if no data file can be found
        """
        if not os.path.isfile(filename):
            tcrm_dir = pathLocator.getRootDirectory()
            filename = os.path.join(tcrm_dir, 'MSLP', 'mslp_monthly_clim.nc')
            if not os.path.isfile(filename):
                error_msg = "MSLP data file not found"
                # BUGFIX: `raise IOError, msg` is Python-2-only syntax and a
                # SyntaxError under Python 3; the call form works on both.
                raise IOError(error_msg)
        selected_months = set(selected_months)
        ncobj = nctools.ncLoadFile(filename)
        mslp_all = nctools.ncGetData(ncobj, 'mslp')
        self.lon = nctools.ncGetDims(ncobj, 'lon')
        self.lat = nctools.ncGetDims(ncobj, 'lat')
        dim0, dim1, dim2 = np.shape(mslp_all)
        # Average over selected months
        mslp_sum = np.zeros([dim1, dim2], dtype='float32')
        for month in selected_months:
            mslp_sum = mslp_sum + mslp_all[month-1, :, :]
        # flipud: reorder rows so latitude increases with index -- TODO confirm
        # against the input file's latitude ordering.
        self.mslp_av = np.flipud(mslp_sum / len(selected_months))

    def sampleGrid(self, lon, lat):
        """sampleGrid(self, lon, lat):
        Grab nearest value to given location.
        No interpolation performed!
        """
        indi = self.lon.searchsorted(lon)-1
        indj = self.lat.searchsorted(lat)-1
        return self.mslp_av[indj, indi]

    def returnGrid(self):
        """Return the (lon, lat, averaged MSLP) arrays."""
        return self.lon, self.lat, self.mslp_av
#def main(configFile):
# selected_months_str = str(cnfGetIniValue(configFile, 'DataProcess', 'selected_months', arange(13)))
# selected_months = set(selected_months_str.strip('[]{}() ').replace(',', ' ').split(' '))
# selected_months.discard('')
# if selected_months.issubset([str(k) for k in range(1,13)]):
# selected_months = [int(k) for k in selected_months]
# months_str = ', '.join([calendar.month_abbr[i] for i in sort(list(selected_months))])
# print "Creating Mean Sea Level Pressure (MSLP) seasonal climatology:"
# print "Months specified for seasonal average: " + months_str
# print "Using NCEP Reanalysis-2 data from 1980-2007"
#
# msp = MSLPGrid(selected_months)
# lon, lat, mslp_av = msp.returnGrid()
#
# #Create output file
# output_filename = "mslp_clim_" + ''.join([calendar.month_abbr[i][0] for i in sort(list(selected_months))]) + '.nc'
# data_title = 'MSLP (NCEP Reanalysis-2) seasonal climatology. Averaging period: ' \
# + months_str + ' ' + '1980-2007.'
# dimensions = {0:{'name':'lat','values':lat,'dtype':'f','atts':{'long_name':'Latitude',
# 'units':'degrees_north'} },
# 1:{'name':'lon','values':lon,'dtype':'f','atts':{'long_name':'Longitude',
# 'units':'degrees_east'} } }
#
# variables = {0:{'name':'mslp','dims':('lat','lon'),
# 'values':array(mslp_av),'dtype':'f',
# 'atts':{'long_name':'Mean sea level pressure',
# 'units':'hPa'} } }
# nctools.ncSaveGrid( output_filename, dimensions, variables,
# nodata=-9999,datatitle=data_title )
#
# print "Created output file: " + output_filename
#
#
#if __name__ == "__main__":
# try:
# configFile = sys.argv[1]
# except IndexError:
# # Try loading config file with same name as python script
# configFile = __file__.rstrip('.py') + '.ini'
# # If no filename is specified and default filename doesn't exist => raise error
# if not os.path.exists(configFile):
# error_msg = "No configuration file specified"
# raise IOError, error_msg
# # If config file doesn't exist => raise error
# if not os.path.exists(configFile):
# error_msg = "Configuration file '" + configFile +"' not found"
# raise IOError, error_msg
#
# main(configFile)
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from os import listdir
import config
def read(bound_file_path, compounds_file_path, adducts_file_path):
    '''
    Read in excel files as pd.DataFrame objects
    '''
    bound_df = pd.read_excel(bound_file_path)
    compounds_df = pd.read_excel(compounds_file_path)
    adducts_df = pd.read_excel(adducts_file_path)
    # Drop the bare-proton row, then rename/extend the adduct columns so the
    # table can be stacked beneath the compounds table.
    keep = adducts_df['Formula'] != 'H'
    adducts_df = adducts_df[keep]
    adducts_df.columns = ['Compound/Fragment', 'Formula', 'Min', 'Max', 'Charge of compound/fragment']
    adducts_df['Compound/Fragment Type'] = 'Adducts'
    all_compounds = pd.concat([compounds_df, adducts_df], ignore_index=True, sort=False)
    return bound_df, all_compounds
def normalise(spectrum):
    """Min-max normalise the 'I' intensity column of *spectrum* in place.

    Adds a 'normalised_intensity' column scaled into [0, 1]. The original
    formula divided by (max - min), which is 0/0 -> NaN for a constant
    signal; that edge case now yields an all-zero column instead.

    Args:
        spectrum: DataFrame with a raw intensity column named 'I'.

    Returns:
        The same DataFrame object, with 'normalised_intensity' added.
    """
    intensities = spectrum["I"].to_numpy()
    lo = intensities.min(axis=0)
    span = intensities.max(axis=0) - lo
    if span == 0:
        # Constant signal: avoid division by zero; define the result as 0.
        spectrum["normalised_intensity"] = 0.0
    else:
        spectrum["normalised_intensity"] = (intensities - lo) / span
    return spectrum
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Arguments:
-v, verbose
--local-dir, where we store the ONNX and Caffe2 models
--no-cache, ignore existing models in local-dir
--clean-test-data, delete all the existing test data when updating ONNX model zoo
--add-test-data, add add-test-data sets of test data for each ONNX model
--only-local, run locally (for testing purpose)
Examples:
# store the data in /home/username/zoo-dir, delete existing test data, ignore local cache,
# and generate 3 sets of new test data
python update-caffe2-models.py --local-dir /home/username/zoo-dir --clean-test-data --no-cache --add-test-data 3
"""
# TODO: Add GPU support
def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False):
    """Compress an ONNX model directory and upload it to the S3 model zoo.

    With *backup* the archive name gets a '-backup' suffix so the previous
    zoo version is preserved. With *only_local* nothing is uploaded.
    """
    if only_local:
        print('No uploading in local only mode.')
        return

    model_dir = os.path.join(zoo_dir, model_name)
    if backup:
        print('Backing up the previous version of ONNX model {}...'.format(model_name))
        suffix = '-backup'
    else:
        suffix = ''

    # Build the .tar.gz archive next to the model directory.
    rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
    abs_file_name = os.path.join(zoo_dir, rel_file_name)
    print('Compressing {} model to {}'.format(model_name, abs_file_name))
    with tarfile.open(abs_file_name, 'w:gz') as archive:
        archive.add(model_dir, arcname=model_name)

    file_size = os.stat(abs_file_name).st_size
    print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024))

    # Publicly readable object under models/latest/ in the download.onnx bucket.
    client = boto3.client('s3', 'us-east-1')
    transfer = boto3.s3.transfer.S3Transfer(client)
    transfer.upload_file(abs_file_name, 'download.onnx',
                         'models/latest/{}'.format(rel_file_name),
                         extra_args={'ACL': 'public-read'})

    print('Successfully uploaded {} to s3!'.format(rel_file_name))
def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
    """Fetch the latest ONNX model archive from S3 into *zoo_dir*.

    If a local copy exists and *use_cache* is set, the cached copy is kept
    (a backup of it is uploaded first); otherwise the local copy is removed
    and re-downloaded. After a fresh download the on-disk state is backed
    up to the zoo unless *only_local* is set.
    """
    model_dir = os.path.join(zoo_dir, model_name)
    if os.path.exists(model_dir):
        if use_cache:
            # Reuse the cached model; push a backup of it before reuse.
            upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
            return
        else:
            shutil.rmtree(model_dir)
    url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name)

    # delete=False + immediate close: the temp file is written to by name
    # below (urlretrieve), then removed explicitly in the finally block.
    download_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        download_file.close()
        print('Downloading ONNX model {} from {} and save in {} ...\n'.format(
            model_name, url, download_file.name))
        urlretrieve(url, download_file.name)
        with tarfile.open(download_file.name) as t:
            print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir))
            t.extractall(zoo_dir)
    except Exception as e:
        # Download/extract failures are deliberately non-fatal: fall back to
        # an empty model dir so later steps can regenerate its contents.
        print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e))
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
    finally:
        os.remove(download_file.name)
        if not only_local:
            # Back up whatever is now on disk as the previous zoo version,
            # regardless of whether the download succeeded.
            upload_onnx_model(model_name, zoo_dir, backup=True, only_local=only_local)
def download_caffe2_model(model_name, zoo_dir, use_cache=True):
    """Download a Caffe2 model (nets + value_info) into *zoo_dir*.

    Skips the download entirely when a cached copy exists and *use_cache*
    is set. On any download failure the partial model directory is deleted
    and the exception is re-raised.
    """
    model_dir = os.path.join(zoo_dir, model_name)
    if os.path.exists(model_dir):
        if use_cache:
            return
        shutil.rmtree(model_dir)
    os.makedirs(model_dir)

    for file_name in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
        url = getURLFromName(model_name, file_name)
        dest = os.path.join(model_dir, file_name)
        try:
            try:
                downloadFromURLToFile(url, dest, show_progress=False)
            except TypeError:
                # show_progress not supported prior to
                # Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
                # (Sep 17, 2017)
                downloadFromURLToFile(url, dest)
        except Exception as e:
            # Leave no half-downloaded model behind.
            print("Abort: {reason}".format(reason=e))
            print("Cleaning up...")
            deleteDirectory(model_dir)
            raise
def caffe2_to_onnx(caffe2_model_name, caffe2_model_dir):
    """Load a Caffe2 model from disk and convert it to ONNX.

    Returns:
        (onnx_model, init_net, predict_net): the converted ONNX model plus
        the parsed Caffe2 init and predict NetDef protos.
    """
    def load_net(file_name):
        # Parse one serialized caffe2 NetDef from the model directory.
        net = caffe2_pb2.NetDef()
        with open(os.path.join(caffe2_model_dir, file_name), 'rb') as f:
            net.ParseFromString(f.read())
        return net

    caffe2_init_proto = load_net('init_net.pb')
    caffe2_init_proto.name = '{}_init'.format(caffe2_model_name)

    caffe2_predict_proto = load_net('predict_net.pb')
    caffe2_predict_proto.name = caffe2_model_name

    # value_info supplies input names/shapes needed by the frontend.
    with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'rb') as f:
        value_info = json.loads(f.read())

    print('Converting Caffe2 model {} in {} to ONNX format'.format(caffe2_model_name, caffe2_model_dir))
    onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model(
        init_net=caffe2_init_proto,
        predict_net=caffe2_predict_proto,
        value_info=value_info
    )
    return onnx_model, caffe2_init_proto, caffe2_predict_proto
def tensortype_to_ndarray(tensor_type):
    """Create a random ndarray matching an ONNX tensor type.

    Args:
        tensor_type: an ONNX TypeProto.Tensor with shape and elem_type.

    Returns:
        numpy array of uniform random values in [0, 1) with the described
        shape, cast to float32 or int32.

    Raises:
        ValueError: for unsupported element types. (The original code used
        a bare ``raise`` here, which itself fails with "RuntimeError: No
        active exception to re-raise".)
    """
    shape = [dim.dim_value for dim in tensor_type.shape.dim]
    if tensor_type.elem_type == onnx.TensorProto.FLOAT:
        dtype = np.float32
    elif tensor_type.elem_type == onnx.TensorProto.INT32:
        # Fixed from TensorProto.INT: the DataType enum has INT32/INT64
        # but no INT member, so the old comparison raised AttributeError.
        dtype = np.int32
    else:
        raise ValueError(
            'Unsupported tensor element type: {}'.format(tensor_type.elem_type))
    return np.random.rand(*shape).astype(dtype)
def generate_test_input_data(onnx_model, scale):
    """Build random input tensors for a model's real (non-weight) inputs.

    Graph inputs include the initializers (weights); those are excluded.
    Returns a list of (input_name, ndarray) pairs with values scaled by
    *scale*.
    """
    initializer_names = set(init.name for init in onnx_model.graph.initializer)
    all_input_names = set(inp.name for inp in onnx_model.graph.input)
    real_inputs_names = list(all_input_names - initializer_names)

    # Recover the full ValueInfo proto for each surviving input name.
    real_inputs = []
    for name in real_inputs_names:
        for graph_input in onnx_model.graph.input:
            if name == graph_input.name:
                real_inputs.append(graph_input)

    test_inputs = []
    for graph_input in real_inputs:
        values = tensortype_to_ndarray(graph_input.type.tensor_type)
        test_inputs.append((graph_input.name, values * scale))
    return test_inputs
def generate_test_output_data(caffe2_init_net, caffe2_predict_net, inputs):
    """Run the Caffe2 predictor on *inputs* and return its outputs.

    *inputs* is a list of (name, ndarray) pairs. The Caffe2 workspace is
    reset afterwards so successive calls start from a clean state.
    """
    predictor = c2_workspace.Predictor(caffe2_init_net, caffe2_predict_net)
    feed = dict(inputs)
    outputs = predictor.run(feed)
    c2_workspace.ResetWorkspace()
    return outputs
def onnx_verify(onnx_model, inputs, ref_outputs):
    """Check the Caffe2 ONNX backend reproduces *ref_outputs* on *inputs*.

    *inputs* entries may be raw arrays or (name, array) pairs; only the
    arrays are fed to the backend. Outputs are compared to 3 decimals.
    """
    prepared = caffe2.python.onnx.backend.prepare(onnx_model)
    # Strip names when the inputs arrive as (name, array) tuples.
    onnx_inputs = [item[1] if isinstance(item, tuple) else item
                   for item in inputs]
    onnx_outputs = prepared.run(inputs=onnx_inputs)
    np.testing.assert_almost_equal(onnx_outputs, ref_outputs, decimal=3)
# Map from ONNX zoo model name to the corresponding Caffe2 model-zoo name.
# All entries map to themselves except 'squeezenet', whose Caffe2
# counterpart is published as 'squeezenet_old'. Commented-out entries are
# excluded from the update run.
model_mapping = {
    'bvlc_alexnet': 'bvlc_alexnet',
    'bvlc_googlenet': 'bvlc_googlenet',
    'bvlc_reference_caffenet': 'bvlc_reference_caffenet',
    'bvlc_reference_rcnn_ilsvrc13': 'bvlc_reference_rcnn_ilsvrc13',
    'densenet121': 'densenet121',
    #'finetune_flickr_style': 'finetune_flickr_style',
    'inception_v1': 'inception_v1',
    'inception_v2': 'inception_v2',
    'resnet50': 'resnet50',
    'shufflenet': 'shufflenet',
    'squeezenet': 'squeezenet_old',
    #'vgg16': 'vgg16',
    'vgg19': 'vgg19',
    'zfnet512': 'zfnet512',
}
if __name__ == '__main__':
    # Entry point: for every model in model_mapping, download the Caffe2
    # source model, convert it to ONNX, verify it against existing test
    # data, optionally generate new test data, and upload to the zoo.
    parser = argparse.ArgumentParser(description='Update the ONNX models.')
    parser.add_argument('-v', action="store_true", default=False, help="verbose")
    parser.add_argument("--local-dir", type=str, default=os.path.expanduser('~'),
                        help="local dir to store Caffe2 and ONNX models")
    parser.add_argument("--no-cache", action="store_true", default=False,
                        help="whether use local ONNX models")
    parser.add_argument('--clean-test-data', action="store_true", default=False,
                        help="remove the old test data")
    parser.add_argument('--add-test-data', type=int, default=0,
                        help="add new test data")
    parser.add_argument('--only-local', action="store_true", default=False,
                        help="no upload including backup")
    args = parser.parse_args()

    delete_test_data = args.clean_test_data
    add_test_data = args.add_test_data
    use_cache = not args.no_cache
    only_local = args.only_local

    # Both zoos default to hidden directories under the user's home.
    root_dir = args.local_dir
    caffe2_zoo_dir = os.path.join(root_dir, ".caffe2", "models")
    onnx_zoo_dir = os.path.join(root_dir, ".onnx", "models")

    for onnx_model_name in model_mapping:
        c2_model_name = model_mapping[onnx_model_name]

        print('####### Processing ONNX model {} ({} in Caffe2) #######'.format(onnx_model_name, c2_model_name))
        # Fetch the Caffe2 source model and the current zoo ONNX model
        # (the latter is backed up to S3 before being overwritten).
        download_caffe2_model(c2_model_name, caffe2_zoo_dir, use_cache=use_cache)
        download_onnx_model(onnx_model_name, onnx_zoo_dir, use_cache=use_cache, only_local=only_local)

        onnx_model_dir = os.path.join(onnx_zoo_dir, onnx_model_name)

        if delete_test_data:
            print('Deleting all the existing test data...')
            # NB: For now, we don't delete the npz files.
            #for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
            #    os.remove(f)
            for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
                shutil.rmtree(f)

        onnx_model, c2_init_net, c2_predict_net = caffe2_to_onnx(c2_model_name, os.path.join(caffe2_zoo_dir, c2_model_name))

        print('Deleteing old ONNX {} model...'.format(onnx_model_name))
        # NOTE(review): the .format() call is a no-op -- 'model*' contains
        # no placeholder, so the glob pattern is always literally 'model*'.
        for f in glob.glob(os.path.join(onnx_model_dir, 'model*'.format(onnx_model_name))):
            os.remove(f)

        print('Serializing generated ONNX {} model ...'.format(onnx_model_name))
        with open(os.path.join(onnx_model_dir, 'model.onnx'), 'wb') as file:
            file.write(onnx_model.SerializeToString())

        print('Verifying model {} with ONNX model checker...'.format(onnx_model_name))
        onnx.checker.check_model(onnx_model)

        total_existing_data_set = 0
        print('Verifying model {} with existing test data...'.format(onnx_model_name))
        # Legacy layout: one .npz per data set with 'inputs'/'outputs' keys.
        for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')):
            test_data = np.load(f, encoding='bytes')
            inputs = list(test_data['inputs'])
            ref_outputs = list(test_data['outputs'])
            onnx_verify(onnx_model, inputs, ref_outputs)
            total_existing_data_set += 1
        # Current layout: test_data_set_N/ with input_i.pb / output_i.pb.
        for f in glob.glob(os.path.join(onnx_model_dir, 'test_data_set*')):
            inputs = []
            inputs_num = len(glob.glob(os.path.join(f, 'input_*.pb')))
            for i in range(inputs_num):
                tensor = onnx.TensorProto()
                with open(os.path.join(f, 'input_{}.pb'.format(i)), 'rb') as pf:
                    tensor.ParseFromString(pf.read())
                inputs.append(numpy_helper.to_array(tensor))
            ref_outputs = []
            ref_outputs_num = len(glob.glob(os.path.join(f, 'output_*.pb')))
            for i in range(ref_outputs_num):
                tensor = onnx.TensorProto()
                with open(os.path.join(f, 'output_{}.pb'.format(i)), 'rb') as pf:
                    tensor.ParseFromString(pf.read())
                ref_outputs.append(numpy_helper.to_array(tensor))
            onnx_verify(onnx_model, inputs, ref_outputs)
            total_existing_data_set += 1

        # New data sets continue numbering after the highest existing one.
        starting_index = 0
        while os.path.exists(os.path.join(onnx_model_dir, 'test_data_set_{}'.format(starting_index))):
            starting_index += 1

        # No data at all and none requested: default to generating 3 sets.
        if total_existing_data_set == 0 and add_test_data == 0:
            add_test_data = 3
            total_existing_data_set = 3

        print('Generating {} sets of new test data...'.format(add_test_data))
        for i in range(starting_index, add_test_data + starting_index):
            data_dir = os.path.join(onnx_model_dir, 'test_data_set_{}'.format(i))
            os.makedirs(data_dir)
            inputs = generate_test_input_data(onnx_model, 255)
            ref_outputs = generate_test_output_data(c2_init_net, c2_predict_net, inputs)
            # Round-trip new data through the ONNX backend before saving it.
            onnx_verify(onnx_model, inputs, ref_outputs)
            for index, input in enumerate(inputs):
                tensor = numpy_helper.from_array(input[1])
                with open(os.path.join(data_dir, 'input_{}.pb'.format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString())
            for index, output in enumerate(ref_outputs):
                tensor = numpy_helper.from_array(output)
                with open(os.path.join(data_dir, 'output_{}.pb'.format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString())

        # Release the large protos before processing the next model.
        del onnx_model
        del c2_init_net
        del c2_predict_net

        upload_onnx_model(onnx_model_name, onnx_zoo_dir, backup=False, only_local=only_local)

        print('\n\n')
|
nilq/baby-python
|
python
|
# Copyright (c) 2018 Graphcore Ltd. All rights reserved.
# This script is run by the release agent to create a release of PopTorch
def install_release(release_utils, release_id, snapshot_id, version_str):
    """Create a PopTorch release: publish docs, tag the view repo, bump version.

    The tag embeds 'poptorch' so it stays unique among products that share
    the view repository.
    """
    tag = "{}-poptorch".format(version_str)
    release_utils.log.info('Tagging poptorch release ' + tag)

    # Publish the release documentation for this snapshot.
    release_utils.create_document_release(snapshot_id)

    # Tag the view repository with the release.
    view_repo_url = (
        'ssh://git@phabricator.sourcevertex.net/diffusion/'
        + 'POPONNXVIEW/poponnxview.git'
    )
    release_utils.tag_view_repo(view_repo_url, snapshot_id, release_id, tag)

    # Bump the point version number for the next development cycle.
    poptorch_repo_url = (
        'ssh://git@phabricator.sourcevertex.net/diffusion/'
        + 'POPTORCH/poptorch.git'
    )
    release_utils.increment_version_point(poptorch_repo_url)
|
nilq/baby-python
|
python
|
import json
import pytest
from ermaket.api.database import DBConn
from ermaket.api.scripts import ScriptManager
from ermaket.api.system.hierarchy import Activation, Trigger, Triggers
# Script IDs registered by the test fixtures; their behavior is exercised
# by the tests below.
CHAIN_ID = 1   # chained script whose 'done' result alternates step_1/step_2
TEAPOT_ID = 2  # script that aborts the request with HTTP 418
ADD_ID = 3     # script that injects 'data' into the businessLogic payload
ADD2_ID = 4    # script that additionally injects 'data2'
def login(client, user):
    """POST *user*'s credentials to the login endpoint; return the response."""
    credentials = {
        "login": user.login,
        "password": user.password,
    }
    return client.post('/auth/login', data=credentials)
@pytest.mark.usefixtures("client", "test_db")
def test_login(test_db, client):
    """Login/logout round trip: current-user info is only available while a
    session is active, and the login payload carries hierarchy, rights and
    profile forms."""
    # No session yet: current-user and logout report failure.
    assert not client.get('/auth/current').json['ok']
    assert not client.post('/auth/logout').json['ok']
    response = login(client, test_db.admin_user)
    assert response.json["ok"]
    response = client.get('/auth/current')
    assert response.json['user']['login'] == test_db.admin_user.login
    hierarchy = response.json['hierarchy']
    assert hierarchy
    # The first hierarchy root must have at least one child.
    assert len(next(iter(hierarchy['hierarchy']))['children']) > 0
    rights = response.json['rights']
    assert rights
    profile_forms = response.json['profile_forms']
    assert len(profile_forms) > 0
    response = client.post('/auth/logout')
    assert response.json['ok']
    # After logout the session is gone again.
    assert not client.get('/auth/current').json['ok']
    assert not client.post('/auth/logout').json['ok']
@pytest.mark.usefixtures("client", "test_db")
def test_password(test_db, client):
    """Password change is rejected with a wrong old password and accepted
    with the right one; afterwards only the new password logs in. The
    original password is restored at the end."""
    assert login(client, test_db.admin_user).json["ok"]
    # Wrong old password: change must be rejected.
    assert not client.put(
        '/auth/password',
        data={
            "old_pass": "Surely wrong password, noone would ever set this",
            "new_pass": "1234567890"
        }
    ).json['ok']
    client.post('/auth/logout')
    assert login(client, test_db.admin_user).json["ok"]
    # Correct old password: change succeeds.
    assert client.put(
        '/auth/password',
        data={
            "old_pass": test_db.admin_user.password,
            "new_pass": "1234567890"
        }
    ).json["ok"]
    client.post('/auth/logout')
    # The old credentials no longer work; the new password does.
    assert not login(client, test_db.admin_user).json["ok"]
    assert client.post(
        '/auth/login',
        data={
            "login": test_db.admin_user.login,
            "password": "1234567890"
        }
    ).json["ok"]
    # Restore the original password so other tests see a clean state.
    assert client.put(
        '/auth/password',
        data={
            "old_pass": "1234567890",
            "new_pass": test_db.admin_user.password
        }
    ).json["ok"]
    client.post('/auth/logout')
def print_roles(test_db):
    """Debug helper (not a test): print the role names of both fixture users.

    The user objects are attached to a fresh session so their role
    relationships can be loaded. The redundant function-level
    ``from ermaket.api.database import DBConn`` was removed — DBConn is
    already imported at module level.
    """
    normal, admin = test_db.normal_user.user, test_db.admin_user.user
    with DBConn.get_session() as db:
        db.add(normal)
        db.add(admin)
        print(f'Normal roles: {normal.role_names}')
        print(f'Admin roles: {admin.role_names}')
@pytest.mark.usefixtures("client", "test_db")
def test_get(client, test_db):
    """Table/entry GET endpoints: 404 for unknown tables, 401 without a
    session, 200 with data for an authenticated admin."""
    schema = test_db.schema
    assert client.get('/tables/foo/bar').status_code == 404
    name = test_db.model.__table__.name
    table_url = f'/tables/table/{schema}/{name}'
    entry_url = f'/tables/entry/{schema}/{name}'
    # Unauthenticated access is rejected.
    assert client.get(table_url).status_code == 401
    assert client.get(entry_url).status_code == 401
    login(client, test_db.admin_user)
    response = client.get(table_url)
    assert response.status_code == 200
    assert len(response.json) > 0
    response = client.get(entry_url)
    assert response.status_code == 200
    assert response.json
    client.post('/auth/logout')
    login(client, test_db.normal_user)
    # NOTE(review): forbidden-access checks for the normal user are
    # currently disabled — presumably pending rights setup; confirm.
    # assert client.get(table_url).status_code == 403
    # assert client.get(entry_url).status_code == 403
    client.post('/auth/logout')
@pytest.mark.usefixtures("client", "test_db")
def test_transaction(client, test_db):
    """A delete transaction for an existing row executes with HTTP 200."""
    model = test_db.model
    entry = test_db.entry
    # Fetch any existing row and extract its primary-key value.
    with DBConn.get_session() as db:
        item = db.query(model).first()
        data = model.__marshmallow__().dump(item)
        key = data[entry.pk.rowName]
    # Transaction format: {entry_id: {'delete': {pk_value: True}}}.
    transaction = {entry.id: {'delete': {key: True}}}
    login(client, test_db.admin_user)
    response = client.post(
        '/transaction/execute',
        data=json.dumps({'transaction': transaction}),
        content_type='application/json'
    )
    assert response.status_code == 200
@pytest.mark.usefixtures("client", "test_db")
def test_sql(client, test_db):
    """Raw SQL endpoint returns rows whose width matches the key list."""
    schema, table = test_db.schema, test_db.entry.tableName
    query = f'SELECT * FROM {schema}.{table}'
    login(client, test_db.admin_user)
    response = client.post(
        '/sql/execute',
        data=json.dumps({'query': query}),
        content_type='application/json'
    )
    assert response.status_code == 200
    # Each result row must have one value per returned column key.
    assert len(response.json["result"][0]) == len(response.json["keys"])
@pytest.mark.usefixtures("client", "test_db")
def test_call_script(client, test_db):
    """The chained script reports step_1, then step_2, then wraps back to
    step_1 on successive invocations."""
    login(client, test_db.admin_user)
    for expected_step in ('step_1', 'step_2', 'step_1'):
        response = client.post(
            f"/scripts/execute/{CHAIN_ID}",
            data=json.dumps({'activation': 'call'}),
            content_type='application/json'
        )
        assert response.status_code == 200
        assert response.json['businessLogic']['done'] == expected_step
@pytest.mark.usefixtures("client", "test_db")
def test_abort_request(client, test_db):
    """Scripts attached as triggers can abort requests: READ, TRANSACTION
    and LOGIN activations all surface the script's HTTP 418 response.
    Global triggers are cleared at the end."""
    # READ trigger on the entry: both table endpoints abort with 418.
    test_db.entry.triggerList = Triggers([Trigger(Activation.READ, TEAPOT_ID)])
    login(client, test_db.admin_user)
    schema, name = test_db.entry.schema, test_db.entry.tableName
    table_url = f'/tables/table/{schema}/{name}'
    entry_url = f'/tables/entry/{schema}/{name}'
    assert client.get(table_url).status_code == 418
    assert client.get(entry_url).status_code == 418
    # Global TRANSACTION trigger: transaction execution aborts with 418.
    mgr = ScriptManager()
    mgr.global_triggers.append(Trigger(Activation.TRANSACTION, TEAPOT_ID))
    model = test_db.model
    entry = test_db.entry
    with DBConn.get_session() as db:
        item = db.query(model).first()
        data = model.__marshmallow__().dump(item)
        key = data[entry.pk.rowName]
    transaction = {entry.id: {'delete': {key: True}}}
    response = client.post(
        '/transaction/execute',
        data=json.dumps({'transaction': transaction}),
        content_type='application/json'
    )
    assert response.status_code == 418
    client.post('/auth/logout')
    # Global LOGIN trigger: even logging in aborts with 418.
    mgr.global_triggers.append(Trigger(Activation.LOGIN, TEAPOT_ID))
    response = client.post(
        '/auth/login',
        data={
            "login": test_db.admin_user.login,
            "password": test_db.admin_user.password
        }
    )
    assert response.status_code == 418
    # Reset global triggers so later tests are unaffected.
    mgr.global_triggers = Triggers([])
@pytest.mark.usefixtures("client", "test_db")
def test_add_info(client, test_db):
    """LOGIN-triggered scripts can attach extra data to the login response;
    multiple triggers merge their payloads. Global triggers are cleared at
    the end."""
    mgr = ScriptManager()
    mgr.global_triggers.append(Trigger(Activation.LOGIN, ADD_ID))
    response = client.post(
        '/auth/login',
        data={
            "login": test_db.admin_user.login,
            "password": test_db.admin_user.password
        }
    )
    assert response.status_code == 200
    assert response.json['businessLogic']['data'] == "EXAMPLE_DATA"
    client.post('/auth/logout')
    # A second trigger adds its payload alongside the first one.
    mgr.global_triggers.append(Trigger(Activation.LOGIN, ADD2_ID))
    response = client.post(
        '/auth/login',
        data={
            "login": test_db.admin_user.login,
            "password": test_db.admin_user.password
        }
    )
    assert response.status_code == 200
    assert response.json['businessLogic']['data'] == "EXAMPLE_DATA"
    assert response.json['businessLogic']['data2'] == "EXAMPLE_DATA2"
    # Reset global triggers so later tests are unaffected.
    mgr.global_triggers = Triggers([])
@pytest.mark.usefixtures("client", "test_db")
def test_register(client, test_db):
    """Registration tokens: a plain token registers a regular user who can
    log in; a token created with roles grants those roles to the new user."""
    login(client, test_db.admin_user)
    # Token without roles (form-encoded request).
    token_simple = client.post(
        '/auth/register_token', data={
            'name': 'test',
        }
    ).json['token']
    # Token carrying the 'admin' role (JSON request).
    token_admin = client.post(
        '/auth/register_token',
        data=json.dumps({
            'name': 'test',
            'roles': ['admin']
        }),
        content_type='application/json'
    ).json['token']
    assert token_admin is not None
    assert token_simple is not None
    client.post('/auth/logout')
    # Registering with the simple token logs the new user in immediately.
    assert client.post(
        '/auth/register',
        data={
            'token': token_simple,
            'login': 'manager_1',
            'password': '12345'
        }
    ).json['ok']
    assert client.get('/auth/current').json['ok']
    client.post('/auth/logout')
    # The newly registered user can log in with their credentials.
    assert client.post(
        '/auth/login', data={
            'login': 'manager_1',
            'password': '12345'
        }
    ).json['ok']
    client.post('/auth/logout')
    # Registering with the role-bearing token grants the admin role.
    assert client.post(
        '/auth/register',
        data={
            'token': token_admin,
            'login': 'manager_2',
            'password': '12345'
        }
    ).json['ok']
    assert 'admin' in client.get('/auth/current').json['user']['roles']
    client.post('/auth/logout')
@pytest.mark.usefixtures("client", "test_db")
def test_reset_password(client, test_db):
    """Password reset flow: an admin issues a reset token for a registered
    user, the user resets their password with it, and can then log in with
    the new password."""
    login(client, test_db.admin_user)
    token_simple = client.post(
        '/auth/register_token', data={
            'name': 'test',
        }
    ).json['token']
    # Register a fresh user to reset the password for.
    assert client.post(
        '/auth/register',
        data={
            'token': token_simple,
            'login': 'manager_10',
            'password': '12345'
        }
    ).json['ok']
    assert client.get('/auth/current').json['ok']
    client.post('/auth/logout')
    # The admin issues a reset token bound to that login.
    login(client, test_db.admin_user)
    token_reset = client.post(
        '/auth/reset_password_token',
        data={
            'name': 'test',
            'login': 'manager_10'
        }
    ).json['token']
    assert token_reset is not None
    client.post('/auth/logout')
    # The user resets their password with the token (no session needed).
    assert client.put(
        '/auth/reset_password',
        data={
            'token': token_reset,
            'login': 'manager_10',
            'password': '54321'
        }
    ).json['ok']
    # The new password must work. The original assertion checked only the
    # truthiness of the response object (always true); it now checks the
    # actual 'ok' flag in the JSON body.
    assert client.post(
        '/auth/login', data={
            'login': 'manager_10',
            'password': '54321'
        }
    ).json['ok']
    client.post('/auth/logout')
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from django.views.generic.base import RedirectView
from tests.views import foo, foo_api
# URL routes used by the test suite: a JSON API endpoint, a plain view,
# and a redirect exercising RedirectView.
urlpatterns = [
    url(r"^api/foo/$", foo_api),
    url(r"^foo/$", foo),
    # permanent=False -> temporary (302) redirect rather than 301.
    url(r"^bar/$", RedirectView.as_view(url="/foo/", permanent=False)),
]
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.