| content | origin | type |
|---|---|---|
"""
Immutable config schema objects.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from enum import Enum
MASTER_NAMESPACE = "MASTER"
CLEANUP_ACTION_NAME = 'cleanup'
def config_object_factory(name, required=None, optional=None):
"""
Creates a namedtuple which has two additional attributes:
required_keys:
all keys required to be set on this configuration object
optional_keys:
optional keys for this configuration object
The tuple is created from required + optional
"""
required = required or []
optional = optional or []
config_class = namedtuple(name, required + optional)
# make last len(optional) args actually optional
config_class.__new__.__defaults__ = (None, ) * len(optional)
config_class.required_keys = required
config_class.optional_keys = optional
return config_class
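# Illustrative usage (not part of the original module): the factory returns a
# namedtuple class whose trailing optional fields default to None, e.g.
#   Example = config_object_factory('Example', required=['a'], optional=['b'])
#   Example(a=1)            # -> Example(a=1, b=None)
#   Example.required_keys   # -> ['a']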
TronConfig = config_object_factory(
name='TronConfig',
optional=[
'output_stream_dir', # str
'action_runner', # ConfigActionRunner
'state_persistence', # ConfigState
'command_context', # dict of str
'ssh_options', # ConfigSSHOptions
'time_zone', # pytz time zone
'nodes', # dict of ConfigNode
'node_pools', # dict of ConfigNodePool
'jobs', # dict of ConfigJob
'mesos_options', # ConfigMesos
'eventbus_enabled', # bool or None
],
)
NamedTronConfig = config_object_factory(
name='NamedTronConfig',
optional=[
'jobs', # dict of ConfigJob
],
)
ConfigActionRunner = config_object_factory(
'ConfigActionRunner',
optional=['runner_type', 'remote_status_path', 'remote_exec_path'],
)
ConfigSSHOptions = config_object_factory(
name='ConfigSSHOptions',
optional=[
'agent',
'identities',
'known_hosts_file',
'connect_timeout',
'idle_connection_timeout',
'jitter_min_load',
'jitter_max_delay',
'jitter_load_factor',
],
)
ConfigNode = config_object_factory(
name='ConfigNode',
required=['hostname'],
optional=['name', 'username', 'port'],
)
ConfigNodePool = config_object_factory('ConfigNodePool', ['nodes'], ['name'])
ConfigState = config_object_factory(
name='ConfigState',
required=[
'name',
'store_type',
],
optional=[
'buffer_size',
'dynamodb_region',
'table_name',
],
)
ConfigMesos = config_object_factory(
name='ConfigMesos',
optional=[
'master_address',
'master_port',
'secret_file',
'principal',
'role',
'enabled',
'default_volumes',
'dockercfg_location',
'offer_timeout',
],
)
ConfigJob = config_object_factory(
name='ConfigJob',
required=[
'name', # str
'node', # str
'schedule', # Config*Scheduler
'actions', # dict of ConfigAction
'namespace', # str
],
optional=[
'monitoring', # dict
'queueing', # bool
'run_limit', # int
'all_nodes', # bool
'cleanup_action', # ConfigAction
'enabled', # bool
'allow_overlap', # bool
'max_runtime', # datetime.timedelta
'time_zone', # pytz time zone
'expected_runtime', # datetime.timedelta
],
)
ConfigAction = config_object_factory(
name='ConfigAction',
required=[
'name', # str
'command', # str
],
optional=[
'requires', # tuple of str
'node', # str
'retries', # int
'retries_delay', # datetime.timedelta
'executor', # str
'cpus', # float
'mem', # float
'disk', # float
'constraints', # List of ConfigConstraint
'docker_image', # str
'docker_parameters', # List of ConfigParameter
'env', # dict
'extra_volumes', # List of ConfigVolume
'expected_runtime', # datetime.timedelta
'trigger_downstreams', # None, bool or dict
'triggered_by', # list or None
'on_upstream_rerun', # ActionOnRerun or None
'trigger_timeout', # datetime.timedelta or None
],
)
ConfigCleanupAction = config_object_factory(
name='ConfigCleanupAction',
required=[
'command', # str
],
optional=[
'name', # str
'node', # str
'retries', # int
'retries_delay', # datetime.timedelta
'expected_runtime', # datetime.timedelta
'executor', # str
'cpus', # float
'mem', # float
'disk', # float
'constraints', # List of ConfigConstraint
'docker_image', # str
'docker_parameters', # List of ConfigParameter
'env', # dict
'extra_volumes', # List of ConfigVolume
'trigger_downstreams', # None, bool or dict
'triggered_by', # list or None
'on_upstream_rerun', # ActionOnRerun or None
'trigger_timeout', # datetime.timedelta or None
],
)
ConfigConstraint = config_object_factory(
name='ConfigConstraint',
required=[
'attribute',
'operator',
'value',
],
optional=[],
)
ConfigVolume = config_object_factory(
name='ConfigVolume',
required=[
'container_path',
'host_path',
'mode',
],
optional=[],
)
ConfigParameter = config_object_factory(
name='ConfigParameter',
required=[
'key',
'value',
],
optional=[],
)
StatePersistenceTypes = Enum(
'StatePersistenceTypes', dict(shelve='shelve', yaml='yaml', dynamodb='dynamodb')
)
ExecutorTypes = Enum('ExecutorTypes', dict(ssh='ssh', mesos='mesos'))
ActionRunnerTypes = Enum('ActionRunnerTypes', dict(none='none', subprocess='subprocess'))
VolumeModes = Enum('VolumeModes', dict(RO='RO', RW='RW'))
ActionOnRerun = Enum('ActionOnRerun', dict(rerun='rerun'))
| nilq/baby-python | python |
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010-2011 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Container import Container
class HelpEntry (Widget):
def __init__ (self, title, ref):
Widget.__init__ (self)
self.title = title
self.ref = ref
def Render (self):
if '://' in self.ref:
url = self.ref
else:
url = "/help/%s.html" %(self.ref)
render = Widget.Render(self)
render.html = '<div class="help_entry"><a href="%s" target="cherokee_help">%s</a></div>' %(url, self.title)
return render
def __repr__ (self):
return "<CTK.Help.HelpEntry: '%s', '%s', id=%d>"%(self.title, self.ref, id(self))
class HelpGroup (Widget):
def __init__ (self, name, group=[]):
Widget.__init__ (self)
self.name = name
self.entries = []
for entry in group:
self += entry
def __add__ (self, entry):
assert (isinstance(entry, HelpEntry) or
isinstance(entry, HelpGroup))
# Add it
self.entries.append (entry)
return self
def Render (self):
render = Widget.Render(self)
for entry in self.entries:
render += entry.Render()
render.html = '<div class="help_group" id="help_group_%s">%s</div>' %(self.name, render.html)
return render
def __repr__ (self):
txt = ', '.join([e.__repr__() for e in self.entries])
return "<CTK.Help.HelpGroup: id=%d, %s>"%(id(self), txt)
def toJSON (self):
all = []
for entry in self.entries:
if isinstance(entry, HelpEntry):
all.append ((entry.title, entry.ref))
else:
all += entry.toJSON()
return all
class HelpMenu (Widget):
def __init__ (self, helps=None):
Widget.__init__ (self)
if not helps:
self.helps = []
else:
self.helps = helps[:]
def __add__ (self, helps):
if type(helps) == list:
for entry in helps:
self._add_single (entry)
else:
self._add_single (helps)
return self
def _add_single (self, entry):
assert (isinstance (entry, HelpEntry) or
isinstance (entry, HelpGroup))
self.helps.append (entry)
def Render (self):
# Empty response
render = Widget.Render(self)
# Render the help entries
for entry in self.helps:
render.html += entry.Render().html
# Wrap the list of entries
render.html = '<div class="help">%s</div>' %(render.html)
return render
| nilq/baby-python | python |
from functools import wraps
from logzero import logger
from driver_singleton import DriverSingleton
def requires_url(required_url):
def inner_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
if DriverSingleton.get_driver().current_url != required_url:
DriverSingleton.get_driver().get(required_url)
except Exception as e:
logger.exception(e)
DriverSingleton.get_driver().get(required_url)
return func(*args, **kwargs)
return wrapper
return inner_function
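# Minimal usage sketch (hypothetical function name; DriverSingleton is assumed to
# wrap a Selenium-style driver exposing `current_url` and `get()`, as used above):
#   @requires_url("https://example.com/login")
#   def fill_login_form(username, password):
#       ...  # the decorator navigates to the login page first if needed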
| nilq/baby-python | python |
from django.utils import six
from debug_toolbar_multilang.pseudo import STR_FORMAT_PATTERN, \
STR_FORMAT_NAMED_PATTERN
from debug_toolbar_multilang.pseudo.pseudo_language import PseudoLanguage
class ExpanderPseudoLanguage(PseudoLanguage):
"""
Pseudo Language for expanding the strings. This is useful
for verifying that the message still fits on the screen.
Remember that some words are much longer in other
languages than in English. For instance, German words
take about 30% more space on average.
"""
def make_pseudo(self, message):
# message without %s or {} in it.
# {test} or %(test)s is allowed, though.
safeMessage = list(message)
# find every matching string
for match in reversed(list(STR_FORMAT_PATTERN.finditer(message))):
# Check if string uses the "named format".
# If not, the string will be replaced and saved
# into safeMessage
if not STR_FORMAT_NAMED_PATTERN.match(match.group()):
start, end = match.span()
safeMessage[start:end] = "???"
# create complete message by using the original, appending
# a space and finally converting the safeMessage to a string
# again.
return "%s %s" % (message, "".join(safeMessage))
def language(self):
return "pse-expander"
@property
def name(self):
return "Pseudo-Expander Language"
| nilq/baby-python | python |
# How do we find the shortest paths from vertex s to all other vertices in a directed acyclic graph?
from math import inf
def dfs(graph, source, visited, result):
visited[source] = True
for v in graph[source]:
if not visited[v[0]]:
dfs(graph, v[0], visited, result)
result.insert(0, source)
def shortest_paths(graph, s):
visited = [False] * len(graph)
distance = [inf] * len(graph)
distance[s] = 0
result = []
for i in range(len(graph)):
if not visited[i]:
dfs(graph, i, visited, result)
idx = result.index(s)
for i in range(idx, len(result)):
u = result[i]  # relax edges in topological order of vertices, not raw list indices
for v in graph[u]:
if distance[v[0]] > distance[u] + v[1]:
distance[v[0]] = distance[u] + v[1]
return distance
graph = [[(1, 3), (2, 6)],
[(2, 2), (3, 1), (5, 8)],
[(4, 7), (3, 5)],
[(5, 2), (4, 5)],
[(5, 3)],
[]]
print(shortest_paths(graph, 0))
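# With the sample DAG above this should print [0, 3, 5, 4, 9, 6]: the shortest
# distances from vertex 0, obtained by relaxing edges in topological order.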
| nilq/baby-python | python |
import unittest
from unittest.mock import patch
from tmc import points, reflect
from tmc.utils import load, load_module, reload_module, get_stdout, check_source, sanitize
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
from datetime import date, datetime, timedelta
exercise = 'src.series'
classname = "Series"
def f(attr: list):
return ",".join(attr)
class SeriesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Input was not expected")]):
cls.module = load_module(exercise, 'fi')
def test_0a_main_ok(self):
ok, line = check_source(self.module)
message = """All code testing the functions must be inside the
if __name__ == "__main__":
block. The following line must be moved:
"""
self.assertTrue(ok, message+line)
@points('8.series_part1')
def test1_class_exists(self):
try:
from src.series import Series
except:
self.fail("Your program should have a class called Series")
@points('8.series_part1')
def test2_constructor(self):
try:
from src.series import Series
series = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
except Exception as e:
self.fail('Calling constructor as Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])' +
f' threw an error: {e}\nCheck that constructor is correctly defined!')
@points('8.series_part1')
def test3_test_str(self):
test_case = ("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
try:
from src.series import Series
code = f'Series("{test_case[0]}", {test_case[1]}, {test_case[2]})'
series = Series(test_case[0], test_case[1], test_case[2])
genres = ", ".join(test_case[2])
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\nno ratings'
val = str(series)
self.assertEqual(sanitize(corr), sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns\n{val}")
except Exception as e:
self.fail(f'Calling method __str__ threw an error: {e}\nwhen object was created as\n{code}')
@points('8.series_part1')
def test3_test_str2(self):
test_case = ("South Park", 24, ["Animation", "Comedy"])
try:
from src.series import Series
code = f'Series("{test_case[0]}", {test_case[1]}, {test_case[2]})'
series = Series(test_case[0], test_case[1], test_case[2])
genres = ", ".join(test_case[2])
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\nno ratings'
val = str(series)
self.assertEqual(sanitize(corr), sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns\n{val}")
except Exception as e:
self.fail(f'Calling method __str__ threw an error: {e}\nwhen object was created as\n{code}')
@points('8.series_part2')
def test5_rate_exists(self):
try:
from src.series import Series
code = """
s = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s.rate(5)
"""
s = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s.rate(5)
except Exception as e:
self.fail(f'Executing code\n{code}\nthrew an error\n{e}\nCheck that method rate(self, grade: int) is defined.')
@points('8.series_part2')
def test5_rate(self):
from src.series import Series
code = """
s = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s.rate(5)
"""
test_case = ("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s.rate(5)
arv = "1 ratings, average 5.0 points"
genres = ", ".join(test_case[2])
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\n{arv}'
val = str(s)
self.assertTrue(sanitize(corr) == sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns a string\n{val}")
s.rate(3)
code += "s.rate(3)\n"
arv = "2 ratings, average 4.0 points"
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\n{arv}'
val = str(s)
self.assertTrue(sanitize(corr) == sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns a string\n{val}")
s.rate(2)
code += "s.rate(2)\n"
arv = "3 ratings, average 3.3 points"
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\n{arv}'
val = str(s)
self.assertTrue(sanitize(corr) == sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns a string\n{val}")
s.rate(5)
code += "s.rate(5)\n"
arv = "4 ratings, average 3.8 points"
corr = f'{test_case[0]} ({test_case[1]} seasons)\ngenres: {genres}\n{arv}'
val = str(s)
self.assertTrue(sanitize(corr) == sanitize(val), f"Method __str__ should return a string\n{corr}\nwhen object was created as\n" +
f"{code}\nNow method returns a string\n{val}")
@points('8.series_part3')
def test6_function_minimum_grade_olemassa(self):
try:
from src.series import minimum_grade
except:
self.fail("Your program should have a function called minimum_grade(grade: float, series: list)")
@points('8.series_part3')
def test7_function_minimum_grade(self):
from src.series import minimum_grade
from src.series import Series
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
code = """
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
vastaus = minimum_grade(4.5, series)
"""
try:
answer = minimum_grade(4.5, series)
except:
self.fail(f"Check that the following code can be executed\n{code}")
self.assertTrue(type(answer) == list, "Function minimum_grade(grade: float, series: list) should return a list")
expected = 1
self.assertTrue(len(answer)==expected, f"When this code is executed\n{code}\nthe length of the list returned should be {expected}, however, it was {len(answer)}")
self.assertTrue(answer[0].title=="Dexter", f"When this code is executed,\n{code}\nthe only series in the list should be Dexter; however, the list contains {answer[0].title}")
code = """
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
vastaus = minimum_grade(2.5, series)
"""
try:
answer = minimum_grade(2.5, series)
except:
self.fail(f"Check that the following code can be executed\n{code}")
self.assertTrue(type(answer) == list, "Function minimum_grade(grade: float, series: list) should return a list")
expected = 2
self.assertTrue(len(answer)==expected, f"When this code is executed\n{code}\nthe length of the list returned should be {expected}, however, it was {len(answer)}")
ehto = (answer[0].title=="Dexter" and answer[1].title=="South Park") or (answer[1].title=="Dexter" and answer[0].title=="South Park")
self.assertTrue(ehto, f"When this code is executed\n{code}\nthe list should include Dexter and South Park; now the list was {answer[0].title} and {answer[1].title}")
@points('8.series_part3')
def test8_function_includes_genre_olemassa(self):
try:
from src.series import includes_genre
except:
self.fail("Your program should include a function includes_genre(genre: str, series: list)")
@points('8.series_part3')
def test9_function_includes_genre(self):
from src.series import includes_genre
from src.series import Series
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
code = """
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
vastaus = includes_genre("Crime", series)
"""
try:
answer = includes_genre("Crime", series)
except:
self.fail(f"Check that the following code can be executedn{code}")
self.assertTrue(type(answer) == list, "Function includes_genre(genre: str, series: list) should return a list")
expected = 1
self.assertTrue(len(answer)==expected, f"When this code is executed\n{code}\nthe length of the list returned should be {expected}, however, it was {len(answer)}")
self.assertTrue(answer[0].title=="Dexter", f"When this code is executed,\n{code}\nthe only series in the list should be Dexter; however, the list contains {answer[0].title}")
code = """
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
vastaus = includes_genre("Programming", series)
"""
try:
answer = includes_genre("Programming", series)
except:
self.fail(f"Check that the following code can be executedn{code}")
expected = 0
self.assertTrue(len(answer)==expected, f"When this code is executed\n{code}\nthe length of the list returned should be {expected}, however, it was {len(answer)}")
code = """
s1 = Series("Dexter", 8, ["Crime", "Drama", "Mystery", "Thriller"])
s1.rate(5)
s2 = Series("South Park", 24, ["Animation", "Comedy"])
s2.rate(3)
s3 = Series("Friends", 10, ["Romance", "Comedy"])
s3.rate(2)
series = [s1, s2, s3]
vastaus = includes_genre("Comedy", series)
"""
try:
answer = includes_genre("Comedy", series)
except:
self.fail(f"Check that the following code can be executedu\n{code}")
expected = 2
self.assertTrue(len(answer)==expected, f"When this code is executed\n{code}\nthe length of the list returned should be {expected}, however, it was {len(answer)}")
ehto = (answer[0].title=="Friends" and answer[1].title=="South Park") or (answer[1].title=="Friends" and answer[0].title=="South Park")
self.assertTrue(ehto, f"When this code is executed\n{code}\nthe list should include Friends and South Park; now the list was {answer[0].title} and {answer[1].title}")
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
from typing import Dict
import base64
import json
import logging
import os
from shlex import quote as shq
from gear.cloud_config import get_global_config
from ....batch_configuration import DOCKER_ROOT_IMAGE, DOCKER_PREFIX, DEFAULT_NAMESPACE, INTERNAL_GATEWAY_IP
from ....file_store import FileStore
from ....instance_config import InstanceConfig
from ...resource_utils import unreserved_worker_data_disk_size_gib
from ..resource_utils import gcp_machine_type_to_worker_type_and_cores
log = logging.getLogger('create_instance')
BATCH_WORKER_IMAGE = os.environ['HAIL_BATCH_WORKER_IMAGE']
log.info(f'BATCH_WORKER_IMAGE {BATCH_WORKER_IMAGE}')
def create_vm_config(
file_store: FileStore,
resource_rates: Dict[str, float],
zone: str,
machine_name: str,
machine_type: str,
activation_token: str,
max_idle_time_msecs: int,
local_ssd_data_disk: bool,
data_disk_size_gb: int,
boot_disk_size_gb: int,
preemptible: bool,
job_private: bool,
project: str,
instance_config: InstanceConfig,
) -> dict:
_, cores = gcp_machine_type_to_worker_type_and_cores(machine_type)
if local_ssd_data_disk:
worker_data_disk = {
'type': 'SCRATCH',
'autoDelete': True,
'interface': 'NVME',
'initializeParams': {'diskType': f'zones/{zone}/diskTypes/local-ssd'},
}
worker_data_disk_name = 'nvme0n1'
else:
worker_data_disk = {
'autoDelete': True,
'initializeParams': {
'diskType': f'projects/{project}/zones/{zone}/diskTypes/pd-ssd',
'diskSizeGb': str(data_disk_size_gb),
},
}
worker_data_disk_name = 'sdb'
if job_private:
unreserved_disk_storage_gb = data_disk_size_gb
else:
unreserved_disk_storage_gb = unreserved_worker_data_disk_size_gib(data_disk_size_gb, cores)
assert unreserved_disk_storage_gb >= 0
make_global_config = ['mkdir /global-config']
global_config = get_global_config()
for name, value in global_config.items():
make_global_config.append(f'echo -n {shq(value)} > /global-config/{name}')
make_global_config_str = '\n'.join(make_global_config)
assert instance_config.is_valid_configuration(resource_rates.keys())
return {
'name': machine_name,
'machineType': f'projects/{project}/zones/{zone}/machineTypes/{machine_type}',
'labels': {'role': 'batch2-agent', 'namespace': DEFAULT_NAMESPACE},
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': f'projects/{project}/global/images/batch-worker-12',
'diskType': f'projects/{project}/zones/{zone}/diskTypes/pd-ssd',
'diskSizeGb': str(boot_disk_size_gb),
},
},
worker_data_disk,
],
'networkInterfaces': [
{
'network': 'global/networks/default',
'networkTier': 'PREMIUM',
'accessConfigs': [{'type': 'ONE_TO_ONE_NAT', 'name': 'external-nat'}],
}
],
'scheduling': {'automaticRestart': False, 'onHostMaintenance': "TERMINATE", 'preemptible': preemptible},
'serviceAccounts': [
{
'email': f'batch2-agent@{project}.iam.gserviceaccount.com',
'scopes': ['https://www.googleapis.com/auth/cloud-platform'],
}
],
'metadata': {
'items': [
{
'key': 'startup-script',
'value': '''
#!/bin/bash
set -x
NAME=$(curl -s http://metadata.google.internal/computeMetadata/v1/instance/name -H 'Metadata-Flavor: Google')
ZONE=$(curl -s http://metadata.google.internal/computeMetadata/v1/instance/zone -H 'Metadata-Flavor: Google')
if [ -f "/started" ]; then
echo "instance $NAME has previously been started"
while true; do
gcloud -q compute instances delete $NAME --zone=$ZONE
sleep 1
done
exit
else
touch /started
fi
curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/run_script" >./run.sh
nohup /bin/bash run.sh >run.log 2>&1 &
''',
},
{
'key': 'run_script',
'value': rf'''
#!/bin/bash
set -x
WORKER_DATA_DISK_NAME="{worker_data_disk_name}"
UNRESERVED_WORKER_DATA_DISK_SIZE_GB="{unreserved_disk_storage_gb}"
# format worker data disk
sudo mkfs.xfs -m reflink=1 -n ftype=1 /dev/$WORKER_DATA_DISK_NAME
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME
sudo mount -o prjquota /dev/$WORKER_DATA_DISK_NAME /mnt/disks/$WORKER_DATA_DISK_NAME
sudo chmod a+w /mnt/disks/$WORKER_DATA_DISK_NAME
XFS_DEVICE=$(xfs_info /mnt/disks/$WORKER_DATA_DISK_NAME | head -n 1 | awk '{{ print $1 }}' | awk 'BEGIN {{ FS = "=" }}; {{ print $2 }}')
# reconfigure docker to use local SSD
sudo service docker stop
sudo mv /var/lib/docker /mnt/disks/$WORKER_DATA_DISK_NAME/docker
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/docker /var/lib/docker
sudo service docker start
# reconfigure /batch and /logs and /gcsfuse to use local SSD
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME/batch/
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/batch /batch
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME/logs/
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/logs /logs
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME/cloudfuse/
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/cloudfuse /cloudfuse
sudo mkdir -p /etc/netns
CORES=$(nproc)
NAMESPACE=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/namespace")
ACTIVATION_TOKEN=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/activation_token")
IP_ADDRESS=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip")
PROJECT=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/project/project-id")
BATCH_LOGS_STORAGE_URI=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/batch_logs_storage_uri")
INSTANCE_ID=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/instance_id")
INSTANCE_CONFIG=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/instance_config")
MAX_IDLE_TIME_MSECS=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/max_idle_time_msecs")
NAME=$(curl -s http://metadata.google.internal/computeMetadata/v1/instance/name -H 'Metadata-Flavor: Google')
ZONE=$(curl -s http://metadata.google.internal/computeMetadata/v1/instance/zone -H 'Metadata-Flavor: Google')
BATCH_WORKER_IMAGE=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/batch_worker_image")
DOCKER_ROOT_IMAGE=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/docker_root_image")
DOCKER_PREFIX=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/docker_prefix")
INTERNAL_GATEWAY_IP=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/internal_ip")
# private job network = 172.20.0.0/16
# public job network = 172.21.0.0/16
# [all networks] Rewrite traffic coming from containers to masquerade as the host
iptables --table nat --append POSTROUTING --source 172.20.0.0/15 --jump MASQUERADE
# [public]
# Block public traffic to the metadata server
iptables --append FORWARD --source 172.21.0.0/16 --destination 169.254.169.254 --jump DROP
# But allow the internal gateway
iptables --append FORWARD --destination $INTERNAL_GATEWAY_IP --jump ACCEPT
# And this worker
iptables --append FORWARD --destination $IP_ADDRESS --jump ACCEPT
# Forbid outgoing requests to cluster-internal IP addresses
INTERNET_INTERFACE=$(ip link list | grep ens | awk -F": " '{{ print $2 }}')
iptables --append FORWARD --out-interface $INTERNET_INTERFACE ! --destination 10.128.0.0/16 --jump ACCEPT
# Setup fluentd
touch /worker.log
touch /run.log
sudo rm /etc/google-fluentd/config.d/* # remove unused config files
sudo tee /etc/google-fluentd/config.d/syslog.conf <<EOF
<source>
@type tail
format syslog
path /var/log/syslog
pos_file /var/lib/google-fluentd/pos/syslog.pos
read_from_head true
tag syslog
</source>
EOF
sudo tee /etc/google-fluentd/config.d/worker-log.conf <<EOF
<source>
@type tail
format json
path /worker.log
pos_file /var/lib/google-fluentd/pos/worker-log.pos
read_from_head true
tag worker.log
</source>
<filter worker.log>
@type record_transformer
enable_ruby
<record>
severity \${{ record["levelname"] }}
timestamp \${{ record["asctime"] }}
</record>
</filter>
EOF
sudo tee /etc/google-fluentd/config.d/run-log.conf <<EOF
<source>
@type tail
format none
path /run.log
pos_file /var/lib/google-fluentd/pos/run-log.pos
read_from_head true
tag run.log
</source>
EOF
sudo cp /etc/google-fluentd/google-fluentd.conf /etc/google-fluentd/google-fluentd.conf.bak
head -n -1 /etc/google-fluentd/google-fluentd.conf.bak | sudo tee /etc/google-fluentd/google-fluentd.conf
sudo tee -a /etc/google-fluentd/google-fluentd.conf <<EOF
labels {{
"namespace": "$NAMESPACE",
"instance_id": "$INSTANCE_ID"
}}
</match>
EOF
rm /etc/google-fluentd/google-fluentd.conf.bak
sudo service google-fluentd restart
{make_global_config_str}
# retry once
docker pull $BATCH_WORKER_IMAGE || \
(echo 'pull failed, retrying' && sleep 15 && docker pull $BATCH_WORKER_IMAGE)
BATCH_WORKER_IMAGE_ID=$(docker inspect $BATCH_WORKER_IMAGE --format='{{{{.Id}}}}' | cut -d':' -f2)
# So here I go it's my shot.
docker run \
-e CLOUD=gcp \
-e CORES=$CORES \
-e NAME=$NAME \
-e NAMESPACE=$NAMESPACE \
-e ACTIVATION_TOKEN=$ACTIVATION_TOKEN \
-e IP_ADDRESS=$IP_ADDRESS \
-e BATCH_LOGS_STORAGE_URI=$BATCH_LOGS_STORAGE_URI \
-e INSTANCE_ID=$INSTANCE_ID \
-e PROJECT=$PROJECT \
-e ZONE=$ZONE \
-e DOCKER_PREFIX=$DOCKER_PREFIX \
-e DOCKER_ROOT_IMAGE=$DOCKER_ROOT_IMAGE \
-e INSTANCE_CONFIG=$INSTANCE_CONFIG \
-e MAX_IDLE_TIME_MSECS=$MAX_IDLE_TIME_MSECS \
-e BATCH_WORKER_IMAGE=$BATCH_WORKER_IMAGE \
-e BATCH_WORKER_IMAGE_ID=$BATCH_WORKER_IMAGE_ID \
-e INTERNET_INTERFACE=$INTERNET_INTERFACE \
-e UNRESERVED_WORKER_DATA_DISK_SIZE_GB=$UNRESERVED_WORKER_DATA_DISK_SIZE_GB \
-e INTERNAL_GATEWAY_IP=$INTERNAL_GATEWAY_IP \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/run/netns:/var/run/netns:shared \
-v /usr/bin/docker:/usr/bin/docker \
-v /usr/sbin/xfs_quota:/usr/sbin/xfs_quota \
-v /batch:/batch:shared \
-v /logs:/logs \
-v /global-config:/global-config \
-v /cloudfuse:/cloudfuse:shared \
-v /etc/netns:/etc/netns \
-v /sys/fs/cgroup:/sys/fs/cgroup \
--mount type=bind,source=/mnt/disks/$WORKER_DATA_DISK_NAME,target=/host \
--mount type=bind,source=/dev,target=/dev,bind-propagation=rshared \
-p 5000:5000 \
--device /dev/fuse \
--device $XFS_DEVICE \
--device /dev \
--privileged \
--cap-add SYS_ADMIN \
--security-opt apparmor:unconfined \
--network host \
$BATCH_WORKER_IMAGE \
python3 -u -m batch.worker.worker >worker.log 2>&1
[ $? -eq 0 ] || tail -n 1000 worker.log
while true; do
gcloud -q compute instances delete $NAME --zone=$ZONE
sleep 1
done
''',
},
{
'key': 'shutdown-script',
'value': '''
set -x
INSTANCE_ID=$(curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/instance_id")
NAME=$(curl -s http://metadata.google.internal/computeMetadata/v1/instance/name -H 'Metadata-Flavor: Google')
journalctl -u docker.service > dockerd.log
''',
},
{'key': 'activation_token', 'value': activation_token},
{'key': 'batch_worker_image', 'value': BATCH_WORKER_IMAGE},
{'key': 'docker_root_image', 'value': DOCKER_ROOT_IMAGE},
{'key': 'docker_prefix', 'value': DOCKER_PREFIX},
{'key': 'namespace', 'value': DEFAULT_NAMESPACE},
{'key': 'internal_ip', 'value': INTERNAL_GATEWAY_IP},
{'key': 'batch_logs_storage_uri', 'value': file_store.batch_logs_storage_uri},
{'key': 'instance_id', 'value': file_store.instance_id},
{'key': 'max_idle_time_msecs', 'value': max_idle_time_msecs},
{
'key': 'instance_config',
'value': base64.b64encode(json.dumps(instance_config.to_dict()).encode()).decode(),
},
]
},
'tags': {'items': ["batch2-agent"]},
}
| nilq/baby-python | python |
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class NoaAccount(ProviderAccount):
"""Noa Account"""
pass
class NoaProvider(OAuth2Provider):
"""Provider for Noa"""
id = 'noa'
name = 'Noa'
account_class = NoaAccount
def extract_uid(self, data):
return str(data['preferred_username'])
provider_classes = [NoaProvider]
| nilq/baby-python | python |
import nltk
grammar = nltk.data.load('file:agree_adjunct.fcfg',cache=False)
parser = nltk.parse.FeatureChartParser(grammar)
agreement_test_sentences = ['Often John left','John left often',
'John often left',
'Because John left Mary cried',
'Mary cried because John left',
'Mary because John left cried',
'Through the door John left',
'John left through the door']
for sent in agreement_test_sentences:
print(sent + '\n')
trees = parser.nbest_parse(sent.split())
if len(trees) == 0:
print('--> ungrammatical\n')
else:
for tree in trees:
print(tree)
print('\n')
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# @Time : 2019-12-20
# @Author : mizxc
# @Email : xiangxianjiao@163.com
from flask_mongoengine import MongoEngine
from flask_login import LoginManager
db = MongoEngine()
loginManager = LoginManager()
| nilq/baby-python | python |
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = []
stack = []
while root or len(stack):
while root:
stack.append(root)
res.insert(0, root.val)
root = root.right
root = stack.pop()
root = root.left
return res
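# Illustrative check (example tree, not part of the original solution):
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   Solution().postorderTraversal(root)  # -> [2, 3, 1]
# The loop visits root -> right -> left while prepending values, which yields
# left -> right -> root, i.e. post-order.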
| nilq/baby-python | python |
"""
A DataNodeServer which serves APEX weather from disk. Based on the original
example, which served modified APEX weather files.
"""
import glob
import os
import six
import time
import numpy as np
from os import environ
from autobahn.wamp.types import ComponentConfig
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from twisted.internet._sslverify import OpenSSLCertificateAuthorities
from twisted.internet.ssl import CertificateOptions
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import threads
from OpenSSL import crypto
import sisock
DATA_LOCATION = "/data/"
def _build_file_list(field, start, end):
"""Build file list for given field and specified start/end range.
Args:
field (str): field name for file search (field must be in file name)
start (float): unixtime stamp for start time
end (float): unixtime stamp for end time
Returns:
list: A sorted list of files with data in the given range for the given
field
"""
t0 = time.time()
file_list = []
all_files = glob.glob(DATA_LOCATION + 'targets/*{field}*.dat'.format(field=field))
all_files.sort()
print("Processing {} files".format(len(all_files)))
# Add files once start falls in the range covered by a file, then keep
# adding them until end falls in the range of another file. The
# construction is a bit strange, my original approach made ranges out of
# the ctimes in a file name and checked if the queried start/end times were
# in the ranges. While this approach was quick when run directly on the
# host, in a Docker container the performance suffered by a factor of
# ~3,500, for reasons I couldn't figure out.
add = False
done = False
for _file in all_files:
file_info = os.path.split(_file)[1].replace(".dat", "").split("_")
file_start = int(file_info[2])
file_end = int(file_info[3])
if done:
break
if add is False:
if start >= file_start and start <= file_end:
add = True
if end >= file_start and end <= file_end:
done = True
else:
if end >= file_start and end <= file_end:
done = True
if add:
file_list.append(_file)
file_list.sort()
print("Built file list in {} seconds".format(time.time() - t0))
return file_list
def _read_data_from_disk(file_list, start, end, max_points=None):
"""Do the I/O to get the data in file_list form disk up to end timestamp.
Args:
file_list (list): list of files to read
end (float): ending timestamp, past which we won't read data
max_points (int): maximum number of points to return
Returns:
dict: properly formatted dict for sisock to pass to grafana
"""
_data = {'data': {}, 'timeline': {}}
for _file in file_list:
file_info = os.path.split(_file)[1].replace(".dat", "").split("_")[1:]
field = file_info[0]
print("Identified field {} for file {}".format(field, _file))
# Initialize the field's data and timeline keys.
if field not in _data['data'].keys():
print("Adding {} to data dictionary".format(field))
_data['data'][field] = []
_data['timeline'][field] = {}
_data['timeline'][field]['t'] = []
_data['timeline'][field]['finalized_until'] = None
else:
print("Key {} already in data dictionary".format(field))
with open(_file, 'r') as f:
for l in f.readlines():
line = l.strip().split()
data = float(line[1])
timestamp = float(line[0])
if timestamp <= end and timestamp >= start:
_data['data'][field].append(data)
_data['timeline'][field]['t'].append(timestamp)
_data['timeline'][field]['finalized_until'] = timestamp
else:
pass
if max_points is not None:
for field in _data['data'].keys():
if max_points < len(_data['data'][field]):
limiter = range(0, len(_data['data'][field]), int(len(_data['data'][field])/max_points))
_data['data'][field] = np.array(_data['data'][field])[limiter].tolist()
_data['timeline'][field]['t'] = np.array(_data['timeline'][field]['t'])[limiter].tolist()
_data['timeline'][field]['finalized_until'] = _data['timeline'][field]['t'][-1]
return _data
class apex_weather(sisock.base.DataNodeServer):
"""A DataNodeServer serving APEX weather station information.
Inherits from :class:`sisock.base.data_node_server`.
"""
def __init__(self, config, max_points=None):
ApplicationSession.__init__(self, config)
self.max_points = max_points
# Here we set the name of this data node server.
self.name = "apex_weather"
self.description = "Weather station information from APEX."
def get_fields(self, start, end):
"""Over-riding the parent class prototype: see the parent class for the
API."""
# Note: These could be built dynamically, however, we've been logging
# these things for ages, and they are unlikely to change. Also, things
# like the description and units are not available within each file
# like they are in the weather example.
field = {"humidity": {"description": "APEX weather station humidity.",
"timeline": "humidity",
"type": "number",
"units": '%'},
"pressure": {"description": "APEX weather station pressure.",
"timeline": "pressure",
"type": "number",
"units": 'mBar'},
"radiometer": {"description": "APEX radiometer data.",
"timeline": "radiometer",
"type": "number",
"units": 'mm'},
"dewpoint": {"description": "APEX weather station dewpoint.",
"timeline": "dewpoint",
"type": "number",
"units": 'C'},
"temperature": {"description": "APEX weather station temperature.",
"timeline": "temperature",
"type": "number",
"units": 'C'},
"windspeed": {"description": "APEX weather station windspeed.",
"timeline": "windspeed",
"type": "number",
"units": 'km/h'},
"winddirection": {"description": "APEX weather station wind direction.",
"timeline": "winddirection",
"type": "number",
"units": 'deg'}}
timeline = {"humidity": {"interval": None,
"field": "humidity"},
"pressure": {"interval": None,
"field": "pressure"},
"radiometer": {"interval": None,
"field": "radiometer"},
"dewpoint": {"interval": None,
"field": "dewpoint"},
"temperature": {"interval": None,
"field": "temperature"},
"windspeed": {"interval": None,
"field": "windspeed"},
"winddirection": {"interval": None,
"field": "winddirection"}}
return field, timeline
def _get_data_blocking(self, field, start, end, min_stride=None):
"""Over-riding the parent class prototype: see the parent class for the
API.
"""
start = sisock.base.sisock_to_unix_time(start)
end = sisock.base.sisock_to_unix_time(end)
file_list = []
for f in field:
try:
file_list += _build_file_list(f, start, end)
except IOError:
# Silently pass over a requested field that doesn't exist.
pass
print('Reading data from disk from {start} to {end}.'.format(start=start, end=end))
return _read_data_from_disk(file_list, start, end, max_points=self.max_points)
if __name__ == "__main__":
# Give time for crossbar server to start
time.sleep(5)
# Because we're using a self-signed certificate, we need to tell Twisted
# that it is OK to trust it.
cert_fname = (".crossbar/server_cert.pem")
cert = crypto.load_certificate(crypto.FILETYPE_PEM,
six.u(open(cert_fname, 'r').read()))
opt = CertificateOptions(trustRoot=OpenSSLCertificateAuthorities([cert]))
# Check variables setup when creating the Docker container.
expected_env = ['MAX_POINTS']
for var in expected_env:
try:
environ[var]
print("Found environment variable {} with value of {}.".format(var, environ[var]))
except KeyError:
environ[var] = None
print("Environment variable {} not provided. \
Setting to None and proceeding.".format(var))
# Start our component.
runner = ApplicationRunner("wss://%s:%d/ws" % (sisock.base.SISOCK_HOST, \
sisock.base.SISOCK_PORT), \
sisock.base.REALM, ssl=opt)
runner.run(apex_weather(ComponentConfig(sisock.base.REALM, {}),
max_points=int(environ['MAX_POINTS'])))
| nilq/baby-python | python |
from functools import reduce
def add(x,y):
return x + y
#print(add(3,4))
print(reduce(add, [1,3,5,7,9,11]))
def fn(x,y):
return x*10 + y
print(reduce(fn, [1,3,5,7,9]))
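# Expected output: 36 (the sum) and 13579 (digits folded left to right).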
| nilq/baby-python | python |
'''
Created by auto_sdk on 2014.11.15
'''
from aliyun.api.base import RestApi
class Slb20130221CreateLoadBalancerHTTPListenerRequest(RestApi):
def __init__(self,domain='slb.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.backendServerPort = None
self.cookie = None
self.cookieTimeout = None
self.domain = None
self.healthCheck = None
self.healthCheckTimeout = None
self.healthyThreshold = None
self.interval = None
self.listenerPort = None
self.listenerStatus = None
self.loadBalancerId = None
self.scheduler = None
self.stickySession = None
self.stickySessionType = None
self.unhealthyThreshold = None
self.uri = None
self.xForwardedFor = None
def getapiname(self):
return 'slb.aliyuncs.com.CreateLoadBalancerHTTPListener.2013-02-21'
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 15:43:09 2017
@author: juherask
"""
import os
DEBUG_VERBOSITY = 3
COST_EPSILON = 1e-10
CAPACITY_EPSILON = 1e-10
# how many seconds we give to a MIP solver
MAX_MIP_SOLVER_RUNTIME = 60*10 # 10m
MIP_SOLVER_THREADS = 1 # 0 is automatic (parallel computing)
# venv does not allow use of ~ for some reason in paths on Ubuntu 20.04.
BENCHMARKS_BASEPATH = os.path.join(os.environ["HOME"], r"Projects/Research/VRPBenchmarks")
LKH_EXE_PATH = os.path.join(os.environ["HOME"], r"Projects/Research/TSP/LKH-2.0.9/LKH")
LKH_EXACT_DISTANCES_PRECISION_DECIMALS = 1000.0 # of the form 0.123
ACOTSP_EXE_PATH = os.path.join(os.environ["HOME"], r"Projects/Research/TSP/ACOTSP-master/acotsp")
ACOTSP_EXACT_DISTANCES_PRECISION_DECIMALS = 1000.0 # of the form 0.123
| nilq/baby-python | python |
"""
This program handle incomming OSC messages to MIDI
"""
import argparse
import random
import time
import json
import sqlite3
import mido
from pythonosc import dispatcher
from pythonosc import osc_server
from pythonosc import osc_message_builder
from pythonosc import udp_client
from lib.midiHelper import *
from lib.database import Database
from mappings.mapping import ControllerConfig, DawConfig
class OscToMidi:
def __init__(self, ipAddr, port ):
self.ipAddr = ipAddr
self.port = port
self.db = Database()
# Init Midi client and display available devices
midiPort = mido.get_output_names()[0]
self.midiOUT = mido.open_output(midiPort)
# Get the DAW OSC configuration
self.dawConfig = DawConfig(self.db.getDawName())
# Get the Controller MIDI configuration
self.ctrlConfig = ControllerConfig(self.db.getControllerName())
# client to send feedback request
self._oscClient = udp_client.UDPClient('10.0.0.42',3819)
def waitForOscMessage(self):
"""
Wait until osc is received
"""
self.dispatcher = dispatcher.Dispatcher()
self._routes()
msg = osc_message_builder.OscMessageBuilder(address = "/set_surface/feedback")
msg.add_arg(4095)
self._oscClient.send(msg.build())
print("Sending {}".format(msg.address))
msg = osc_message_builder.OscMessageBuilder(address = "/strip/fader")
msg.add_arg(1)
msg.add_arg(1)
self._oscClient.send(msg.build())
server = osc_server.ThreadingOSCUDPServer(
(self.ipAddr, self.port), self.dispatcher)
print("Serving on {}".format(server.server_address))
# TODO : display this config on OLED displays
server.serve_forever()
def _routes(self):
"""
Route OSC messages to corresponding controller function
"""
dc = self.dawConfig
buttonMode = self.db.getButtonMode()
self.dispatcher.map("/heartbeat", print)
# Faders
self.dispatcher.map(dc.getFaderAddress(), self._dispatchFader)
# Buttons line1
self.dispatcher.map(dc.getButtonAddress(1, buttonMode), self._dispatchButtonsLine1)
# Buttons line2
self.dispatcher.map(dc.getButtonAddress(2, buttonMode), self._dispatchButtonsLine2)
"""
# Function buttons
for fButton in dc.getFunctionAddress():
self.dispatcher.map(dc.getFunctionAddress(fButton), self._dispatchFunctionButtons, fButton )
"""
# Other
self.dispatcher.map("/debug", print)
def _dispatchFader(self, address, stripId, faderValue):
"""
Convert fader OSC value to MIDI value
"""
faderMidiRange = self.ctrlConfig.getFaderMidiRange()
faderOSCRange = self.dawConfig.getFaderOSCRange()
faderMove = self.ctrlConfig.getFaderMove("type")
readyVal = convertValueToMidiRange(faderValue, self.dawConfig.getFaderOSCRange(), self.ctrlConfig.getFaderMidiRange())
# TODO: handle bank (should be available in database or memory)
# stripId with bank handle
bank = self.db.getCurrentBank()
bankSize = self.db.getBankSize()
sId = stripId
# need to stay in 1 -> bankSize range
if(sId > bankSize):
sId = (sId % bankSize) +1
midiMessage = "{} ch: {} value:{}".format(faderMove, sId, readyVal)
print("Dispatching OSC: {} {} {} to MIDI: {} ".format(address,stripId,faderValue, midiMessage))
msg = mido.Message('pitchwheel', pitch=readyVal, channel=sId)
self.midiOUT.send(msg)
def _dispatchButtonsLine1(self, address, stripId, buttonValue):
"""
Convert Solo / Rec OSC value to MIDI value
"""
# Do nothing if not good mode
buttonMode = self.db.getButtonMode()
bank = self.db.getCurrentBank()
bankSize = self.db.getBankSize()
if buttonMode == "solomute" and "rec" in address:
return
line = 1
buttonsMidiNotes = self.ctrlConfig.getButtonNotes(line)
buttonsMidiType = self.ctrlConfig.getButtonType(line)
sId = stripId -1
# need to stay in 1 -> bankSize range
if(sId >= bankSize):
sId = (sId % bankSize)
midiNote = midiFullNoteToNumber(buttonsMidiNotes[sId])
midiVelocity = 127 #buttonsMidiValueOn if buttonValue else buttonsMidiValueOff
msg = mido.Message(buttonsMidiType, note=midiNote, velocity=midiVelocity)
print("Dispatching OSC: {} {} {} to MIDI: {} ".format(address,stripId,buttonValue, msg))
self.midiOUT.send(msg)
def _dispatchButtonsLine2(self, address, stripId, buttonValue):
"""
Convert Mute / Select OSC value to MIDI value
"""
buttonMode = self.db.getButtonMode()
bank = self.db.getCurrentBank()
bankSize = self.db.getBankSize()
# Do nothing if not good mode
if buttonMode == "solomute" and "select" in address:
return
line = 2
buttonsMidiNotes = self.ctrlConfig.getButtonNotes(line)
buttonsMidiType = self.ctrlConfig.getButtonType(line)
sId = stripId - 1
# need to stay in 1 -> bankSize range
if(sId >= bankSize):
sId = (sId % bankSize)
midiNote = midiFullNoteToNumber(buttonsMidiNotes[sId])
midiVelocity = 127 #buttonsMidiValueOn if buttonValue else buttonsMidiValueOff
msg = mido.Message(buttonsMidiType, note=midiNote, velocity=midiVelocity)
print("Dispatching OSC: {} {} {} to MIDI: {} ".format(address,stripId,buttonValue, msg))
self.midiOUT.send(msg)
def _dispatchFunctionButtons(self, address, bname):
"""
Convert function button OSC value to MIDI value
"""
bname = bname[0]
fNote = midiFullNoteToNumber(self.ctrlConfig.getfButtonNote(bname,"note"))
fVelocity = self.ctrlConfig.getfButtonNote(bname,"valueOn")
fChannel = self.ctrlConfig.getfButtonNote(bname,"ch")
fType = self.ctrlConfig.getfButtonNote(bname,"type")
msg = mido.Message(fType, note=fNote, velocity=fVelocity, channel=fChannel)
print("Dispatching OSC: {} (mapped to {}) to MIDI: {} ".format(address,bname, msg))
self.midiOUT.send(msg)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="127.0.0.1",
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=8000,
help="The port the OSC server is listening on")
args = parser.parse_args()
oscMIDI = OscToMidi(args.ip, args.port)
oscMIDI.waitForOscMessage()
| nilq/baby-python | python |
from builtins import range
from .partition import LabelSpacePartitioningClassifier
import copy
import random
import numpy as np
from scipy import sparse
class RakelD(LabelSpacePartitioningClassifier):
"""Distinct RAndom k-labELsets multi-label classifier."""
def __init__(self, classifier=None, labelset_size=None, require_dense=None):
super(RakelD, self).__init__(
classifier=classifier, require_dense=require_dense)
self.labelset_size = labelset_size
self.copyable_attrs = ['labelset_size', 'classifier', 'require_dense']
def generate_partition(self, X, y):
"""Randomly partition the label space
This function randomly partitions the label space of
:code:`n_labels` into :code:`n_label/k`
equipartitions of size :code:`k`. Sets
:code:`self.partition`, :code:`self.model_count` and
:code:`self.label_count`.
Parameters
-----------
X : numpy.ndarray or scipy.sparse
not used, maintained for API compatibility
y : numpy.ndarray or scipy.sparse
binary indicator matrix with label assigments of shape
:code:`(n_samples, n_labels)`
"""
label_sets = []
self.label_count = y.shape[1]
free_labels = range(self.label_count)
self.model_count = int(np.ceil(self.label_count / self.labelset_size))
while len(label_sets) <= self.model_count:
if len(free_labels) == 0:
break
if len(free_labels) < self.labelset_size:
# assign the leftover labels to a final subset and stop, so they are not appended twice
label_sets.append(list(free_labels))
break
label_set = random.sample(free_labels, self.labelset_size)
free_labels = list(set(free_labels).difference(set(label_set)))
label_sets.append(label_set)
self.partition = label_sets
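# Illustrative outcome (not part of the original code): with label_count=5 and
# labelset_size=2 this yields ceil(5/2)=3 disjoint label subsets, e.g.
# [[3, 0], [4, 1], [2]]; the parent class then fits one base classifier per subset.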
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import nose
from nose.tools.trivial import eq_
from jpgrep.morpheme import tokenize
from jpgrep.morpheme import StreamDetector
class Test_tokenize(object):
def test(self):
""" 文章が適切に形態素に分解される """
text = u'吾輩は猫である'
expect = [u'吾輩', u'は', u'猫', u'で', u'ある']
tokens = tokenize(text)
eq_(tokens, expect)
class Test_StreamDetector(object):
def test_hit(self):
""" 形態素にもとづいて文章にマッチする """
query = u'吾輩'
detector = StreamDetector(query)
line = u'吾輩は猫である'
trove = detector.feed(line)
eq_(trove.line, line)
eq_(trove.position, 0)
def test_hit_tokens(self):
""" 複数の形態素でも文章にマッチする """
query = u'は猫で'
detector = StreamDetector(query)
line = u'吾輩は猫である'
trove = detector.feed(line)
eq_(trove.line, line)
eq_(trove.position, 2)
def test_miss(self):
""" 形態素にもとづいて文章にマッチしない """
query = u'輩'
detector = StreamDetector(query)
line = u'吾輩は猫である'
trove = detector.feed(line)
eq_(trove, None)
def test_inverse(self):
""" マッチしない言葉を探す """
query = u'輩'
detector = StreamDetector(query, inverse=True)
line = u'吾輩は猫である'
trove = detector.feed(line)
eq_(trove.line, line)
if __name__ == '__main__':
nose.main(argv=['nosetests', '-s', '-v'], defaultTest=__file__)
| nilq/baby-python | python |
def or_op(ctx, a, b):
if isinstance(b, list):
if a == True:
return True
if a == False:
return []
if isinstance(a, list):
return []
if isinstance(a, list):
if b == True:
return True
return []
return a or b
def and_op(ctx, a, b):
if isinstance(b, list):
if a == True:
return []
if a == False:
return False
if isinstance(a, list):
return []
if isinstance(a, list):
if b == True:
return []
return False
return a and b
def xor_op(ctx, a, b):
# If a or b are arrays, they must be the empty set.
# In that case, the result is always the empty set.
if isinstance(a, list) or isinstance(b, list):
return []
return (a and not b) or (not a and b)
def implies_op(ctx, a, b):
if isinstance(b, list):
if a == True:
return []
if a == False:
return True
if isinstance(a, list):
return []
if isinstance(a, list):
if b == True:
return True
return []
if a == False:
return True
return a and b
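# Illustrative semantics (the empty list models an empty/unknown collection):
#   or_op(None, True, [])       -> True
#   and_op(None, False, [])     -> False
#   xor_op(None, True, [])      -> []
#   implies_op(None, False, []) -> True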
| nilq/baby-python | python |
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import json
import os
import chalk
import youtube_dl
import random
import io
import aiohttp
import time
import datetime
from datetime import datetime as dt
import logging
import re
from itertools import cycle
class HelpList:
def __init__(self, bot):
self.client = bot
@commands.command(pass_context=True, no_pm=True)
async def help(self, ctx):
embed1 = discord.Embed(
color = discord.Colour.orange()
)
embed1.add_field(name="**Management**", value="\u200b")
# embed1.add_field(name="Commands", icon_url=author)
embed1.add_field(name="`>invite`", value="Add ZeeBot to your servers with the invite link", inline=False)
embed1.add_field(name="`>ping`", value="Returns Pong!", inline=False)
embed1.add_field(name="`>userinfo @user`", value="Returns information about user!", inline=False)
embed1.add_field(name="`>serverinfo`", value="Returns information about server!", inline=False)
embed1.add_field(name="`>about`", value="Returns information about ZeeBot!", inline=False)
embed1.add_field(name="`>prune [amount]`", value="Deletes [amount] of messages", inline=False)
embed1.add_field(name="`>uptime`", value="ZeeBot's uptime", inline=False)
embed1.add_field(name="`>kick @user`", value="Kicks user", inline=False)
await self.client.say(embed=embed1)
embed2 = discord.Embed(
color = discord.Colour.orange()
)
embed2.add_field(name="**Music**", value="\u200b")
# embed2.add_field(name=">playurl [url]", value="Plays music from YouTube URL", inline=False)
# embed2.add_field(name=">play [song name]", value="Plays music from song name", inline=False)
# embed2.add_field(name=">queueurl [url]", value="Queue a song from url", inline=False)
# embed2.add_field(name=">queue [song name]", value="Queue a song", inline=False)
embed2.add_field(name=">play [song name / url]", value="Plays music from song name / URL. Automatically queues song.", inline=False)
embed2.add_field(name=">pause", value="Pause current music", inline=False)
embed2.add_field(name=">resume", value="Resume current music", inline=False)
embed2.add_field(name=">stop", value="Stop all music and leave from voice channel", inline=False)
embed2.add_field(name=">skip", value="Skips song. Song requester = instant skip.", inline=False)
embed2.add_field(name=">summon", value="Summons / move bot to voice channel", inline=False)
embed2.add_field(name=">playing", value="Get current song information", inline=False)
embed2.add_field(name=">volume", value="Set song volume", inline=False)
await self.client.say(embed=embed2)
embed3 = discord.Embed(
color = discord.Colour.orange()
)
embed3.add_field(name="**Games/Fun**", value="\u200b")
embed3.add_field(name=">8ball", value="Get your answers from the Magic 8 Ball", inline=False)
embed3.add_field(name=">coinflip", value="Coin Flip", inline=False)
embed3.add_field(name=">roll", value="Rolls a number from 1 to 100", inline=False)
embed3.add_field(name=">choose", value="Chooses for you. (test, test2, test3)", inline=False)
embed3.add_field(name=">gif [search]", value="Searches a random gif with related keyword", inline=False)
# await self.client.send_message(author, embed=embed) #sends message to user
await self.client.say(embed=embed3)
def setup(bot):
bot.add_cog(HelpList(bot))
| nilq/baby-python | python |
import datetime
import os
from django import forms
from django.conf import settings
from decharges.decharge.models import UtilisationTempsDecharge
from decharges.decharge.views.utils import calcul_repartition_temps
from decharges.user_manager.models import Syndicat
class UtilisationTempsDechargeForm(forms.ModelForm):
heures_d_obligation_de_service = forms.ChoiceField(
label="Heures d'obligations de service", choices=settings.CHOIX_ORS
)
int_heures_de_decharges = forms.IntegerField(
label="Heures de décharge utilisées", min_value=0, initial=0
)
minutes_de_decharges = forms.IntegerField(
label="Minutes de décharge utilisées",
min_value=0,
max_value=59,
required=False,
initial=0,
)
decharge_applicable_uniquement_sur_une_partie_de_lannee = forms.BooleanField(
label="La décharge est-elle applicable uniquement sur une partie de l'année ?",
help_text="Si cette case est décochée,"
"la décharge s'applique pour l'ensemble de l'année scolaire",
required=False,
)
def __init__(self, *args, **kwargs):
self.syndicat = kwargs.pop("syndicat")
self.annee = kwargs.pop("annee")
self.debut_de_lannee = datetime.date(year=self.annee, month=9, day=1)
self.fin_de_lannee = datetime.date(year=self.annee + 1, month=8, day=31)
self.decharges_editables = kwargs.pop("decharges_editables")
self.corps_annexe = kwargs.pop("corps_annexe")
self.federation = kwargs.pop("federation")
super().__init__(*args, **kwargs)
if self.instance and self.instance.etp_prorata < 1:
self.fields[
"decharge_applicable_uniquement_sur_une_partie_de_lannee"
].initial = True
self.fields["prenom"].label = "Prénom"
self.fields["prenom"].help_text = (
"- Doit commencer par une Majuscule <br>"
"- Ne doit pas commencer ou finir par un espace <br>"
"- Ne doit pas contenir 2 espaces consécutifs <br>"
"- Ne doit pas contenir de caractères spéciaux"
)
self.fields["prenom"].widget.attrs["placeholder"] = "ex : Michelle"
self.fields["nom"].label = "Nom"
self.fields["nom"].widget.attrs["placeholder"] = "ex : MARTIN"
self.fields["nom"].help_text = (
"- Doit être en MAJUSCULE <br>"
"- Ne doit pas commencer ou finir par un espace <br>"
"- Ne doit pas contenir 2 espaces consécutifs <br>"
"- Ne doit pas contenir de caractères spéciaux"
)
self.fields[
"code_etablissement_rne"
].help_text = (
"Le code établissement d'affectation (7 chiffres et une lettre majuscule)"
)
self.fields["code_etablissement_rne"].widget.attrs[
"placeholder"
] = "ex: 1234567A"
self.fields["date_debut_decharge"].widget.input_type = "date"
self.fields["date_debut_decharge"].widget.format = "%Y-%m-%d"
self.fields["date_debut_decharge"].widget.attrs.update(
{
"type": "date",
"min": self.debut_de_lannee,
"max": self.fin_de_lannee,
"value": self.instance.date_debut_decharge or self.debut_de_lannee,
}
)
self.fields["date_debut_decharge"].widget.attrs[
"wrapper_classes"
] = "column is-6 py-0"
self.fields["date_fin_decharge"].widget.input_type = "date"
self.fields["date_fin_decharge"].widget.format = "%Y-%m-%d"
self.fields["date_fin_decharge"].widget.attrs.update(
{
"type": "date",
"min": self.debut_de_lannee,
"max": self.fin_de_lannee,
"value": self.instance.date_fin_decharge or self.fin_de_lannee,
}
)
self.fields["date_fin_decharge"].widget.attrs[
"wrapper_classes"
] = "column is-6 py-0"
if not self.decharges_editables:
            # the federation can choose, via the form, which syndicat uses the décharge
self.fields["syndicat"] = forms.ModelChoiceField(
label="Syndicat qui utilise ce temps",
queryset=Syndicat.objects.all().order_by("username"),
initial=self.syndicat,
)
if self.instance.pk:
self.fields["prenom"].widget.attrs["readonly"] = True
self.fields["nom"].widget.attrs["readonly"] = True
self.fields["code_etablissement_rne"].widget.attrs["readonly"] = True
self.fields["commentaire_de_mise_a_jour"] = forms.CharField(
label="Pourquoi cette mise à jour en cours d'année ?",
widget=forms.Textarea(),
initial=self.instance.commentaire_de_mise_a_jour,
)
if self.corps_annexe:
self.fields["corps"].help_text = (
f"Voir <a href='{self.corps_annexe.url}' target='_blank'>"
f"{os.path.basename(self.corps_annexe.name)} "
f"<span class='fa fa-external-link-alt fa-xs'></span>"
"</a> (cliquer sur le lien ne quitte pas la page actuelle)"
)
if self.federation == self.syndicat:
self.fields["est_une_decharge_solidaires"] = forms.BooleanField(
label="Est une décharge solidaires",
help_text="Cocher cette case uniquement si la décharge vient d'un autre "
"syndicat que SUD éducation",
initial=self.instance.est_une_decharge_solidaires,
required=False,
)
self.fields["int_heures_de_decharges"].initial = int(
self.instance.heures_de_decharges
)
self.fields["int_heures_de_decharges"].widget.attrs[
"wrapper_classes"
] = "column is-6 py-0"
self.fields["minutes_de_decharges"].initial = round(
(
self.instance.heures_de_decharges
- self.fields["int_heures_de_decharges"].initial
)
* 60
)
self.fields["minutes_de_decharges"].widget.attrs[
"wrapper_classes"
] = "column is-6 py-0"
def _populate_instance(self):
if self.decharges_editables:
self.instance.syndicat = self.syndicat
else:
            # the federation can choose, via the form, which syndicat uses the décharge
self.instance.syndicat = self.cleaned_data["syndicat"]
self.instance.commentaire_de_mise_a_jour = self.cleaned_data.get(
"commentaire_de_mise_a_jour"
)
self.instance.annee = self.annee
self.instance.heures_de_decharges = self.cleaned_data["int_heures_de_decharges"]
self.instance.est_une_decharge_solidaires = self.cleaned_data.get(
"est_une_decharge_solidaires", False
)
if self.cleaned_data["minutes_de_decharges"]:
self.instance.heures_de_decharges += (
self.cleaned_data["minutes_de_decharges"] / 60
)
def validate_unique(self):
exclude = self._get_validation_exclusions()
exclude = set(exclude) - {
"id",
"annee",
"syndicat",
"est_une_decharge_solidaires",
"nom",
"prenom",
"code_etablissement_rne",
}
try:
self.instance.validate_unique(exclude=exclude)
except forms.ValidationError:
self._update_errors(
forms.ValidationError(
"Une décharge pour cette ou ce bénéficiaire existe déjà, "
"veuillez plutôt la mettre à jour"
)
)
def full_clean(self):
super().full_clean()
if not hasattr(self, "cleaned_data"):
return
(_, _, _, _, _, _, temps_restant, _, _,) = calcul_repartition_temps(
self.annee,
self.federation,
self.instance.syndicat,
excluded_utilisation_temps_de_decharge_pk=self.instance.pk,
)
        # check that this décharge does not push the syndicat over its décharge quota
if (
not self.instance.est_une_decharge_solidaires
and temps_restant - self.instance.etp_utilises < 0
and hasattr(self, "cleaned_data")
):
self.add_error(
None,
f"Vous dépassez le quota du syndicat, il reste {temps_restant:.3f} ETP "
f"attribuable et vous essayez d'ajouter {self.instance.etp_utilises:.3f} ETP",
)
        # check that this décharge does not push the beneficiary over their décharge quota
        # 0.5 ETP in the current year?
decharges_annee_en_cours = UtilisationTempsDecharge.objects.filter(
nom=self.instance.nom,
prenom=self.instance.prenom,
annee=self.instance.annee,
code_etablissement_rne=self.instance.code_etablissement_rne,
).exclude(pk=self.instance.pk)
etp_consommes = sum(
decharge.etp_utilises for decharge in decharges_annee_en_cours
)
temps_restant_beneficiaire = settings.MAX_ETP_EN_UNE_ANNEE - etp_consommes
if temps_restant_beneficiaire < self.instance.etp_utilises:
self.add_error(
None,
"Vous dépassez le quota du bénéficiaire, il lui reste au maximum "
f"{temps_restant_beneficiaire:.3f} ETP à consommer "
f"et vous essayez de lui ajouter {self.instance.etp_utilises:.3f} ETP",
)
historique_decharges_beneficiaire = (
UtilisationTempsDecharge.objects.filter(
nom=self.instance.nom,
prenom=self.instance.prenom,
code_etablissement_rne=self.instance.code_etablissement_rne,
)
.exclude(pk=self.instance.pk)
.order_by("-annee")
)
etp_consecutifs = 0
annees_consecutives = 0
annee_courante = self.instance.annee
for decharge in historique_decharges_beneficiaire:
if (
annee_courante - decharge.annee
> settings.NB_ANNEES_POUR_REINITIALISER_LES_COMPTEURS
):
break
l_annee_a_changee = decharge.annee != annee_courante
annee_courante = decharge.annee
if l_annee_a_changee:
annees_consecutives += 1
etp_consecutifs += decharge.etp_utilises
        # 8 consecutive years?
if annees_consecutives >= settings.MAX_ANNEES_CONSECUTIVES:
self.add_error(
None,
f"La ou le bénéficiaire cumule déjà {settings.MAX_ANNEES_CONSECUTIVES} "
"années consécutives de décharges, il ou elle ne peut donc pas bénéficier de "
"décharges cette année",
)
        # 3 consecutive ETP?
if etp_consecutifs + self.instance.etp_utilises >= settings.MAX_ETP_CONSECUTIFS:
self.add_error(
None,
f"La ou le bénéficiaire cumule déjà {etp_consecutifs:.3f}ETP "
"consécutifs de décharges sur les dernières années (+l'année en cours) et vous"
f" essayez de rajouter {self.instance.etp_utilises:.3f}ETP",
)
def clean(self):
self._populate_instance()
cleaned_data = super().clean()
if cleaned_data.get(
"est_une_decharge_solidaires"
) and self.federation != cleaned_data.get("syndicat", self.syndicat):
self.add_error(
"est_une_decharge_solidaires",
"La décharge ne peut provenir d'un autre syndicat uniquement "
"pour les décharges fédérales",
)
if (
cleaned_data.get("decharge_applicable_uniquement_sur_une_partie_de_lannee")
is False
):
cleaned_data["date_debut_decharge"] = self.debut_de_lannee
cleaned_data["date_fin_decharge"] = self.fin_de_lannee
date_debut_decharge = cleaned_data.get("date_debut_decharge")
date_fin_decharge = cleaned_data.get("date_fin_decharge")
if date_debut_decharge and (
date_debut_decharge > date_fin_decharge
or date_debut_decharge > self.fin_de_lannee
or date_debut_decharge < self.debut_de_lannee
):
self.add_error(
"date_debut_decharge",
"La date de début de décharge doit être une date dans l'année "
"inférieure à la date de fin de décharge",
)
if date_fin_decharge and (
date_fin_decharge < date_debut_decharge
or date_fin_decharge > self.fin_de_lannee
or date_fin_decharge < self.debut_de_lannee
):
self.add_error(
"date_fin_decharge",
"La date de fin de décharge doit être une date dans l'année "
"supérieure à la date de début de décharge",
)
return cleaned_data
class Meta:
model = UtilisationTempsDecharge
fields = [
"civilite",
"prenom",
"nom",
"heures_d_obligation_de_service",
"corps",
"code_etablissement_rne",
"int_heures_de_decharges",
"minutes_de_decharges",
"decharge_applicable_uniquement_sur_une_partie_de_lannee",
"date_debut_decharge",
"date_fin_decharge",
]
|
nilq/baby-python
|
python
|
''' Parser for creating mathematical equations.
'''
import re
from regex_parser import BaseParser
import src.svg as svg
from StringIO import StringIO
matplotlib_included = True
try:
import matplotlib
matplotlib.use('SVG')
from matplotlib import pyplot
except Exception:
matplotlib_included = False
def register_docpicture_parser(register_parser):
register_parser(Equations)
class Equations(BaseParser):
    '''a parser creating images of mathematical equations'''
def __init__(self):
self.directive_name = 'equation'
def get_svg_defs(self):
'''No svg diagrams produced by this parser.'''
return svg.Comment("ignore me")
def create_picture(self, lines):
'''Parses all received lines of code.
We assume that all lines are meant to be a single line equation
'''
if not matplotlib_included:
text = "A recent version of matplotlib is needed for this example."
warning = svg.XmlElement("pre", text=text)
warning.attributes["class"] = "warning"
return warning
equation = ' '.join(lines)
fig = pyplot.figure()
fig.set_size_inches(8, 1)
ax = fig.add_axes([0., 0., 1.0, 1.0])
ax.set_axis_off()
ax.text(0, 0, r"$%s$"%equation, color='#11557c', fontsize=25)
temp_file = StringIO()
fig.savefig(temp_file)
content = temp_file.getvalue()
temp_file.close()
lines = content.split("\n")
content = '\n'.join(lines[4:])
return content
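# --- Hedged standalone sketch (not used by the parser above) ---
# The same matplotlib-to-SVG technique as create_picture(), written as a
# self-contained helper. The sample equation and the byte-buffer handling are
# illustrative assumptions, not part of the original module.
def _equation_to_svg_demo(equation=r"E = mc^2"):
    from io import BytesIO
    if not matplotlib_included:
        return None
    fig = pyplot.figure()
    fig.set_size_inches(8, 1)
    ax = fig.add_axes([0., 0., 1.0, 1.0])
    ax.set_axis_off()
    ax.text(0, 0, r"$%s$" % equation, color='#11557c', fontsize=25)
    buf = BytesIO()
    fig.savefig(buf, format='svg')  # the SVG backend was selected above
    pyplot.close(fig)
    return buf.getvalue().decode('utf-8')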
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
import time
import json
import random
import logging
import collections
import configparser
import requests
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
HSession = requests.Session()
re_mdescape = re.compile(r'([\[\*_])')
mdescape = lambda s: re_mdescape.sub(r'\\\1', s)
class BotAPIFailed(Exception):
def __init__(self, ret):
self.ret = ret
self.description = ret['description']
self.error_code = ret['error_code']
self.parameters = ret.get('parameters')
def __repr__(self):
return 'BotAPIFailed(%r)' % self.ret
class TelegramBotClient:
def __init__(self, apitoken, username=None, config=None):
self.token = apitoken
if username:
self.username = username
else:
self.username = self.bot_api('getMe')['username']
self.config = config
self.offset = None
self.run = True
def bot_api(self, method, **params):
for att in range(3):
try:
req = HSession.post(('https://api.telegram.org/bot%s/' %
self.token) + method, data=params, timeout=45)
retjson = req.content
ret = json.loads(retjson.decode('utf-8'))
break
except Exception as ex:
if att < 1:
time.sleep((att + 1) * 2)
else:
raise ex
if not ret['ok']:
raise BotAPIFailed(ret)
return ret['result']
def parse_cmd(self, text: str):
t = text.strip().replace('\xa0', ' ').split(' ', 1)
if not t:
return None, None
cmd = t[0].rsplit('@', 1)
if len(cmd[0]) < 2 or cmd[0][0] != '/':
return None, None
if len(cmd) > 1 and cmd[-1] != self.username:
return None, None
expr = t[1] if len(t) > 1 else ''
return cmd[0][1:], expr
def serve(self, **kwargs):
'''
**kwargs is a map for callbacks. For example: {'message': process_msg}
'''
while self.run:
try:
updates = self.bot_api('getUpdates', offset=self.offset, timeout=30)
            except BotAPIFailed as ex:
                if ex.parameters and 'retry_after' in ex.parameters:
                    time.sleep(ex.parameters['retry_after'])
                continue
except Exception:
logging.exception('Get updates failed.')
continue
if not updates:
continue
self.offset = updates[-1]["update_id"] + 1
for upd in updates:
for k, v in upd.items():
if k == 'update_id':
continue
elif kwargs.get(k):
kwargs[k](self, v)
time.sleep(.2)
def __getattr__(self, name):
return lambda **kwargs: self.bot_api(name, **kwargs)
apiheader = {'X-Requested-With': 'XMLHttpRequest'}
def message_handler(cli, msg):
msgtext = msg.get('text', '')
cmd, expr = cli.parse_cmd(msgtext)
cmds = {
'pkgver': cmd_pkgver,
'search': cmd_search,
'getupdreq': cmd_getupdreq,
'start': lambda *args: None
}
if not cmd:
return
elif cmd in cmds:
try:
ret = cmds[cmd](cli, msg, expr)
logging.info('Command: ' + msgtext)
except Exception:
logging.exception('Failed command: ' + msgtext)
ret = "Failed to fetch data. Please try again later."
if not ret:
return
try:
cli.sendMessage(chat_id=msg['chat']['id'], text=ret,
parse_mode='Markdown', disable_web_page_preview=True)
except Exception:
logging.exception('Failed to send: ' + ret)
def cmd_pkgver(cli, msg, expr):
package = expr.strip()
if not package:
return
url = cli.config['API']['endpoint'] + 'packages/' + package
url2 = cli.config['API']['urlhead'] + 'packages/' + package
req = HSession.get(url, timeout=10, headers=apiheader)
d = req.json()
if req.status_code == 404:
return mdescape(d['error'])
req.raise_for_status()
pkg = d['pkg']
text = ['Package: [%s](%s)' % (package, url2),
'*source*: ' + (pkg.get('full_version') or 'missing')]
repos = collections.OrderedDict()
for repo, dpkgs in pkg['dpkg_matrix']:
for dpkg in dpkgs:
if not dpkg or dpkg['repo'] in repos:
continue
else:
repos[dpkg['repo']] = dpkg['version']
text.extend('*%s*: %s' % kv for kv in repos.items())
if pkg.get('upstream'):
text.append('*upstream*: [%s](%s)' % (
pkg['upstream']['version'], pkg['upstream']['url']))
return '\n'.join(text)
def cmd_search(cli, msg, expr):
package = expr.strip()
if not package:
return
url = cli.config['API']['endpoint'] + ('search/?q=%s&noredir=1' % package)
url2 = cli.config['API']['urlhead'] + ('search/?q=%s&noredir=1' % package)
req = HSession.get(url, timeout=10, headers=apiheader)
d = req.json()
if req.status_code == 404:
return mdescape(d['error'])
req.raise_for_status()
text = ['Search: [%s](%s)' % (package, url2)]
for pkg, _ in zip(d['packages'], range(5)):
text.append('*%s* %s' % (pkg['name'], pkg['full_version']))
return '\n'.join(text)
def cmd_getupdreq(cli, msg, expr):
url = cli.config['API']['endpoint'] + 'srcupd/aosc-os-abbs?page=all'
req = HSession.get(url, timeout=10, headers=apiheader)
d = req.json()
if req.status_code == 404:
return mdescape(d['error'])
req.raise_for_status()
text = []
for pkg in random.sample(d['packages'], 5):
text.append('*%s* [%s](%s) → [%s](%s)' % (
pkg['name'], pkg['version'],
cli.config['API']['urlhead'] + 'packages/' + pkg['name'],
pkg['upstream_version'], pkg['upstream_url']))
return '\n'.join(text)
def load_config(filename):
cp = configparser.ConfigParser()
cp.read(filename)
return cp
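# --- Hedged helper sketch (not called anywhere in this script) ---
# Writes a minimal config.ini with the keys that main() and the command
# handlers read (Bot.apitoken, Bot.username, API.endpoint, API.urlhead).
# Every value below is a placeholder assumption, not a real token or endpoint.
def write_example_config(filename='config.ini'):
    cp = configparser.ConfigParser()
    cp['Bot'] = {
        'apitoken': '123456:ABC-DEF',  # placeholder Telegram bot token
        'username': 'examplebot',      # optional; fetched via getMe if omitted
    }
    cp['API'] = {
        'endpoint': 'https://example.org/api/',  # JSON API base used by the commands
        'urlhead': 'https://example.org/',       # base URL used to build links
    }
    with open(filename, 'w') as f:
        cp.write(f)
    return filename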
def main():
config = load_config('config.ini')
botcli = TelegramBotClient(
config['Bot']['apitoken'], config['Bot'].get('username'), config)
logging.info('Satellite launched.')
botcli.serve(message=message_handler)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
a = mc.get_mc_argument_parser()
a.description = """Read the obsid from a file and create a record in M&C."""
a.add_argument('files', metavar='file', type=str, nargs='*', default=[],
help='*.uvh5 files to add')
args = a.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data file is uvh5
uv = UVData()
uv.read_uvh5(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale='utc', format='jd')
stoptime = Time(times[-1], scale='utc', format='jd')
obsid = int(np.floor(starttime.gps))
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) > 0:
print("observation {obs} already in M&C, skipping".format(obs=obsid))
continue
print("Inserting obsid into M&C:" + str(obsid))
session.add_obs(starttime, stoptime, obsid)
session.commit()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames using webcam
Usage
-----
flow_rotation.py
Keys
----
r - reset accumulated rotation
ESC - exit
'''
import cv2
import numpy as np
import math
def procrustes(X, Y, scaling=True, reflection='best'):
"""
A port of MATLAB's `procrustes` function to Numpy.
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
d, Z, [tform] = procrustes(X, Y)
Inputs:
------------
X, Y
matrices of target and input coordinates. they must have equal
numbers of points (rows), but Y may have fewer dimensions
(columns) than X.
scaling
if False, the scaling component of the transformation is forced
to 1
reflection
if 'best' (default), the transformation solution may or may not
include a reflection component, depending on which fits the data
best. setting reflection to True or False forces a solution with
reflection or no reflection respectively.
Outputs
------------
d
the residual sum of squared errors, normalized according to a
measure of the scale of X, ((X - X.mean(0))**2).sum()
Z
the matrix of transformed Y-values
tform
a dict specifying the rotation, translation and scaling that
maps X --> Y
"""
n,m = X.shape
ny,my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m-my))), 0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
    if reflection != 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
        # standardised distance between X and b*Y*T + c
d = 1 - traceTA**2
# transformed coords
Z = normX*traceTA*np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my,:]
c = muX - b*np.dot(muY, T)
#transformation values
tform = {'rotation':T, 'scale':b, 'translation':c}
return d, Z, tform
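# --- Hedged usage sketch (not called anywhere): recover a known 2-D rotation ---
# The arrays below are made-up test data; this mirrors how the tracking loop
# further down extracts a rotation angle from tform['rotation'].
def _demo_procrustes():
    theta = 0.3
    R = np.array([[math.cos(theta), -math.sin(theta)],
                  [math.sin(theta),  math.cos(theta)]])
    X = np.random.randn(20, 2)  # arbitrary point cloud
    Y = X.dot(R.T)              # the same points rotated by theta
    d, Z, tform = procrustes(X, Y)
    recovered = math.atan2(tform['rotation'][0, 1], tform['rotation'][0, 0])
    # d should be close to 0 and abs(recovered) close to theta; the sign of the
    # recovered angle depends on the row-vector convention used here.
    return d, recovered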
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,640)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,480)
track_len = 10
rotation_history_len = 300
detect_interval = 5
tracks = []
rotation_track = []
frame_idx = 0
prev_gray = 0
total_rot = 0
while True:
    ret, frame = cap.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
vis = frame.copy()
if len(tracks) > 0:
img0, img1 = prev_gray, frame_gray
p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
curr_pts = []
prev_pts = []
for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
curr_pts.append(tr[len(tr)-1])
prev_pts.append(tr[len(tr)-2])
if len(tr) > track_len:
del tr[0]
new_tracks.append(tr)
cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
tracks = new_tracks
cv2.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
if len(curr_pts) > 4:
d,Z, tform = procrustes(np.array(prev_pts), np.array(curr_pts))
viewRotation = math.atan2(tform['rotation'][0, 1], tform['rotation'][0, 0])
total_rot += viewRotation
#print rotation rate to console
            print(viewRotation)
#create a history for plotting
rotation_track.append(viewRotation)
if len(rotation_track) > rotation_history_len:
del rotation_track[0]
#plot rotation
center = (vis.shape[1]/2,vis.shape[0]/2)
x = 0
x_step = vis.shape[1]/rotation_history_len
rot_scale = 200
prev_rot = 0
#plot rotation line
cv2.line(vis,center, (center[0] + int(rot_scale*math.cos(-total_rot)),center[1] + int(rot_scale*math.sin(-total_rot))),(0, 255, 0))
#plot rotation history
for viewRotation in rotation_track:
cv2.line(vis, (x, (int(prev_rot*rot_scale) + vis.shape[0]/2)), (x+x_step,int(viewRotation*rot_scale) + vis.shape[0]/2), (0, 0, 255))
prev_rot = viewRotation
x += x_step
if frame_idx % detect_interval == 0:
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1]) for tr in tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
tracks.append([(x, y)])
frame_idx += 1
prev_gray = frame_gray
cv2.imshow('flow rotation', vis)
ch = cv2.waitKey(1)
if ch == 27:
break
if ch == ord('r'):
total_rot = 0
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Common for connections to Impala. Currently supports Beeswax connections and
# in the future will support HS2 connections. Provides tracing around all
# operations.
import abc
import logging
import re
import impala.dbapi as impyla
import tests.common
from RuntimeProfile.ttypes import TRuntimeProfileFormat
from tests.beeswax.impala_beeswax import ImpalaBeeswaxClient
LOG = logging.getLogger('impala_connection')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# All logging needs to be either executable SQL or a SQL comment (prefix with --).
console_handler.setFormatter(logging.Formatter('%(message)s'))
LOG.addHandler(console_handler)
LOG.propagate = False
# Regular expression that matches the "progress" entry in the HS2 log.
PROGRESS_LOG_RE = re.compile(
r'^Query [a-z0-9:]+ [0-9]+% Complete \([0-9]+ out of [0-9]+\)$')
MAX_SQL_LOGGING_LENGTH = 128 * 1024
# test_exprs.py's TestExprLimits executes extremely large SQLs (multiple MBs). It is the
# only test that runs SQL larger than 128KB. Logging these SQLs in execute() increases
# the size of the JUnitXML files, causing problems for users of JUnitXML like Jenkins.
# This function limits the size of the SQL logged if it is larger than 128KB.
def log_sql_stmt(sql_stmt):
"""If the 'sql_stmt' is shorter than MAX_SQL_LOGGING_LENGTH, log it unchanged. If
it is larger than MAX_SQL_LOGGING_LENGTH, truncate it and comment it out."""
if (len(sql_stmt) <= MAX_SQL_LOGGING_LENGTH):
LOG.info("{0};\n".format(sql_stmt))
else:
# The logging output should be valid SQL, so the truncated SQL is commented out.
LOG.info("-- Skip logging full SQL statement of length {0}".format(len(sql_stmt)))
LOG.info("-- Logging a truncated version, commented out:")
for line in sql_stmt[0:MAX_SQL_LOGGING_LENGTH].split("\n"):
LOG.info("-- {0}".format(line))
LOG.info("-- [...]")
# Common wrapper around the internal types of HS2/Beeswax operation/query handles.
class OperationHandle(object):
def __init__(self, handle, sql_stmt):
self.__handle = handle
self.__sql_stmt = sql_stmt
def get_handle(self):
return self.__handle
def sql_stmt(self):
return self.__sql_stmt
# Represents an Impala connection.
class ImpalaConnection(object):
__metaclass__ = abc.ABCMeta
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@abc.abstractmethod
def set_configuration_option(self, name, value):
"""Sets a configuraiton option name to the given value"""
pass
def set_configuration(self, config_option_dict):
"""Replaces existing configuration with the given dictionary"""
assert config_option_dict is not None, "config_option_dict cannot be None"
self.clear_configuration()
for name, value in config_option_dict.iteritems():
self.set_configuration_option(name, value)
@abc.abstractmethod
def clear_configuration(self):
"""Clears all existing configuration."""
pass
@abc.abstractmethod
def get_default_configuration(self):
"""Return the default configuration for the connection, before any modifications are
made to the session state. Returns a map with the config variable as the key and a
string representation of the default value as the value."""
pass
@abc.abstractmethod
def connect(self):
"""Opens the connection"""
pass
@abc.abstractmethod
def close(self):
"""Closes the connection. Can be called multiple times"""
pass
@abc.abstractmethod
def close_query(self, handle):
"""Closes the query."""
pass
@abc.abstractmethod
def get_state(self, operation_handle):
"""Returns the state of a query"""
pass
@abc.abstractmethod
def state_is_finished(self, operation_handle):
"""Returns whether the state of a query is finished"""
pass
@abc.abstractmethod
def get_log(self, operation_handle):
"""Returns the log of an operation as a string, with entries separated by newlines."""
pass
@abc.abstractmethod
def cancel(self, operation_handle):
"""Cancels an in-flight operation"""
pass
def execute(self, sql_stmt):
"""Executes a query and fetches the results"""
pass
@abc.abstractmethod
def execute_async(self, sql_stmt):
"""Issues a query and returns the handle to the caller for processing. Only one
async operation per connection at a time is supported, due to limitations of the
Beeswax protocol and the Impyla client."""
pass
@abc.abstractmethod
def fetch(self, sql_stmt, operation_handle, max_rows=-1):
"""Fetches query results up to max_rows given a handle and sql statement.
If max_rows < 0, all rows are fetched. If max_rows > 0 but the number of
rows returned is less than max_rows, all the rows have been fetched."""
pass
# Represents a connection to Impala using the Beeswax API.
class BeeswaxConnection(ImpalaConnection):
def __init__(self, host_port, use_kerberos=False, user=None, password=None,
use_ssl=False):
self.__beeswax_client = ImpalaBeeswaxClient(host_port, use_kerberos, user=user,
password=password, use_ssl=use_ssl)
self.__host_port = host_port
self.QUERY_STATES = self.__beeswax_client.query_states
def set_configuration_option(self, name, value):
# Only set the option if it's not already set to the same value.
if self.__beeswax_client.get_query_option(name) != value:
LOG.info('SET %s=%s;' % (name, value))
self.__beeswax_client.set_query_option(name, value)
def get_default_configuration(self):
result = {}
for item in self.__beeswax_client.get_default_configuration():
result[item.key] = item.value
return result
def clear_configuration(self):
self.__beeswax_client.clear_query_options()
# A hook in conftest sets tests.common.current_node.
if hasattr(tests.common, "current_node"):
self.set_configuration_option("client_identifier", tests.common.current_node)
def connect(self):
LOG.info("-- connecting to: %s" % self.__host_port)
self.__beeswax_client.connect()
# TODO: rename to close_connection
def close(self):
LOG.info("-- closing connection to: %s" % self.__host_port)
self.__beeswax_client.close_connection()
def close_query(self, operation_handle):
LOG.info("-- closing query for operation handle: %s" % operation_handle)
self.__beeswax_client.close_query(operation_handle.get_handle())
def close_dml(self, operation_handle):
LOG.info("-- closing DML query for operation handle: %s" % operation_handle)
self.__beeswax_client.close_dml(operation_handle.get_handle())
def execute(self, sql_stmt, user=None):
LOG.info("-- executing against %s\n" % (self.__host_port))
log_sql_stmt(sql_stmt)
return self.__beeswax_client.execute(sql_stmt, user=user)
def execute_async(self, sql_stmt, user=None):
LOG.info("-- executing async: %s\n" % (self.__host_port))
log_sql_stmt(sql_stmt)
beeswax_handle = self.__beeswax_client.execute_query_async(sql_stmt, user=user)
return OperationHandle(beeswax_handle, sql_stmt)
def cancel(self, operation_handle):
LOG.info("-- canceling operation: %s" % operation_handle)
return self.__beeswax_client.cancel_query(operation_handle.get_handle())
def get_state(self, operation_handle):
LOG.info("-- getting state for operation: %s" % operation_handle)
return self.__beeswax_client.get_state(operation_handle.get_handle())
def state_is_finished(self, operation_handle):
LOG.info("-- checking finished state for operation: {0}".format(operation_handle))
return self.get_state(operation_handle) == self.QUERY_STATES["FINISHED"]
def get_exec_summary(self, operation_handle):
LOG.info("-- getting exec summary operation: %s" % operation_handle)
return self.__beeswax_client.get_exec_summary(operation_handle.get_handle())
def get_runtime_profile(self, operation_handle):
LOG.info("-- getting runtime profile operation: %s" % operation_handle)
return self.__beeswax_client.get_runtime_profile(operation_handle.get_handle())
def wait_for_finished_timeout(self, operation_handle, timeout):
LOG.info("-- waiting for query to reach FINISHED state: %s" % operation_handle)
return self.__beeswax_client.wait_for_finished_timeout(
operation_handle.get_handle(), timeout)
def wait_for_admission_control(self, operation_handle):
LOG.info("-- waiting for completion of the admission control processing of the "
"query: %s" % operation_handle)
return self.__beeswax_client.wait_for_admission_control(operation_handle.get_handle())
def get_admission_result(self, operation_handle):
LOG.info("-- getting the admission result: %s" % operation_handle)
return self.__beeswax_client.get_admission_result(operation_handle.get_handle())
def get_log(self, operation_handle):
LOG.info("-- getting log for operation: %s" % operation_handle)
return self.__beeswax_client.get_log(operation_handle.get_handle().log_context)
def fetch(self, sql_stmt, operation_handle, max_rows = -1):
LOG.info("-- fetching results from: %s" % operation_handle)
return self.__beeswax_client.fetch_results(
sql_stmt, operation_handle.get_handle(), max_rows)
class ImpylaHS2Connection(ImpalaConnection):
"""Connection to Impala using the impyla client connecting to HS2 endpoint.
impyla implements the standard Python dbabi: https://www.python.org/dev/peps/pep-0249/
plus Impala-specific extensions, e.g. for fetching runtime profiles.
TODO: implement support for kerberos, SSL, etc.
"""
def __init__(self, host_port, use_kerberos=False, is_hive=False,
use_http_transport=False, http_path=""):
self.__host_port = host_port
self.__use_http_transport = use_http_transport
self.__http_path = http_path
if use_kerberos:
raise NotImplementedError("Kerberos support not yet implemented")
# Impyla connection and cursor is initialised in connect(). We need to reuse the same
# cursor for different operations (as opposed to creating a new cursor per operation)
# so that the session is preserved. This means that we can only execute one operation
# at a time per connection, which is a limitation also imposed by the Beeswax API.
self.__impyla_conn = None
self.__cursor = None
# Query options to send along with each query.
self.__query_options = {}
self._is_hive = is_hive
def set_configuration_option(self, name, value):
self.__query_options[name] = str(value)
def get_default_configuration(self):
return self.__default_query_options.copy()
def clear_configuration(self):
self.__query_options.clear()
if hasattr(tests.common, "current_node") and not self._is_hive:
self.set_configuration_option("client_identifier", tests.common.current_node)
def connect(self):
LOG.info("-- connecting to {0} with impyla".format(self.__host_port))
host, port = self.__host_port.split(":")
conn_kwargs = {}
if self._is_hive:
conn_kwargs['auth_mechanism'] = 'PLAIN'
self.__impyla_conn = impyla.connect(host=host, port=int(port),
use_http_transport=self.__use_http_transport,
http_path=self.__http_path, **conn_kwargs)
# Get the default query options for the session before any modifications are made.
self.__cursor = self.__impyla_conn.cursor(convert_types=False)
self.__default_query_options = {}
if not self._is_hive:
self.__cursor.execute("set all")
for name, val, _ in self.__cursor:
self.__default_query_options[name] = val
self.__cursor.close_operation()
LOG.debug("Default query options: {0}".format(self.__default_query_options))
def close(self):
LOG.info("-- closing connection to: {0}".format(self.__host_port))
try:
# Explicitly close the cursor so that it will close the session.
self.__cursor.close()
except Exception as e:
# The session may no longer be valid if the impalad was restarted during the test.
pass
try:
self.__impyla_conn.close()
except AttributeError as e:
# When the HTTP endpoint restarts, Thrift HTTP will close the endpoint and calling
# close() will result in an exception.
if not (self.__use_http_transport and 'NoneType' in str(e)):
raise
def close_query(self, operation_handle):
LOG.info("-- closing query for operation handle: {0}".format(operation_handle))
operation_handle.get_handle().close_operation()
def execute(self, sql_stmt, user=None, profile_format=TRuntimeProfileFormat.STRING):
handle = self.execute_async(sql_stmt, user)
r = None
try:
r = self.__fetch_results(handle, profile_format=profile_format)
finally:
if r is None:
# Try to close the query handle but ignore any exceptions not to replace the
# original exception raised by '__fetch_results'.
try:
self.close_query(handle)
except Exception:
pass
else:
self.close_query(handle)
return r
def execute_async(self, sql_stmt, user=None):
LOG.info("-- executing against {0} at {1}\n".format(
self._is_hive and 'Hive' or 'Impala', self.__host_port))
log_sql_stmt(sql_stmt)
if user is not None:
raise NotImplementedError("Not yet implemented for HS2 - authentication")
try:
self.__cursor.execute_async(sql_stmt, configuration=self.__query_options)
handle = OperationHandle(self.__cursor, sql_stmt)
LOG.info("Started query {0}".format(self.get_query_id(handle)))
return handle
except Exception:
self.__cursor.close_operation()
raise
def cancel(self, operation_handle):
LOG.info("-- canceling operation: {0}".format(operation_handle))
cursor = operation_handle.get_handle()
return cursor.cancel_operation(reset_state=False)
def get_query_id(self, operation_handle):
"""Return the string representation of the query id."""
guid_bytes = \
operation_handle.get_handle()._last_operation.handle.operationId.guid
return "{0}:{1}".format(guid_bytes[7::-1].encode('hex_codec'),
guid_bytes[16:7:-1].encode('hex_codec'))
def get_state(self, operation_handle):
LOG.info("-- getting state for operation: {0}".format(operation_handle))
cursor = operation_handle.get_handle()
return cursor.status()
def state_is_finished(self, operation_handle):
LOG.info("-- checking finished state for operation: {0}".format(operation_handle))
cursor = operation_handle.get_handle()
# cursor.status contains a string representation of one of
# TCLIService.TOperationState.
return cursor.status() == "FINISHED_STATE"
def get_exec_summary(self, operation_handle):
LOG.info("-- getting exec summary operation: {0}".format(operation_handle))
cursor = operation_handle.get_handle()
# summary returned is thrift, not string.
return cursor.get_summary()
def get_runtime_profile(self, operation_handle, profile_format):
LOG.info("-- getting runtime profile operation: {0}".format(operation_handle))
cursor = operation_handle.get_handle()
return cursor.get_profile(profile_format=profile_format)
def wait_for_finished_timeout(self, operation_handle, timeout):
LOG.info("-- waiting for query to reach FINISHED state: {0}".format(operation_handle))
raise NotImplementedError("Not yet implemented for HS2 - states differ from beeswax")
def wait_for_admission_control(self, operation_handle):
LOG.info("-- waiting for completion of the admission control processing of the "
"query: {0}".format(operation_handle))
raise NotImplementedError("Not yet implemented for HS2 - states differ from beeswax")
def get_admission_result(self, operation_handle):
LOG.info("-- getting the admission result: {0}".format(operation_handle))
raise NotImplementedError("Not yet implemented for HS2 - states differ from beeswax")
def get_log(self, operation_handle):
LOG.info("-- getting log for operation: {0}".format(operation_handle))
# HS2 includes non-error log messages that we need to filter out.
cursor = operation_handle.get_handle()
lines = [line for line in cursor.get_log().split('\n')
if not PROGRESS_LOG_RE.match(line)]
return '\n'.join(lines)
def fetch(self, sql_stmt, handle, max_rows=-1):
LOG.info("-- fetching results from: {0}".format(handle))
return self.__fetch_results(handle, max_rows)
def __fetch_results(self, handle, max_rows=-1,
profile_format=TRuntimeProfileFormat.STRING):
"""Implementation of result fetching from handle."""
cursor = handle.get_handle()
assert cursor is not None
# Don't fetch data for queries with no results.
result_tuples = None
column_labels = None
column_types = None
if cursor.has_result_set:
desc = cursor.description
column_labels = [col_desc[0].upper() for col_desc in desc]
column_types = [col_desc[1].upper() for col_desc in desc]
if max_rows < 0:
result_tuples = cursor.fetchall()
else:
result_tuples = cursor.fetchmany(max_rows)
elif self._is_hive:
# For Hive statements that have no result set (eg USE), they may still be
# running, and we need to wait for them to finish before we can proceed.
cursor._wait_to_finish()
if not self._is_hive:
log = self.get_log(handle)
profile = self.get_runtime_profile(handle, profile_format=profile_format)
else:
log = None
profile = None
return ImpylaHS2ResultSet(success=True, result_tuples=result_tuples,
column_labels=column_labels, column_types=column_types,
query=handle.sql_stmt(), log=log, profile=profile)
class ImpylaHS2ResultSet(object):
"""This emulates the interface of ImpalaBeeswaxResult so that it can be used in
place of it. TODO: when we deprecate/remove Beeswax, clean this up."""
def __init__(self, success, result_tuples, column_labels, column_types, query, log,
profile):
self.success = success
self.column_labels = column_labels
self.column_types = column_types
self.query = query
self.log = log
self.profile = profile
self.__result_tuples = result_tuples
# self.data is the data in the ImpalaBeeswaxResult format: a list of rows with each
# row represented as a tab-separated string.
self.data = None
if result_tuples is not None:
self.data = [self.__convert_result_row(tuple) for tuple in result_tuples]
def __convert_result_row(self, result_tuple):
"""Take primitive values from a result tuple and construct the tab-separated string
that would have been returned via beeswax."""
return '\t'.join([self.__convert_result_value(val) for val in result_tuple])
def __convert_result_value(self, val):
"""Take a primitive value from a result tuple and its type and construct the string
that would have been returned via beeswax."""
if val is None:
return 'NULL'
if type(val) == float:
# Same format as what Beeswax uses in the backend.
return "{:.16g}".format(val)
else:
return str(val)
def create_connection(host_port, use_kerberos=False, protocol='beeswax',
is_hive=False):
if protocol == 'beeswax':
c = BeeswaxConnection(host_port=host_port, use_kerberos=use_kerberos)
elif protocol == 'hs2':
c = ImpylaHS2Connection(host_port=host_port, use_kerberos=use_kerberos,
is_hive=is_hive)
else:
assert protocol == 'hs2-http'
c = ImpylaHS2Connection(host_port=host_port, use_kerberos=use_kerberos,
is_hive=is_hive, use_http_transport=True, http_path='cliservice')
# A hook in conftest sets tests.common.current_node. Skip for Hive connections since
# Hive cannot modify client_identifier at runtime.
if hasattr(tests.common, "current_node") and not is_hive:
c.set_configuration_option("client_identifier", tests.common.current_node)
return c
def create_ldap_connection(host_port, user, password, use_ssl=False):
return BeeswaxConnection(host_port=host_port, user=user, password=password,
use_ssl=use_ssl)
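# --- Hedged usage sketch (not part of the original module) ---
# How a test might obtain and use a connection through create_connection();
# the host:port value below is a placeholder.
def _example_usage():
    with create_connection('localhost:21050', protocol='hs2') as conn:
        conn.connect()
        result = conn.execute('select 1')
        return result.data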
|
nilq/baby-python
|
python
|
from hcipy import *
import numpy as np
def check_energy_conservation(shift_input, scale, shift_output, q, fov, dims):
grid = make_uniform_grid(dims, 1).shifted(shift_input).scaled(scale)
f_in = Field(np.random.randn(grid.size), grid)
#f_in = Field(np.exp(-30 * grid.as_('polar').r**2), grid)
fft = FastFourierTransform(grid, q=q, fov=fov, shift=shift_output)
mft = MatrixFourierTransform(grid, fft.output_grid)
nft = NaiveFourierTransform(grid, fft.output_grid, True)
nft2 = NaiveFourierTransform(grid, fft.output_grid, False)
fourier_transforms = [fft, mft, nft, nft2]
energy_ratios = []
patterns_match = []
for ft1 in fourier_transforms:
for ft2 in fourier_transforms:
f_inter = ft1.forward(f_in)
f_out = ft2.backward(f_inter)
energy_in = np.sum(np.abs(f_in)**2 * f_in.grid.weights)
energy_out = np.sum(np.abs(f_out)**2 * f_out.grid.weights)
energy_ratio = energy_out / energy_in
pattern_match = np.abs(f_out - f_in).max() / f_in.max()
if fov == 1:
# If the full fov is retained, energy and pattern should be conserved
# for all fourier transform combinations.
assert np.allclose(f_in, f_out)
assert np.allclose(energy_in, energy_out)
energy_ratios.append(energy_ratio)
patterns_match.append(pattern_match)
energy_ratios = np.array(energy_ratios).reshape((len(fourier_transforms), len(fourier_transforms)))
patterns_match = np.array(patterns_match).reshape((len(fourier_transforms), len(fourier_transforms)))
# If the full fov is not retained, the pattern and energy loss should be the same
# for all fourier transform combinations.
if fov != 1:
assert np.allclose(energy_ratios, energy_ratios[0, 0])
assert np.allclose(patterns_match, patterns_match[0, 0])
def test_fourier_energy_conservation_1d():
for shift_input in [0,0.1]:
for scale in [1,2]:
for shift_output in [0,0.1]:
for q in [1,3,4]:
for fov in [1, 0.5, 0.8]:
for dims in [64, 65]:
check_energy_conservation(shift_input, scale, shift_output, q, fov, dims)
def test_fourier_energy_conservation_2d():
for shift_input in [[0,0],[0.1]]:
for scale in [1,2]:
for shift_output in [[0,0], [0.1]]:
for q in [1,3,4]:
for fov in [1,0.5,0.8]:
for dims in [[8,8],[8,16],[9,9],[9,18]]:
check_energy_conservation(shift_input, scale, shift_output, q, fov, dims)
def check_symmetry(scale, q, fov, dims):
pass
def test_fourier_symmetries_2d():
for scale in [1,2]:
for q in [1,3,4]:
for fov in [1,0.5,0.8]:
for dims in [[8,8],[8,16],[9,9],[9,18]]:
check_symmetry(scale, q, fov, dims)
def test_make_fourier_transform():
input_grid = make_pupil_grid(128)
ft = make_fourier_transform(input_grid, q=1, fov=1, planner='estimate')
assert type(ft) == FastFourierTransform
ft = make_fourier_transform(input_grid, q=8, fov=0.3, planner='estimate')
assert type(ft) == MatrixFourierTransform
ft = make_fourier_transform(input_grid, q=1, fov=1, planner='measure')
assert type(ft) == FastFourierTransform
ft = make_fourier_transform(input_grid, q=8, fov=0.1, planner='measure')
assert type(ft) == MatrixFourierTransform
output_grid = CartesianGrid(UnstructuredCoords([np.random.randn(100), np.random.randn(100)]))
ft = make_fourier_transform(input_grid, output_grid)
assert type(ft) == NaiveFourierTransform
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2018 StrTrek Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# System Required
import os
import logging
# Outer Required
import pandas as pd
import numpy as np
# Inner Required
from Babelor.Presentation import URL, MSG
# Global Parameters
from Babelor.Config import CONFIG
class FILE:
def __init__(self, conn: (URL, str)):
if isinstance(conn, str):
self.conn = URL(conn)
else:
self.conn = conn
if os.path.splitext(self.conn.path)[-1] in [""]:
self.url_is_dir = True
else:
self.url_is_dir = False
def read(self, msg: MSG):
# logging.debug("FILE::{0}::READ msg:{1}".format(self.conn, msg))
# -------------------------------------------------
rm_idx = []
for i in range(0, msg.args_count, 1):
arguments = msg.read_args(i)
if self.url_is_dir:
path = os.path.join(self.conn.path, arguments["path"])
else:
path = self.conn.path
suffix = os.path.splitext(path)[-1]
# -------------------------------
if os.path.isfile(path):
if suffix in [".xls", ".xlsx"]:
if self.url_is_dir:
datum = pd.read_excel(path)
else:
datum = pd.read_excel(path, sheet_name=arguments["path"])
elif suffix in [".npy"]:
datum = np.load(path)
else:
with open(path, "rb") as file:
datum = file.read()
msg.add_datum(datum, arguments["path"])
logging.info("FILE::{0}::READ successfully.".format(path))
else:
logging.warning("FILE::{0}::READ failed.".format(path))
rm_idx = [i] + rm_idx
# -------------------------------
if CONFIG.IS_DATA_READ_START:
for i in rm_idx:
msg.remove_args(i)
logging.info("FILE::{0}::READ successfully.".format(self.conn))
return msg
def write(self, msg: MSG):
# logging.debug("FILE::{0}::WRITE msg:{1}".format(self.conn, msg))
if self.url_is_dir:
if not os.path.exists(self.conn.path):
os.mkdir(self.conn.path)
# -------------------------------
rm_idx = []
for i in range(0, msg.dt_count, 1):
dt = msg.read_datum(i)
if self.url_is_dir:
path = os.path.join(self.conn.path, dt["path"])
else:
path = self.conn.path
suffix = os.path.splitext(path)[-1]
# -------------------------------
if os.path.exists(path):
logging.warning("FILE::{0}::WRITE failed.".format(path))
elif os.path.isfile(os.path.split(path)[0]):
logging.warning("FILE::{0}::WRITE failed.".format(path))
else:
if not os.path.isdir(os.path.split(path)[0]):
mkdir(os.path.split(path)[0])
# -------------------------------
if suffix in [".xls", ".xlsx"]:
if isinstance(dt["stream"], pd.DataFrame):
dt["stream"].to_excel(path, index=False)
logging.info("FILE::EXCEL::{0}::WRITE successfully.".format(path))
else:
logging.warning("FILE::EXCEL::{0}::WRITE failed.".format(path))
elif suffix in [".npy"]:
if isinstance(dt["stream"], np.ndarray):
np.save(path, dt["stream"])
logging.info("FILE::NUMPY::{0}::WRITE successfully.".format(path))
else:
logging.warning("FILE::NUMPY::{0}::WRITE failed.".format(path))
elif suffix in [""]:
logging.warning("FILE::{0}::WRITE None.".format(path))
else:
with open(path, "wb") as file:
file.write(dt["stream"])
logging.info("FILE::{0}::WRITE successfully.".format(path))
rm_idx = [i] + rm_idx
# -------------------------------
if CONFIG.IS_DATA_WRITE_END:
for i in rm_idx:
msg.remove_datum(i)
def mkdir(file_path: str):
dir_path = os.path.split(file_path)[0]
if os.path.exists(file_path):
if os.path.isfile(file_path):
os.remove(file_path)
else:
os.rmdir(file_path)
if os.path.exists(dir_path):
if os.path.isfile(dir_path):
os.remove(dir_path)
else:
pass
else:
os.mkdir(dir_path)
def sheets_merge(read_path, write_path):
"""
    :param read_path: path of the Excel workbook to read
    :param write_path: path of the merged Excel workbook to write
:return: None
"""
import xlrd
book = xlrd.open_workbook(read_path)
writer = None
for sheet in book.sheets():
reader = pd.read_excel(read_path, sheet_name=sheet.name)
if writer is None:
writer = reader
else:
writer = writer.append(reader.fillna("")) # NaN clean up
writer = writer.reset_index(drop=True) # idx clean up
writer.to_excel(write_path)
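# --- Hedged usage sketch (the paths below are placeholders) ---
# Merge every sheet of one workbook into a single sheet of another workbook.
if __name__ == '__main__':
    sheets_merge("all_sheets.xlsx", "merged.xlsx")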
|
nilq/baby-python
|
python
|
#-------------------------------------------------------------------------------
# Name: Spatial Parser Helper functions
# Purpose: A suite of functions which are used by the SpatialParser
# class.
#
# Author: Ashwath Sampath
# Based on: http://mentalmodels.princeton.edu/programs/space-6.lisp
# Created: 01-05-2018
# Copyright: (c) Ashwath Sampath 2018
#-------------------------------------------------------------------------------
""" Module of functions used by the SpatialParser class in
spatial_parser.py. Based on LISP code developed by
PN Johnson-Laird and R.Byrne as part of their 1991 book
'Deduction' and their 1989 paper 'Spatial Reasoning'. """
import copy
def syntax_rule(lisrules, lhs, gram):
""" SYNTACTIC CATEGORIES AND RULES
This func. returns first of lisrules after item that matches lhs,
i.e. a complete grammatical rule. Normally (when not called by
    backtrack), it just returns the first (only) rule in the lisrules list."""
if lisrules == []:
return []
if lhs is None:
return lisrules[0]
# lhs is not none
rhs = expand(lhs, gram)
semantics = rule_semantics(lhs, gram)
lis1 = [rhs, [lhs, semantics]]
# Return the first rule after lis1 in lisrules. If lis1 is the last
# rule of lisrules, member_lis returns [].
result = member_lis(lis1, lisrules)[0]
return result
def member_lis(lis1, lis2):
""" If lis1 is last item in lis2, it returns the rest of lis2."""
found_at = -1
if lis1 is None or lis1 == []:
return []
for index, rule in enumerate(lis2):
if lis1 == rule:
found_at = index
break
    # lis1 found at last pos in lis2, return [] as nothing in
    # lis2 comes after it.
if found_at == len(lis2) - 1:
return []
# Return sub-lists after the index found_at, i.e return all
# the elements in lis2 after element lis1.
return lis2[found_at+1:]
def rule_list(syn_stack, gram):
""" This function returns a list of rules (in complete form) whose
expansions when reversed match the items at the top of the syn-stack
(stack with semantic items stripped off), using matchrule. """
list_of_rules = []
for rule in gram:
# A deep copy of rhs is necessary: we need to only reverse the copy,
# otherwise the original rule in gram gets modified.
rhs = rhs_of_rule(rule)
revrhs = copy.deepcopy(rhs)
revrhs.reverse()
if match_rule(revrhs, syn_stack):
list_of_rules.append(rule)
return list_of_rules
def match_rule(revrule, syn_stack):
""" This function matches reversed rhs of rule with syn-stack.
It returns True if there is a match, false if there isn't. """
if len(syn_stack) < len(revrule):
return False
for i, term in enumerate(revrule):
if term != syn_stack[i]:
return False
return True
def lexical_category(item, lex, lexcat):
""" This funtion returns category of item in lexicon, allowing
for ambiguity in lexicon (through parameter lexcat). If the
item doesn't exist in the lexicon, it returns None"""
# if item is not a word (i.e. a terminal symbol), it will be a
# list -> we can't get a lexical category.
if isinstance(item, list):
return None
if item in lex:
# E.g. lex[item] = ['art-indef', []]
return legal_cat(lexcat, lex[item])
print("symbol '{}' not in lexicon".format(item))
return None
def legal_cat(lexcat, lis):
""" This function takes lis and lexical category, lexcat, and
returns next item in lis after lexcat or else if none, None.
In practice, it takes a lexcat and the rhs of the
lexicon it comes from and returns next lexcat if any """
if lexcat is None:
return lis
# Otherwise, return 1st item after lexcat in lis.
after_lexcat = member_lis(lexcat, [lis])
if after_lexcat == []:
        # Lexcat is the last term of lis
return None
# Return next item after lexcat
return after_lexcat[0]
def word(item, lex):
"""This function returns true if item is word in lexicon that has
not been analyzed, i.e. it has no attached syntactic category"""
# If item is a key in lex, return True
if isinstance(item, list):
return False
if item in lex:
return True
return False
def sem_of_rule(rule):
""" Given a grammatical rule, this function returns the semantic
part of it. """
return rule[1][1]
def rule_semantics(lhs, gram):
""" Returns the semantic part of a given rule given its lhs.
Eg. ['S',2] returns [['S', 2], 's_neg_sem']]"""
for rule in gram:
if lhs_of_rule(rule, gram) == lhs[0]:
return sem_of_rule(rule)
return None # CHECK
def lhs_of_rule(rule, gram):
""" Given a rule such as (S 1) -> (NP-sing)(VP-sing), it
returns its lhs, i.e (S 1) provided that rule is in the cfgrammar;
otherwise it returns None. This func corresponds to functions
lhs_of_rule and ruleInGrammar in the lisp code. """
if rule in gram:
return rule[1][0]
print("Rule not in grammar")
return None
def rhs_of_rule(rule):
""" This function takes a grammatical rule, and returns its RHS """
return rule[0]
def rewrite(lhs, gram):
""" Given lhs of the rule (e.g. ['NP-Sing', 1] , this function returns
the complete rule"""
for rule in gram:
if lhs[0] == lhs_of_rule(rule, gram):
return rule
print("No rule in grammar for lhs = {}".format(lhs))
return []
def non_term(symb, gram):
""" Checks if symb is a non-terminal. If symb is lhs of a rule,
e.g. 'S', this function returns True. Otherwise, it returns False."""
# Check for word
if not isinstance(symb, list):
return False
# Check for syn cat.
if not isinstance(symb[0], list):
return False
for rule in gram:
# lhs_of_rule returns lhs, for e.g. ['NP-sing', 1]
if lhs_of_rule(rule, gram) == symb[0]:
return True
# symb not a non-terminal.
return False
def expand(lhs, gram):
""" Takes the lhs of a rule (S 1) -> NP VP, and returns its rhs."""
for rule in gram:
if lhs[0] == lhs_of_rule(rule, gram):
return rhs_of_rule(rule)
print("Reduction not in grammar")
return []
def npfun(lis):
""" Function which returns the first non [] item in lis """
for item in lis:
if item != []:
# Item will be a list
return item
return None
def pred(lis):
""" This function moves the list representing a relation (first element
of the list) AFTER relational term. """
# Remove all dummy semantic elements.
lis = [ele for ele in lis if ele != []]
# Put the relational predicate in front of the token
lis[0], lis[1] = lis[1], lis[0]
return lis
def s_prop(lis):
""" This function assmembles rel, arg1, arg2 together in a list.
E.g. When lis is [[[1,0,0],['V']],['[]']], it returns
[[1,0,0],['[]'],['V']] for the premise 'the square is to the
right of the triangle'. """
# Switch the order of the tokens we have the PRED part in one list
# element (relation plus last token) and the NP-SING part (1st token
# in the premise) in 2nd list element. Add them to a new list with
# the order [relation, first-token, last-token].
return [lis[0][0], lis[1], lis[0][1]]
def drop_rule_no(lis, lex):
""" This func. takes items obtained from history, drops rule no. from
syn part of each item => ready to push into pstack as part of unred"""
# There are 3 types of elements in history, words, rhs in
# gram/ term in lexicon (e.g. [V-cop', []] and Lhs in gram
# (e.g. [['NP-sing', 1], ['O']]. We need to drop the rule no. from
# the 3rd type -- lhs in gram.
rule_number_absent = []
for ele in lis:
# words on history will not have rule no.s
if word(ele, lex):
rule_number_absent.append(ele)
continue
# No rule no.s in this type of element. [V-cop', []]
if not isinstance(ele[0], list):
rule_number_absent.append(ele)
continue
# pstack requires entries of the form ['NP-sing', ['O']] for
# [['NP-sing', 1], ['O']]
tmp = [ele[0][0], ele[1]]
rule_number_absent.append(tmp)
return rule_number_absent
def copy_history(revrhs, hist, lex):
""" This func. takes reversed rhs constituents of a rule, looks for
their mates in history and returns a list of them, including their
semantics. """
rhs_in_history = []
for syncat in revrhs:
for element in hist:
# If word is in history, indexing it will give an error
if word(element, lex):
continue
# Check if syncats in rhs match a lexicon entry in history
# E.g. revrhs = ['of-p', 'rel front-p', 'in-p'],
# and history has ['of-p', []]
# rhs of rule/lex element in history
if element[0] == syncat:
rhs_in_history.append(element)
continue
            # lhs of rule in history; this needs a separate if because the
            # previous check would index out of bounds for these entries.
if element[0][0] == syncat:
rhs_in_history.append(element)
return rhs_in_history
|
nilq/baby-python
|
python
|
####################################################################################################
## A simple feed-forward network using TensorFlow and some of its visualization tools
## Architecture: 1 input layer, 1 hidden layer and 1 output layer (the code below builds a single
##   hidden layer of 5 neurons, although the original plan was two hidden layers with 5 and 3 neurons)
## input layer: 10 neurons corresponding to season, mnth, holiday, weekday, workingday, weathersit,
##   temp, atemp, hum, windspeed
## output neuron: this is a regression problem where the output value predicts "cnt" in the dataset
####################################################################################################
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from matplotlib import pyplot as plt
#preprocessing the data
path="day.csv"
dataset=pd.read_csv(path)
costHistory=[]
learningRate=0.5
totalepoch=3000
samplesize=90
dataset=dataset.drop(['instant','dteday','casual','registered','yr'],axis=1)
#remaining columns: season, mnth, holiday, weekday, workingday, weathersit, temp, atemp, hum, windspeed (features) and cnt (target)
dataset=shuffle(dataset)
####create tensor graph
#create placeholder to inject input to the tensorgraph
X=tf.placeholder(dtype="float",shape=[None,10],name="x-input")
Y=tf.placeholder(dtype="float",shape=[None,1],name='output')
weights={'w1':tf.Variable(tf.random_uniform([10,5],minval=1,maxval=9)),
'w2':tf.Variable(tf.random_uniform([5,1],minval=1,maxval=9))} #weights and biases as a dictionary
biases={'b1':tf.Variable(tf.constant(0.5)),
'b2':tf.Variable(tf.constant(0.3))}
layer1_output=tf.nn.relu6(tf.matmul(X,weights['w1'])+biases['b1'])
layer2_output=tf.nn.sigmoid(tf.matmul(layer1_output,weights['w2'])+biases['b2'])
cost=tf.reduce_sum(tf.pow((Y-layer2_output),2),axis=1)
optimizer=tf.train.GradientDescentOptimizer(learning_rate=learningRate).minimize(cost)
#run the graph
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(0,totalepoch):
trainingSample = dataset.sample(samplesize)
cnt = np.asarray(trainingSample['cnt']).reshape([samplesize,1])
        trainingSample = trainingSample.drop(['cnt'], axis=1)
inparray=np.asarray([trainingSample['season'],trainingSample['mnth'],trainingSample['holiday'],trainingSample['weekday'],trainingSample['workingday'],trainingSample['weathersit'],trainingSample['temp'],trainingSample['atemp'],trainingSample['hum'],trainingSample['windspeed']])
inparray=inparray.transpose()
#print(inparray.shape)
#print(cnt.shape)
sess.run(optimizer,feed_dict={X:inparray,Y:cnt})
cst =sess.run(cost,feed_dict={X:inparray,Y:cnt})
costHistory.append(cst)
plt.plot(range(len(costHistory)), costHistory)
plt.show()
|
nilq/baby-python
|
python
|
import os
class Plugin:
def __init__(self, *args, **kwargs):
self.plugin_name = os.path.basename(__file__)
        super().__init__(*args, **kwargs)
def execute(self, args):
print('request',self.plugin_name,args)
return {
'contents': f'Hello, {self.plugin_name} '
}
|
nilq/baby-python
|
python
|
def foo(*a):
    if a:
pass<caret>
|
nilq/baby-python
|
python
|
def square(x):
    return x * x
def apply_function(fn, arg):
    # Higher-order function demo: apply the given function to the argument.
    return fn(arg)
print(apply_function(square, 5))
|
nilq/baby-python
|
python
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RRcppziggurat(RPackage):
"""'Rcpp' Integration of Different "Ziggurat" Normal RNG Implementations.
The Ziggurat generator for normally distributed random numbers, originally
proposed by Marsaglia and Tsang (2000, <doi:10.18637/jss.v005.i08>) has
been improved upon a few times starting with Leong et al (2005,
<doi:10.18637/jss.v012.i07>). This package provides an aggregation in order
to compare different implementations in order to provide an 'faster but
good enough' alternative for use with R and C++ code."""
cran = "RcppZiggurat"
version('0.1.6', sha256='9c78255ca476c945c05a564d1e4da363de714d890e0e27f3b252fd73c50eed71')
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-rcppgsl', type=('build', 'run'))
# not listed as a dependency but needed
depends_on('gsl')
|
nilq/baby-python
|
python
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.util.validation_utils as vu
LOG = logging.getLogger(__name__)
SQOOP_2_SERVER = np.NodeProcess(
name='sqoop2',
ui_name='Sqoop2-Server',
package='mapr-sqoop2-server',
open_ports=[12000]
)
SQOOP_2_CLIENT = np.NodeProcess(
name='sqoop-client',
ui_name='Sqoop2-Client',
package='mapr-sqoop2-client'
)
@six.add_metaclass(s.Single)
class Sqoop2(s.Service):
def __init__(self):
super(Sqoop2, self).__init__()
self.name = 'sqoop'
self.ui_name = 'Sqoop2'
self.version = '2.0.0'
self.node_processes = [SQOOP_2_CLIENT, SQOOP_2_SERVER]
self._validation_rules = [
vu.at_least(1, SQOOP_2_CLIENT),
vu.at_least(1, SQOOP_2_SERVER),
]
def post_install(self, context, instances):
sqoop_servers = context.filter_instances(instances, SQOOP_2_SERVER)
for instance in sqoop_servers:
with instance.remote() as r:
LOG.debug("Setting Sqoop home dir owner")
r.execute_command('chown -R mapr:mapr /opt/mapr/sqoop',
run_as_root=True)
|
nilq/baby-python
|
python
|
#add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from bullet.tm700_rgbd_Gym import tm700_rgbd_gym
from bullet.tm700_rgb_Gym import tm700_rgb_gym
from unused_code.tm700_possensorbothgrippers_Gym import tm700_possensorbothgrippers_gym
import numpy as np
from stable_baselines import DQN, DDPG
from datetime import date
import time
import baselines.parser as parser
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines.bench import Monitor
from stable_baselines.common import set_global_seeds
import matplotlib.pyplot as plt
args = parser.arg_parse()
set_global_seeds(args.random_seed)
start = time.time()
ENVIRONMENT = 'possensorbothgrippers'
MODEL = 'DDPG'
DISCRETE = False
DATE = date.today().strftime("%d-%m")
# DATE = str(time.time())
RENDERS = False
log_dir = ("./logdir_%s_%s_%s/") % (MODEL, ENVIRONMENT, DATE)
time_steps = 10000000
n_steps = 0
best_mean_reward = -np.inf
os.makedirs(log_dir, exist_ok=True)
################ MODEL AND GYM ENVIRONMENT
if ENVIRONMENT == 'rgbd':
env = tm700_rgbd_gym(renders=RENDERS, isDiscrete=DISCRETE)
env = Monitor(env, os.path.join(log_dir, 'monitor.csv'), allow_early_resets=True)
if ENVIRONMENT == 'rgb':
env = tm700_rgb_gym(renders=RENDERS, isDiscrete=DISCRETE)
env = Monitor(env, os.path.join(log_dir, 'monitor.csv'), allow_early_resets=True)
if ENVIRONMENT == 'possensor':
env = tm700_possensor_gym(renders=RENDERS, isDiscrete=DISCRETE)
env = Monitor(env, os.path.join(log_dir, 'monitor.csv'), allow_early_resets=True)
if ENVIRONMENT == 'possensorbothgrippers':
env = tm700_possensorbothgrippers_gym(renders=RENDERS, isDiscrete=DISCRETE)
env = Monitor(env, os.path.join(log_dir, 'monitor.csv'), allow_early_resets=True)
if MODEL == 'DQN':
from stable_baselines.deepq.policies import LnCnnPolicy, MlpPolicy
if ENVIRONMENT in ['rgbd', 'rgb', 'rgbdsparse']:
model = DQN(LnCnnPolicy, env, verbose=1,
tensorboard_log=(log_dir + "tensorboard_%s_%s_%s/") % (MODEL, ENVIRONMENT, DATE),
gamma=0.99, learning_rate=0.00005, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02,
train_freq=1, batch_size=32, double_q=True, learning_starts=1000,
target_network_update_freq=500, prioritized_replay=True, prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-06,
param_noise=False, _init_setup_model=True,
policy_kwargs=None, full_tensorboard_log=False)
    elif ENVIRONMENT == 'possensor':
model = DQN(MlpPolicy, env, verbose=1, tensorboard_log=(log_dir + "tensorboard_%s_%s_%s/") % (MODEL, ENVIRONMENT, DATE) ,
gamma=0.99, learning_rate=0.0005, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02,
train_freq=1, batch_size=32, double_q=True, learning_starts=1000,
target_network_update_freq=500, prioritized_replay=True, prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-06,
param_noise=False, _init_setup_model=True,
policy_kwargs=None, full_tensorboard_log=False)
if MODEL == 'DDPG':
from stable_baselines.ddpg.policies import LnCnnPolicy, MlpPolicy
from stable_baselines.ddpg import AdaptiveParamNoiseSpec
param_noise = AdaptiveParamNoiseSpec(initial_stddev=0.1, desired_action_stddev=0.1)
model = DDPG(MlpPolicy, env, verbose=1, random_exploration=0.05,tensorboard_log=(log_dir + "tensorboard_%s_%s_%s/") % (MODEL, ENVIRONMENT, DATE) )
################ CALLBACK FCTS
######################### PARAMETERS
def get_callback_vars(model, **kwargs):
"""
Helps store variables for the callback functions
:param model: (BaseRLModel)
:param **kwargs: initial values of the callback variables
"""
# save the called attribute in the model
if not hasattr(model, "_callback_vars"):
model._callback_vars = dict(**kwargs)
else: # check all the kwargs are in the callback variables
for (name, val) in kwargs.items():
if name not in model._callback_vars:
model._callback_vars[name] = val
return model._callback_vars # return dict reference (mutable)
def auto_save_callback(_locals, _globals):
"""
    Callback called at each step (for DQN and others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
    # get callback variables, with default values if uninitialized
    callback_vars = get_callback_vars(_locals["self"], n_steps=0, best_mean_reward=-np.inf)
    # evaluate the policy every 20 steps
    if callback_vars["n_steps"] % 20 == 0:
# Evaluate policy training performance
x, y = ts2xy(load_results(log_dir), 'timesteps')
if len(x) > 0:
mean_reward = np.mean(y[-100:])
# New best model, you could save the agent here
if mean_reward > callback_vars["best_mean_reward"]:
callback_vars["best_mean_reward"] = mean_reward
# Example for saving best model
print("Saving new best model at {} timesteps".format(x[-1]))
_locals['self'].save(log_dir + 'best_model')
callback_vars["n_steps"] += 1
return True
def plotting_callback(_locals, _globals):
"""
Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
if ENVIRONMENT == 'possensor':
env = tm700_possensor_gym(renders=RENDERS, isDiscrete=DISCRETE)
env = Monitor(env, os.path.join(log_dir, 'monitor.csv'), allow_early_resets=True)
"""
# get callback variables, with default values if unintialized
callback_vars = get_callback_vars(_locals["self"], plot=None)
# get the monitor's data
x, y = ts2xy(load_results(log_dir), 'timesteps')
if callback_vars["plot"] is None: # make the plot
plt.ion()
fig = plt.figure(figsize=(6, 3))
ax = fig.add_subplot(111)
line, = ax.plot(x, y)
callback_vars["plot"] = (line, ax, fig)
plt.show()
else: # update and rescale the plot
callback_vars["plot"][0].set_data(x, y)
callback_vars["plot"][-2].relim()
callback_vars["plot"][-2].set_xlim([_locals["total_timesteps"] * -0.02,
_locals["total_timesteps"] * 1.02])
callback_vars["plot"][-2].autoscale_view(True, True, True)
callback_vars["plot"][-1].canvas.draw()
def compose_callback(*callback_funcs): # takes a list of functions, and returns the composed function.
def _callback(_locals, _globals):
continue_training = True
for cb_func in callback_funcs:
if cb_func(_locals, _globals) is False: # as a callback can return None for legacy reasons.
continue_training = False
return continue_training
return _callback
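# Illustrative composition (an assumption, not used below): compose_callback can
# combine several of the callbacks defined above into a single one, e.g.
#   model.learn(total_timesteps=time_steps,
#               callback=compose_callback(auto_save_callback, plotting_callback))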
def callback(_locals, _globals):
"""
    Callback called at each step (for DQN and others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward
# Print stats every 1000 calls
if (n_steps + 1) % 1000 == 0:
# Evaluate policy training performance
x, y = ts2xy(load_results(log_dir), 'timesteps')
if len(x) > 0:
mean_reward = np.mean(y[-100:])
print(x[-1], 'timesteps')
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > best_mean_reward:
best_mean_reward = mean_reward
# Example for saving best model
print("Saving new best model")
_locals['self'].save(log_dir + 'best_model.pkl')
n_steps += 1
return True
def moving_average(values, window):
"""
Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
:return: (numpy array)
"""
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
def plot_results(log_folder, title='Learning Curve'):
"""
plot the results
:param log_folder: (str) the save location of the results to plot
:param title: (str) the title of the task to plot
"""
x, y = ts2xy(load_results(log_folder), 'timesteps')
# print(len(x), len(y))
# y = moving_average(y, window=50)
# print(len(x), len(y))
# Truncate x
# x = x[len(x) - len(y):]
# print(len(x), len(y))
fig = plt.figure(title)
plt.plot(x, y, '.')
plt.xlabel('Number of Timesteps')
plt.ylabel('Rewards')
plt.title(title + " Smoothed")
plt.show()
################ TRAINING
model.learn(total_timesteps=time_steps, callback=auto_save_callback, seed=args.random_seed)
# print('save model')
# savemodel(model, MODEL, ENVIRONMENT, DATE)
# results_plotter.plot_results([log_dir], time_steps, results_plotter.X_TIMESTEPS, "RGB Observation")
# plt.savefig('rewardvssteps_%s_%s_%s.png' % (MODEL, ENVIRONMENT, DATE))
print('total time', time.time()-start)
# plot_results(log_dir)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function
import sys
import json
try:
import rapidjson
fast_json_available = True
except ImportError:
fast_json_available = False
from xml.dom.minidom import parseString as parse_xml_string
try:
from lxml import etree
fast_xml_available = True
except ImportError:
fast_xml_available = False
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import TerminalFormatter
from . import utils
class NumericRounder(object):
"""A processor for rounding numbers in the event values
For instance, ``1.162537216`` will be changed to ``1.163``.
"""
def __init__(self, digits=3, only_fields=None):
"""Create a processor that rounds numbers in the event values
:param digits: The number of digits to round to
:param only_fields: An iterable specifying the fields to round
"""
self.digits = digits
try:
self.only_fields = set(only_fields)
except TypeError:
self.only_fields = None
def __call__(self, _, __, event_dict):
for key, value in event_dict.items():
if self.only_fields is not None and key not in self.only_fields:
continue
if isinstance(value, bool):
continue # don't convert True to 1.0
try:
event_dict[key] = round(value, self.digits)
except TypeError:
continue
return event_dict
class JSONPrettifier(object):
"""A processor for prettifying JSON strings
For instance, ``{"numbers":[1,2]}`` will be changed to this::
{
"numbers": [
1,
2
]
}
"""
def __init__(self, json_fields):
"""Create a processor that prettifies JSON strings in the event values
:param json_fields: An iterable specifying the fields to prettify
"""
self.fields = json_fields
self.prettify = self.fast_prettify if fast_json_available else self.slow_prettify
@staticmethod
def slow_prettify(code):
return json.dumps(json.loads(code), indent=2)
@staticmethod
def fast_prettify(code):
return rapidjson.dumps(rapidjson.loads(code), indent=2)
def __call__(self, _, __, event_dict):
for field in self.fields:
try:
code = event_dict[field]
except KeyError:
continue
if not code:
continue
event_dict[field] = self.prettify(code)
return event_dict
class XMLPrettifier(object):
"""A processor for prettifying XML strings
For instance, ``<body><elem/><elem /></body>`` will be changed to this::
<body>
<elem/>
<elem/>
</body>
"""
def __init__(self, xml_fields):
"""Create a processor that prettifies XML strings in the event values
:param xml_fields: An iterable specifying the fields to prettify
"""
self.fields = xml_fields
if fast_xml_available:
self.prettify = self.fast_prettify
self.lxml_parser = etree.XMLParser(remove_blank_text=True)
else:
self.prettify = self.slow_prettify
self.lxml_parser = None
@staticmethod
def slow_prettify(code):
xml = parse_xml_string(code)
utils.strip_minidom_whitespace(xml)
xml.normalize()
result = xml.toprettyxml(indent=' ')
result = result.replace('<?xml version="1.0" ?>\n', '')
return result.strip()
def fast_prettify(self, code):
result = etree.tostring(etree.fromstring(code.encode(), parser=self.lxml_parser), pretty_print=True)
return result.strip().decode()
def __call__(self, _, __, event_dict):
for field in self.fields:
try:
code = event_dict[field]
except KeyError:
continue
if not code:
continue
event_dict[field] = self.prettify(code)
return event_dict
class SyntaxHighlighter(object):
"""A processor for syntax highlighting code"""
def __init__(self, field_map):
"""Create a processor that syntax highlights code in the event values
        The syntax highlighting will use ANSI terminal color codes.
        :param field_map: A mapping with field names mapped to languages, e.g.
            ``{'body': 'json', 'soap_response': 'xml'}``
"""
self.lexers = {
field: get_lexer_by_name(language)
for field, language in field_map.items()
}
def __call__(self, _, __, event_dict):
for field, lexer in self.lexers.items():
try:
code = event_dict[field]
except KeyError:
continue
event_dict[field] = highlight(code, lexer, TerminalFormatter())
return event_dict
class MultilinePrinter(object):
"""A processor for printing multiline strings"""
def __init__(self, fields, target=sys.stdout):
"""Create a processor that prints the requested fields' values
This is useful for strings with newlines in them. Keep in mind that the
fields will be popped from the event dictionary, so they will not be
visible to anything (other processors and the logger itself) after this
processor has printed them.
:param fields: An iterable specifying the fields to print
:param target: A file-like object to print to
"""
self.fields = fields
self.target = target
def __call__(self, _, __, event_dict):
for field in self.fields:
try:
print(event_dict.pop(field), file=self.target, end='')
except KeyError:
continue
return event_dict
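# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): each class above
# follows the structlog processor signature ``(logger, method_name,
# event_dict)``, so it can be dropped into a structlog processor chain. The
# field names and values below are illustrative assumptions.
if __name__ == "__main__":  # pragma: no cover
    import structlog
    structlog.configure(
        processors=[
            NumericRounder(digits=2, only_fields={"duration"}),
            JSONPrettifier(json_fields=["body"]),
            SyntaxHighlighter({"body": "json"}),
            structlog.processors.KeyValueRenderer(),
        ]
    )
    structlog.get_logger().info(
        "request_done", duration=1.162537216, body='{"numbers":[1,2]}'
    )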
|
nilq/baby-python
|
python
|
from twisted.trial.unittest import TestCase
import jasmin.vendor.txredisapi as redis
from twisted.internet import reactor, defer
from jasmin.redis.configs import RedisForJasminConfig
from jasmin.redis.client import ConnectionWithConfiguration
@defer.inlineCallbacks
def waitFor(seconds):
# Wait seconds
waitDeferred = defer.Deferred()
reactor.callLater(seconds, waitDeferred.callback, None)
yield waitDeferred
class AuthenticationTestCase(TestCase):
@defer.inlineCallbacks
def setUp(self):
# Connect to redis server
self.RedisForJasminConfigInstance = RedisForJasminConfig()
self.RedisForJasminConfigInstance.password = 'guest'
self.redisClient = yield ConnectionWithConfiguration(self.RedisForJasminConfigInstance)
yield self.redisClient._connected
@defer.inlineCallbacks
def tearDown(self):
yield self.redisClient.disconnect()
@defer.inlineCallbacks
def test_auth(self):
try:
# Authenticate and select db
yield self.redisClient.auth(self.RedisForJasminConfigInstance.password)
yield self.redisClient.select(self.RedisForJasminConfigInstance.dbid)
except Exception as e:
self.assertEqual(type(e), redis.ResponseError)
self.assertEqual(str(e), 'ERR Client sent AUTH, but no password is set')
class RedisTestCase(TestCase):
@defer.inlineCallbacks
def setUp(self):
# Connect to redis server
RedisForJasminConfigInstance = RedisForJasminConfig()
# No auth
RedisForJasminConfigInstance.password = None
self.redisClient = yield ConnectionWithConfiguration(RedisForJasminConfigInstance)
# Authenticate and select db
if RedisForJasminConfigInstance.password is not None:
yield self.redisClient.auth(RedisForJasminConfigInstance.password)
yield self.redisClient.select(RedisForJasminConfigInstance.dbid)
yield self.redisClient._connected
@defer.inlineCallbacks
def tearDown(self):
yield self.redisClient.disconnect()
class DataTestCase(RedisTestCase):
@defer.inlineCallbacks
def test_set_get_string(self):
yield self.redisClient.set('foo', 'bar')
g = yield self.redisClient.get('foo')
self.assertEqual(g, 'bar')
@defer.inlineCallbacks
def test_set_get_list(self):
yield self.redisClient.set('foo:url', 'url of foo')
yield self.redisClient.set('foo:level', 'level of foo')
yield self.redisClient.set('bar:url', 'url of bar')
yield self.redisClient.set('bar:level', 'level of bar')
g = yield self.redisClient.get('foo:url')
self.assertEqual(g, 'url of foo')
g = yield self.redisClient.get('foo:level')
self.assertEqual(g, 'level of foo')
g = yield self.redisClient.get('bar:url')
self.assertEqual(g, 'url of bar')
g = yield self.redisClient.get('bar:level')
self.assertEqual(g, 'level of bar')
@defer.inlineCallbacks
def test_hmset(self):
yield self.redisClient.hmset('h_test', {'key_a': 'value_a', 'key_b': 'value_b'})
# Get desired keys
g = yield self.redisClient.hmget('h_test', ['key_a', 'key_b', 'anything'])
self.assertEqual(g, [u'value_a', u'value_b', None])
# Get all keys
g = yield self.redisClient.hgetall('h_test')
self.assertEqual(g, {u'key_a': u'value_a', u'key_b': u'value_b'})
# Get incorrect redis key
g = yield self.redisClient.hgetall('incorrect')
self.assertEqual(g, {})
@defer.inlineCallbacks
def test_hmset_expiry(self):
yield self.redisClient.hmset('h_test', {'key_a': 'value_a', 'key_b': 'value_b'})
yield self.redisClient.expire('h_test', 5)
# .addCallback(
# self.redisClient.expire, 5
# )
# Get desired keys
g = yield self.redisClient.hgetall('h_test')
self.assertEqual(g, {u'key_a': u'value_a', u'key_b': u'value_b'})
# Wait 6 seconds
yield waitFor(6)
# Redis key must be expired
g = yield self.redisClient.hgetall('h_test')
self.assertEqual(g, {})
|
nilq/baby-python
|
python
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from .models import Book
# Create your views here.
def all_book(request):
all_shit = Book.objects.all()
return render(request, 'bookstore/all_book.html', locals())
def add_book(request):
if request.method == 'GET':
return render(request, 'bookstore/add_book.html')
elif request.method == 'POST':
title = request.POST.get('title')
pub = request.POST.get('pub')
price = request.POST.get('price')
market_price = request.POST.get('market_price')
Book.objects.create(title=title, pub=pub, price=price, market_price=market_price)
return HttpResponseRedirect('/bookstore/all_book')
def update_book(request, bid):
    try:
        i = Book.objects.get(id=bid)
    except Exception:
        return HttpResponse('Invalid book id!')
if request.method == 'GET':
return render(request, 'bookstore/update_book.html', locals())
elif request.method == 'POST':
market_price1 = request.POST['market_price']
pub1 = request.POST['pub']
i.market_price = market_price1
i.pub = pub1
i.save()
return HttpResponseRedirect('/bookstore/all_book')
def delete_book(request):
did = request.GET.get('bid')
    # fetch the object to be deleted
    try:
        book = Book.objects.get(id=did)
    except Exception:
        return HttpResponse('Invalid book id!')
book.delete()
return HttpResponseRedirect('/bookstore/all_book')
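# ---------------------------------------------------------------------------
# Illustrative URLconf sketch (an assumption, not part of this file) showing
# how these views might be wired up; the path names are guesses based on the
# redirects used above.
#
#   # bookstore/urls.py
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('all_book', views.all_book),
#       path('add_book', views.add_book),
#       path('update_book/<int:bid>', views.update_book),
#       path('delete_book', views.delete_book),
#   ]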
|
nilq/baby-python
|
python
|
import inpcon_posint as icpi
while True:
    #note: the zeroth Fibonacci number is 0, so it is handled as a special case below
inptext='Which Fibonacci number do you want to see?: '
inp=icpi.inputcontrol(inptext)
if inp==0:
print(0)
print()
print()
continue
    erg=[0,1]
    for i in range(inp-1):
        erg.append(erg[-1]+erg[-2])
    print(erg[-1])
print()
print()
|
nilq/baby-python
|
python
|
import scipy.signal as ss
import matplotlib.pyplot as plt
import numpy as np
from .PluginManager import PluginManager
class HilbertPlugin(PluginManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hilbert = {}
def hilbert_transform(self, phase_freq=0):
self.hilbert['data'] = ss.hilbert(self.data)
self.hilbert['amplitude'] = np.abs(self.hilbert['data'])
self.hilbert['power'] = self.hilbert['amplitude']**2
self.hilbert['phase'] = np.unwrap(np.angle(self.hilbert['data']))
        # The phase is made up of:
        # - pi/2, coming from the sin/cos shift
        # - the instantaneous phase of the signal
        # - w0 * t, where w0 is 2 pi f (the frequency for which we check the phase)
        self.hilbert['phase'] = np.pi / 2 + self.hilbert[
            'phase'] - 2 * np.pi * phase_freq * self.t  # Follows the formula from brain.fuw.edu.pl
self.hilbert['phase'] /= np.pi
return self
def hilbert_subtract_base(self, low, high):
low_samp = np.where(self.t == low)[0][0]
high_samp = np.where(self.t == high)[0][0]
for epoch in range(self.epochs):
for channel in range(self.num_channels):
base = np.mean(self.hilbert['power'][epoch, channel, low_samp: high_samp])
self.hilbert['power'][epoch, channel] -= base
self.hilbert['power'][epoch, channel] /= base
return self
def hilbert_mean_power(self):
self.hilbert['power'] = np.mean(self.hilbert['power'], axis=0)
self.hilbert['power'] = np.reshape(self.hilbert['power'], (1, *self.hilbert['power'].shape))
return self
def hilbert_power_plot(
self,
fig=None,
ax=None,
title='',
xlabel='',
ylabel='',
legend=True,
color=None,
*args,
**kwargs):
color = color if color else self.graphics_style['line_color']
if 'plt_style' in self.graphics_style.keys():
plt.style.use(self.graphics_style['plt_style'])
        # We show the graph if no fig or ax is given, assuming that is the desired behaviour.
show = False
if fig is None or ax is None:
show = True
fig, ax = plt.subplots(nrows=self.num_channels, ncols=1)
if self.num_channels == 1:
ax = [ax]
for epoch in self.hilbert['power']:
for idx, channel in enumerate(epoch):
ax[idx].plot(
self.t,
channel,
color=color,
*args,
**kwargs
)
for tag in self.tags:
ax[idx].axvline(
tag / self.fs,
color='#000000',
ls='--'
)
ax[idx].margins(0.1, 0.1)
ax[idx].set_title(
self.channel_names[idx],
fontsize=20
)
ax[idx].set_facecolor(self.graphics_style['plot_background'])
ax[idx].tick_params(labelsize=self.graphics_style['ticks_size'])
ax[idx].grid(self.graphics_style['show_grid'], color=self.graphics_style['grid_color'])
fig.text(
0.5,
0.05,
xlabel,
ha='center',
fontsize=self.graphics_style['label_size']
)
fig.text(
0.5,
0.95,
title,
ha='center',
fontsize=self.graphics_style['label_size']
)
fig.text(
0.04,
0.5,
ylabel,
va='center',
rotation='vertical',
fontsize=self.graphics_style['label_size']
)
fig.patch.set_facecolor(self.graphics_style['figure_background'])
# We only want the label to show once if multiple epochs
if 'label' in kwargs:
del kwargs['label']
if legend:
for a in ax:
a.legend()
if show:
plt.show()
plt.close()
|
nilq/baby-python
|
python
|
# Description: Sample Code to Run mypy
# Variables with type annotations
i:int = 200
f:float = 2.34
greeting:str = "Hello"
# A function with type annotations
def greet(name:str)-> str:
    return greeting + " " + name
if __name__ == '__main__':
greet("Dilbert")
|
nilq/baby-python
|
python
|
# This is library template. Do NOT import this, it won't do anything.
# Libraries are loaded with __import__, and thus the script is run on load. Be careful what you write here.
|
nilq/baby-python
|
python
|
version = "2.4.5"
default_app_config = "jazzmin.apps.JazzminConfig"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1093/A
t = int(input())
for _ in range(t):
n = int(input())
print(n//2)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from shutil import rmtree
from librosa.feature import mfcc
import numpy as np
from tensorflow.io import gfile
import uuid
from constants import *
def read_dir():
if not os.path.isdir(SOUNDS_DIR):
raise Exception('Sound directory with name \'' + SOUNDS_DIR + '\' not found!')
data = []
for word in WANTED_WORDS:
word_dir = SOUNDS_DIR + word
if not os.path.isdir(word_dir):
raise Exception('Sounds directory for \'' + word + '\' not found at ' + word_dir + '!')
search_path = os.path.join(word_dir, '*.wav')
for wav_path in gfile.glob(search_path):
data.append({'word': word, 'file': wav_path})
return data
def get_features():
features = []
print('Extracting MFCC features from WAV files')
for data in read_dir():
mfcc_feat = get_MFCC(data['file'])
features.append({'data': mfcc_feat, 'label': data['word']})
save_features(features)
def get_MFCC(wav_path):
wav_loader = tf.io.read_file(wav_path)
wav_decoded = tf.audio.decode_wav(wav_loader, desired_channels=1).audio[:DESIRED_SAMPLES]
padding = tf.constant([[DESIRED_SAMPLES - len(wav_decoded), 0], [0, 0]])
audio_data = tf.pad(wav_decoded, padding)
reshaped_data = np.array(tf.reshape(audio_data, (SAMPLE_RATE,)))
feature = mfcc(reshaped_data, SAMPLE_RATE, n_mfcc=FEATURES_COUNT)
return tf.expand_dims(feature, -1)
def save_features(features):
if os.path.isdir(MFCCS_DIR):
rmtree(MFCCS_DIR)
print('Saving MFCC features as tensor files')
for feature in features:
filename = uuid.uuid4().hex + '.mfcc'
file_path = MFCCS_DIR + feature['label'] + '/' + filename
tensor = tf.dtypes.cast(feature['data'], dtype=tf.float32)
tf.io.write_file(file_path, tf.io.serialize_tensor(tensor))
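# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script): ``constants.py`` is assumed
# to define SOUNDS_DIR, MFCCS_DIR, WANTED_WORDS, SAMPLE_RATE, DESIRED_SAMPLES
# and FEATURES_COUNT; with those in place the extraction can be run directly.
if __name__ == '__main__':
    get_features()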
|
nilq/baby-python
|
python
|
import os.path
import yaml
from pathlib import Path
CONFIG_DIRECTORY = str(Path.home()) + "/.tino"
CONFIG_FILENAME = CONFIG_DIRECTORY + "/conf.yml"
class TinoConfig:
def __init__(self):
if not os.path.exists(CONFIG_DIRECTORY):
os.makedirs(CONFIG_DIRECTORY)
if os.path.exists(CONFIG_FILENAME):
with open(CONFIG_FILENAME, 'r') as stream:
                self.config = yaml.safe_load(stream)
else:
self.config = {}
def get_tino_config(self):
return self.config
def get_job_config(self, job_name):
if job_name not in self.config:
return {}
return self.config.get(job_name)
def get_job_variables(self, job_name):
job_config = self.get_job_config(job_name)
if "variables" not in job_config:
return {}
return job_config.get("variables")
def update_job_variables(self, job_name, variables):
if job_name not in self.config:
self.config[job_name] = {}
self.config[job_name]["variables"] = variables
with open(CONFIG_FILENAME, 'w') as outfile:
yaml.dump(self.config, outfile, default_flow_style=False)
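# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the job name and
# variables below are illustrative.
if __name__ == '__main__':
    config = TinoConfig()
    config.update_job_variables('build', {'ENV': 'staging'})
    print(config.get_job_variables('build'))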
|
nilq/baby-python
|
python
|
"""
Parameter-Based Methods Module
"""
from ._regular import RegularTransferLR, RegularTransferLC, RegularTransferNN
from ._finetuning import FineTuning
from ._transfer_tree import TransferTreeClassifier
from ._transfer_tree import TransferForestClassifier
__all__ = ["RegularTransferLR",
"RegularTransferLC",
"RegularTransferNN",
"FineTuning",
"TransferTreeClassifier",
"TransferForestClassifier"]
|
nilq/baby-python
|
python
|
"""
abuse.ch Palevo C&C feed RSS bot.
Maintainer: Lari Huttunen <mit-code@huttu.net>
"""
import urlparse
from abusehelper.core import bot
from . import host_or_ip, split_description, AbuseCHFeedBot
class PalevoCcBot(AbuseCHFeedBot):
feed_malware = "palevo"
feed_type = "c&c"
feeds = bot.ListParam(default=["https://palevotracker.abuse.ch/?rssfeed"])
def parse_link(self, link):
        # The source seems to provide invalid links, which can
        # be fixed by changing the URL scheme from http to https.
split = urlparse.urlparse(link)
if split[0].lower() == "http":
link = urlparse.urlunparse(["https"] + list(split[1:]))
yield "description url", link
def parse_title(self, title):
yield host_or_ip(title.split()[0])
def parse_description(self, description):
for key, value in split_description(description):
if key == "status":
yield key, value
elif key == "sbl" and value.lower() != "not listed":
yield key + " id", value
elif key == "ip address":
yield "ip", value
if __name__ == "__main__":
PalevoCcBot.from_command_line().execute()
|
nilq/baby-python
|
python
|
import unittest
import logging
# disable the API's logging system
logging.disable(logging.CRITICAL)
from fastapi.testclient import TestClient
from app.main import app
client = TestClient(app)
root_response = '''<html>
<head>
<title>Guane Inter FastAPI</title>
</head>
<body>
<h1>Hello World!!!</h1>
</body>
</html>'''
class TestMainEndpoints(unittest.TestCase):
def test_root_endpoint(self):
response = client.get('/')
text = response.text
self.assertEqual(root_response, text)
def make_login(self, username, password):
response = client.post('/token',
headers={'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'},
data={'username': username,
'password': password})
return response.json()
def test_perfect_login(self):
token_data = self.make_login('Luispapiernik', 'Luispapiernik')
keys = token_data.keys()
self.assertIn('access_token', keys)
self.assertIn('token_type', keys)
self.assertIsInstance(token_data['access_token'], str)
self.assertEqual(token_data['token_type'], 'bearer')
def test_login_invalid_credentials(self):
error_data = self.make_login('invalid_user', 'incorrect_password')
keys = error_data.keys()
self.assertIn('detail', keys)
self.assertEqual(error_data['detail'], 'Incorrect username or password')
|
nilq/baby-python
|
python
|
"""Represents a realm in World of Warcraft."""
from __future__ import annotations
__LICENSE__ = """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask_sqlalchemy import BaseQuery
from typing import Optional
from wowapi import WowApi
from pytz import timezone
from api.base import db, BaseSerializerMixin
from api.mod_wow.region import Region
class WowRealm(db.Model, BaseSerializerMixin):
"""Represents a world of warcraft realm.
:attr id: ID of the realm, matching Blizzard's Game API ID.
:attr name: Name of the realm, in en_US locale.
:attr slug: Slug id of the realm, used to query related data.
:attr region: Region this realm belongs to.
:attr timezone_name: The server-side timezone of this realm.
"""
__tablename__ = 'wow_realms'
# Automatically created by db.Model but clarifying existence for mypy.
query: BaseQuery
# Serialization options
serialize_rules = ('-timezone',)
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(
db.DateTime,
default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime,
default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
name = db.Column(db.String)
slug = db.Column(db.String)
region = db.Column(db.Enum(Region))
timezone_name = db.Column(db.String)
@property
def timezone(self):
"""Returns the timezone object of this realm."""
return timezone(self.timezone_name)
@classmethod
def create_from_api(cls, handler: WowApi, region: Region, realm_slug: str) -> WowRealm:
"""Creates a WowPlayableClass from the data returned by the WoW API"""
data = handler.get_realm(region.value, region.dynamic_namespace, realm_slug, locale='en_US')
realm = cls()
realm.id = data['id']
realm.name = data['name']
realm.slug = data['slug']
realm.region = region
realm.timezone_name = data['timezone']
return realm
@classmethod
def get_or_create(cls, handler: WowApi, region: Region, realm_slug: str) -> WowRealm:
"""Try to get a WowRealm from the database or create it from the API."""
realm: Optional[WowRealm] = cls.query.filter_by(region=region, slug=realm_slug).one_or_none()
if realm is None:
realm = cls.create_from_api(handler, region, realm_slug)
return realm
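# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): resolving a
# realm needs an authenticated WowApi handler and an active application /
# database session; the credentials, region member and realm slug below are
# assumptions.
#
#   handler = WowApi(client_id='...', client_secret='...')
#   realm = WowRealm.get_or_create(handler, Region.us, 'tichondrius')
#   db.session.add(realm)
#   db.session.commit()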
|
nilq/baby-python
|
python
|
# Copyright (c) 2018 - 2020 Institute for High Voltage Technology and Institute for High Voltage Equipment and Grids, Digitalization and Power Economics
# RWTH Aachen University
# Contact: Thomas Offergeld (t.offergeld@iaew.rwth-aachen.de)
# #
# This module is part of CIMPyORM.
# #
# CIMPyORM is licensed under the BSD-3-Clause license.
# For further information see LICENSE in the project's root directory.
#
import json
from argparse import Namespace
import os
from collections import ChainMap, defaultdict
from collections.abc import Iterable
from defusedxml.lxml import parse
import networkx as nx
from networkx import DiGraph, bfs_tree, dfs_tree
from networkx.exception import NetworkXNoPath
from sqlalchemy import TEXT, Integer, Column
from sqlalchemy.exc import InvalidRequestError, OperationalError
from cimpyorm.auxiliary import HDict, merge_descriptions, find_rdfs_path, get_logger, apply_xpath, XPath
from cimpyorm.Model.Elements.Base import CIMNamespace, CIMProfile, prop_used_in, se_type, CIMPackage, ElementMixin, \
se_ref
from cimpyorm.Model.Elements.Enum import CIMEnum, CIMEnumValue
from cimpyorm.Model.Elements.Class import CIMClass
from cimpyorm.Model.Elements.Property import CIMProp, CIMProp_AlphaNumeric, CIMProp_Enumeration, CIMProp_Reference
from cimpyorm.Model.Elements.Datatype import CIMDT
from cimpyorm.backends import InMemory
from cimpyorm.Model.auxiliary import Base
log = get_logger(__name__)
class Schema:
def __init__(self, dataset=None, version: str = "16", rdfs_path=None, profile_whitelist=None):
"""
Initialize a Schema object, containing information about the schema elements.
"""
self.g = None
if not dataset:
backend = InMemory()
backend.reset()
dataset = backend.ORM
if not rdfs_path:
rdfs_path = find_rdfs_path(version)
if not rdfs_path:
raise FileNotFoundError("Failed to find schema file. Please provide one.")
self.rdfs_path = rdfs_path
if profile_whitelist:
profile_whitelist = self.parse_profile_whitelist(profile_whitelist)
self.profiles = profile_whitelist
self.schema_descriptions, profiles = merge_schema_descriptions(
load_schema_descriptions(rdfs_path), profile_whitelist)
log.info(f"Generating Schema backend.")
try:
elements = dataset.query(CIMClass).count()
except OperationalError:
elements = None
if elements:
# A schema is already present, so just load it instead of recreating
self.session = dataset
self.Element_classes = {c.__name__: c for c in
[CIMPackage, CIMClass, CIMProp, CIMDT, CIMEnum, CIMEnumValue]}
self.Elements = {c.__name__: {cim_class.name: cim_class for cim_class in dataset.query(c).all()}
for c in self.Element_classes.values()}
else:
self.session = dataset
self.Element_classes = {c.__name__: c for c in
[ElementMixin, CIMPackage, CIMClass, CIMProp, CIMDT, CIMEnum,
CIMEnumValue]}
self.Elements = {c.__name__: defaultdict(list) for c in self.Element_classes.values()}
_Elements = []
merged_nsmaps = dict(ChainMap(*(element.nsmap for element in
self.schema_descriptions.values())))
profiles = self._generate_profiles(profiles, merged_nsmaps, rdfs_path)
self.session.add_all(profiles.values())
xp = {"type_res": XPath(f"rdf:type/@rdf:resource", namespaces=merged_nsmaps),
"stype_res": XPath(f"cims:stereotype/@rdf:resource", namespaces=merged_nsmaps),
"stype_txt": XPath(f"cims:stereotype/text()", namespaces=merged_nsmaps)}
for key, element in self.schema_descriptions.items():
element.extract_types(xp)
element.schema_type = element.get_type(xp)
self._init_parser(merged_nsmaps)
for short, full_uri in merged_nsmaps.items():
_ns = CIMNamespace(short=short, full_name=full_uri)
self.session.add(_ns)
self._generate(profiles)
self.session.commit()
for _, Cat_Elements in self.Elements.items():
self.session.add_all(Cat_Elements.values())
self.session.commit()
log.info(f"Schema generated")
self._generate_ORM(dataset, profiles)
dataset.schema = self
def _generate_profiles(self, profiles, nsmap, rdfs_path=None):
objects = {}
if rdfs_path:
filepath = os.path.abspath(os.path.join(rdfs_path, "Profile_Dependencies.json"))
if os.path.isfile(filepath):
with open(filepath, "r") as f:
raw = json.loads(f.read())
dependencies = defaultdict(dict)
for profile in raw["Profiles"]:
if "Mandatory" in profile:
dependencies[profile["Name"]]["Mandatory"] = profile["Mandatory"]
if "Optional" in profile:
dependencies[profile["Name"]]["Optional"] = profile["Optional"]
for profile in profiles:
if not profile.endswith("Profile"):
raise ValueError("Invalid profile identifier.")
uri_pattern = profile.replace("Profile", "Version") + ".entsoeURI"
short_pattern = profile.replace("Profile", "Version") + ".shortName"
uri_matches = {key: item for key, item in self.schema_descriptions.items()
if uri_pattern in key}
short_matches = {key: item for key, item in self.schema_descriptions.items()
if short_pattern in key}
URI = json.dumps(
{key.split("#")[-1]: item.descriptions[profile].xpath(
"cims:isFixed/@rdfs:Literal", namespaces=nsmap)[0] for key, item in
uri_matches.items()}
)
_sm = list(short_matches)
if not _sm:
raise ValueError("Profile not defined.")
if len(list(short_matches.values())) > 1:
raise ValueError("Ambiguous profile shortName.")
short = next(iter(short_matches.values())).descriptions[profile].xpath(
"cims:isFixed/@rdfs:Literal", namespaces=nsmap)[0]
_p = CIMProfile(name=profile, uri=URI, short=short)
objects[profile] = _p
for profile, object in objects.items():
try:
if "Mandatory" in dependencies[profile]:
object.mandatory_dependencies = [objects[dependency] for dependency in
dependencies[profile]["Mandatory"]]
except KeyError:
raise ValueError(f"An invalid composition of profiles was given. {profile} depends on"
f" {dependencies[profile]['Mandatory']}, however, at least one of them was not " \
"included in the whitelist.")
if "Optional" in dependencies[profile]:
object.optional_dependencies = [objects[dependency] for dependency in
dependencies[profile]["Optional"] if dependency in objects]
return objects
def deduplicate(self):
for se_type, objects in self.Elements.items():
for key, values in objects.items():
if len(values) > 1:
descrs = [value.schema_elements for value in values]
objects[key] = self.Element_classes[se_type](merge_descriptions(descrs),
values[0].profile_name)
else:
objects[key] = values[0]
def get_inheritance_graph(self, profiles=None):
"""
Determine the class inheritance hierarchy (class definition needs to adhere to strict inheritance hierarchy)
:return: g - A networkx DiGraph of the class hierarchy, with a common ancestor __root__
"""
# Determine class inheritance hierarchy (bfs on a directed graph)
if not profiles:
log.info(f"No profiles specified - using all profiles for ORM.")
elif not isinstance(profiles, Iterable):
profiles = (profiles,)
g = DiGraph()
g.add_node("__root__")
class_list = list(self.session.query(CIMClass).all())
classes = {}
for c in class_list:
if (c.namespace.short, c.name) in classes:
raise ValueError("Duplicate class identity: %s_%s." % (c.namespace.short, c.name))
classes[(c.namespace.short, c.name)] = c
nodes = classes.keys()
g.add_nodes_from(nodes)
for key, instance in classes.items():
if instance:
parent = instance.parent
if parent is None:
g.add_edge("__root__", key)
else:
parent_key = (parent.namespace.short, parent.name)
g.add_edge(parent_key, key)
return g, classes
def _init_parser(self, nsmap):
ElementMixin.nsmap = HDict(nsmap) # Set the nsmap on the Baseclass.
for c in self.Element_classes.values():
c._generateXPathMap()
@property
def model(self):
for class_ in self.session.query(CIMClass).all():
class_.p = Namespace(**class_.all_props)
for enum_ in self.session.query(CIMEnum).all():
enum_.v = Namespace(**{value.name: value for value in enum_.values})
# The cim namespace is provided in top-level model as default namespace. Everything else
# is hidden in separate Namespaces
namespaces = {ns.short: ns for ns in self.session.query(CIMNamespace)}
classes = {}
for short, namespace in namespaces.items():
classes[short] = \
Namespace(**{c.name: c.class_ for c in
self.session.query(CIMClass).filter(CIMClass.namespace == namespace)})
return Namespace(**classes["cim"].__dict__,
**classes,
**{"dt": Namespace(**{c.name: c for c in self.session.query(CIMDT).all()})},
**{"classes": Namespace(**{c.name: c for c in self.session.query(CIMClass).all()})},
**{"enum": Namespace(**{c.name: c for c in self.session.query(
CIMEnum).all()})},
**{"schema": self})
def get_classes(self):
return {c.name: c.class_ for c in self.session.query(CIMClass).all()}
def _generate(self, profiles):
_Elements = self.Elements
postponed = []
insertables = []
for key, element in self.schema_descriptions.items():
if not element.schema_type.postpone:
type_name = element.schema_type.name
try:
obj = self.Element_classes[type_name](element)
_Elements[type_name][obj.u_key] = obj
obj.used_in = [profiles[_p] for _p in element.get_all_profiles()]
if isinstance(obj, CIMClass):
element_profile = element.get_profile()
obj.defined_in = element_profile
except KeyError:
log.warning(f"Unknown element: {element}.")
else:
postponed.append(element)
for element in postponed:
type_res = element.type_res
if type_res and type_res[0].endswith("#Property"):
obj = CIMProp(element)
domain = obj._get_domain()
if se_ref(domain[1], domain[0]) in _Elements["CIMDT"].keys():
dt = _Elements["CIMDT"][se_ref(domain[1], domain[0])]
if obj.name == "unit":
dt.set_unit(element.descriptions, type="nominator")
elif obj.name == "value":
dt.set_datatype(element.descriptions)
elif obj.name == "multiplier":
dt.set_multiplier(element.descriptions, type="nominator")
elif obj.name == "denominatorUnit":
dt.set_unit(element.descriptions, type="denominator")
elif obj.name == "denominatorMultiplier":
dt.set_multiplier(element.descriptions, type="denominator")
else:
raise TypeError
else:
if not obj.range_name:
obj = CIMProp_AlphaNumeric(element)
else:
range = obj._get_range()
key = se_ref(range[1], obj.namespace_name)
if key in _Elements["CIMEnum"]:
obj = CIMProp_Enumeration(element)
else:
obj = CIMProp_Reference(element)
_Elements["CIMProp"][obj.u_key] = obj
obj.defined_in = element.get_profile()
# ToDo: Find out why using "allowed_in" causes UNIQUE constraint errors on
# CIMProp
# obj.allowed_in = [profiles[_p] for _p in element.get_all_profiles()]
for profile in element.get_all_profiles():
insertables.append(
prop_used_in.insert().values(
profile_name=profile,
prop_namespace=obj.namespace_name,
prop_name=obj.name,
prop_cls_namespace=obj.cls_namespace,
prop_cls_name=obj.cls_name))
continue
obj = CIMEnumValue(element)
enum = obj._get_enum()
if se_ref(enum[1], enum[0]) in _Elements["CIMEnum"]:
_Elements["CIMEnumValue"][obj.u_key] = obj
else:
name = enum[1]
_notfound = True
for key, enum in _Elements["CIMEnum"].items():
if enum.name == name:
obj.namespace_name = key.namespace_name
obj.enum_namespace = key.namespace_name
_Elements["CIMEnumValue"][obj.u_key] = obj
_notfound=False
break
if _notfound:
log.warning(f"Failed to identify purpose for {type_res}")
for insertable in insertables:
self.session.execute(insertable)
@property
def map(self):
if not self.g:
g = DiGraph()
classnames = [_[0] for _ in self.session.query(CIMClass.name).all()]
classes = self.session.query(CIMClass).all()
enums = self.session.query(CIMEnum).all()
enumnames = [_[0] for _ in self.session.query(CIMEnum.name).all()]
propnames = [_[0] for _ in self.session.query(CIMProp.name).all()]
g.add_nodes_from(classnames)
g.add_nodes_from(enumnames)
g.add_nodes_from(propnames)
for node in classes + enums:
try:
for prop in node.all_props.values():
if prop.range:
g.add_edge(node.name, prop.range.name, label=prop.label)
else:
g.add_edge(node.name, prop.name, label=prop.label)
except AttributeError:
pass
self.g = g
return self.g
def path(self, source, destination):
from fuzzyset import FuzzySet
if source == destination:
return
fuzz = FuzzySet(self.map.nodes)
if source not in self.map.nodes:
source = fuzzymatch(fuzz, source)
if destination not in self.map.nodes:
destination = fuzzymatch(fuzz, destination)
try:
path = nx.shortest_path(self.map, source, destination)
except NetworkXNoPath:
log.error(f"No path between {source.name} and {destination.name}.")
return
way = []
for iter in range(1, len(path)):
way.append(self.map.edges[path[iter-1], path[iter]]["label"])
return way
def deduplicate_schema_elements(self, _Elements, profile):
for Category, CatElements in _Elements.items():
log.debug(f"Merging {Category}.")
for NodeName, NodeElements in CatElements.items():
CatElements[NodeName] = self.Element_classes[Category](
merge_descriptions([e.schema_elements for e in NodeElements]), profile)
_Elements[Category] = dict(CatElements)
return _Elements
def flatten(self):
result = self.Elements
for _profile in self.Elements:
for Cat, Items in _profile.items():
for Item, Value in Items.items():
[result[Cat].append(v) for v in Value]
def _generate_ORM(self, session, profiles=None):
# Fixme: 20 seconds
hierarchy = self.class_hierarchy(profiles)
try:
for c in hierarchy:
c.init_type(Base)
        except InvalidRequestError as ex:
            log.warning(f"Suppressed InvalidRequestError during ORM generation: {ex}")
session.commit()
session.flush()
namespaces = session.query(CIMNamespace.short, CIMNamespace.full_name).all()
nsmap = {k: v for k, v in namespaces}
for c in hierarchy:
c.generate(nsmap)
log.info(f"Generated {len(hierarchy)} classes")
def class_hierarchy(self, profiles=None, mode="bfs"):
g, classes = self.get_inheritance_graph(profiles)
if mode == "dfs":
nodes = list(dfs_tree(g, "__root__"))
else:
nodes = list(bfs_tree(g, "__root__"))
nodes.remove("__root__")
return [classes[node] for node in nodes]
def parse_profile_whitelist(self, profile_whitelist):
filepath = os.path.abspath(os.path.join(self.rdfs_path, "Profile_Dependencies.json"))
if os.path.isfile(filepath):
with open(filepath, "r") as f:
raw = json.loads(f.read())
aliases = {profile["short"]: profile["Name"] for profile in raw["Profiles"]}
try:
profiles = set((aliases[profile] if profile not in aliases.values() else profile for profile in
profile_whitelist))
except KeyError:
raise ValueError(f"Unknown Profile shortName provided")
return profiles
class SchemaDescription:
def __init__(self, tree):
self.tree = tree
self.root = self.tree.getroot()
self.nsmap = self.root.nsmap
self.associated_profile = str(self._get_profile())
@classmethod
def from_file(cls, path):
return cls(parse(path))
def _get_profile(self):
first_element = self.root[0]
if not first_element.attrib.values()[0].endswith("Profile"):
raise ValueError("Profile element not found in schema description (should be position 1).")
return first_element.xpath(f"rdfs:label/text()", namespaces=self.nsmap)[0]
class SchemaElement:
def __init__(self, descriptions=None):
self.name = None
self._types = Namespace()
self.type_res = None
self.stype_res = None
self.stype_txt = None
self.nsmap = {}
self.schema_type = None
if not descriptions:
self.descriptions = {}
else:
self.descriptions = descriptions
for description in descriptions:
self.nsmap.update(description.nsmap)
def get_profile(self):
candidates = set([k for k, v in self._types.stype_res.items()
if v == "http://iec.ch/TC57/NonStandard/UML#concrete"])
if not candidates:
candidates = self.descriptions.keys()
if len(candidates) == 1:
return next(iter(candidates))
elif len(set((c.replace("Boundary", "") for c in candidates))) == 1:
return next(iter(candidates)).replace("Boundary", "")
else:
candidates
log.warning(f"Multiple profiles found for {self.name}. Defaulting to EquipmentProfile.")
return "EquipmentProfile"
def get_all_profiles(self):
return tuple(self.descriptions.keys())
def update(self, profile, description):
if not self.name:
self.name = description.values()[0]
elif not self.name == description.values()[0]:
raise ValueError("Ambiguous SchemaElement.")
if profile not in self.descriptions:
self.descriptions.update({profile: description})
else:
self.descriptions[profile].extend(description)
for k, v in description.nsmap.items():
if k in self.nsmap and not v == self.nsmap[k]:
raise ValueError("Ambiguous namespace definition.")
else:
self.nsmap[k] = v
def extract_types(self, xp):
self._types.type_res = self._value(xp["type_res"])
self._types.stype_res = self._value(xp["stype_res"])
self._types.stype_txt = self._value(xp["stype_txt"])
self.type_res = tuple(set(elements for elements in self._types.type_res.values()))
self.stype_res = tuple(set(elements for elements in self._types.stype_res.values()))
self.stype_txt = tuple(set(elements for elements in self._types.stype_txt.values()))
def get_type(self, xp):
type_res = self.type_res
stype_res = self.stype_res
stype_txt = self.stype_txt
if len(type_res) > 1:
raise ValueError
if len(stype_res) > 1 or len(stype_txt) > 1:
type_res
if type_res and any(v.endswith("#Class") for v in type_res):
# Element is a class object
if stype_res and stype_res[0].endswith("#enumeration"):
# Enumeration
return se_type("CIMEnum", False)
elif stype_txt and "CIMDatatype" in stype_txt or "Primitive" in stype_txt:
# Datatype
return se_type("CIMDT", False)
else:
# Proper class
return se_type("CIMClass", False)
elif type_res and any(v.endswith("#Property") for v in type_res):
# Properties can be several types of objects. We postpone, so we can determine the
# type later.
return se_type("Uncertain", True)
elif type_res and any(v.endswith("#ClassCategory") for v in type_res):
return se_type("CIMPackage", False)
else:
return se_type("Unknown", True)
def _value(self, xp):
res = {profile: set(xp(element)) for profile, element in self.descriptions.items() if xp(
element)}
for key, value in res.items():
if len(value) > 1:
value
res[key] = value.pop()
return res
def xpath(self, xpath_expr):
return apply_xpath(xpath_expr, self.descriptions)
def load_schema_descriptions(path):
"""
    Loads the schema descriptions
    :param path: Directory containing the .rdf schema description files
    :return: list of SchemaDescription objects
"""
return [SchemaDescription.from_file(os.path.join(path, file)) for file in os.listdir(path) if
file.endswith(".rdf")]
def merge_schema_descriptions(descriptions, profile_whitelist=None):
_elements = defaultdict(SchemaElement)
if not profile_whitelist:
profiles = set((d.associated_profile for d in descriptions))
else:
profiles = set(profile_whitelist)
for description in descriptions:
if description.associated_profile in profiles:
for child in description.root:
xml_key = child.values()[0]
_elements[xml_key].update(description.associated_profile, child)
_elements = dict(_elements)
return _elements, profiles
def merge_nsmaps(nsmaps):
merged = nsmaps[0]
for nsmap in nsmaps[1:]:
for k, v in nsmap.items():
if k in merged and v != merged[k]:
log.error("Incompatible namespaces in nsmaps")
merged[k] = v
return merged
def fuzzymatch(set, value):
result = set.get(value)
if result and result[0][0]>0.2:
log.warning(f"Did you mean {result[0][1]} (matched from {value})?")
return result[0][1]
else:
return None
class SchemaInfo(Base):
__tablename__ = "SchemaInfo"
namespaces = Column(TEXT)
id = Column(Integer, primary_key=True, autoincrement=True)
def __init__(self, nsmap):
"""
Initialize SchemaInfo object
        :param nsmap: The namespace map (dict) to store for this source
"""
self.namespaces = json.dumps(nsmap)
@property
def nsmap(self):
"""
Return the source's nsmap
:return: dict - The source's nsmap
"""
nsmap = json.loads(self.namespaces)
return nsmap
|
nilq/baby-python
|
python
|
import json
import sewer
class ExmpleDnsProvider(sewer.dns_providers.common.BaseDns):
def __init__(self):
self.dns_provider_name = 'example_dns_provider'
def create_dns_record(self, domain_name, base64_of_acme_keyauthorization):
pass
def delete_dns_record(self, domain_name, base64_of_acme_keyauthorization):
pass
class MockResponse(object):
"""
mock python-requests Response object
"""
def __init__(self, status_code=201, content='{"something": "ok"}'):
self.status_code = status_code
# the certificate tags are needed by the `get_certificate_chain` method of AcmeClient
self.content = content + '-----BEGIN CERTIFICATE----- some-mock-certificate -----END CERTIFICATE-----'
self.content_to_use_in_json_method = content
self.headers = {'Replay-Nonce': 'example-replay-Nonce'}
def json(self):
return json.loads(self.content_to_use_in_json_method)
r"""
Backrefs for the 'regex' module.
Add the ability to use the following backrefs with re:
* \Q and \Q...\E - Escape/quote chars (search)
* \c and \C...\E - Uppercase char or chars (replace)
* \l and \L...\E - Lowercase char or chars (replace)
Compiling
=========
pattern = compile_search(r'somepattern', flags)
replace = compile_replace(pattern, r'\1 some replace pattern')
Usage
=====
Recommended to use compiling. Assuming the above compiling:
text = pattern.sub(replace, 'sometext')
--or--
m = pattern.match('sometext')
if m:
text = replace(m) # similar to m.expand(template)
Licensed under MIT
Copyright (c) 2015 - 2016 Isaac Muse <isaacmuse@gmail.com>
"""
from __future__ import unicode_literals
import sys
import re
import functools
from collections import namedtuple
from . import compat
from . import common_tokens as ctok
try:
import regex
REGEX_SUPPORT = True
except Exception: # pragma: no coverage
REGEX_SUPPORT = False
MAXUNICODE = sys.maxunicode
NARROW = sys.maxunicode == 0xFFFF
if REGEX_SUPPORT:
# Expose some common re flags and methods to
# save having to import re and backrefs libs
D = regex.D
DEBUG = regex.DEBUG
A = regex.A
ASCII = regex.ASCII
B = regex.B
BESTMATCH = regex.BESTMATCH
E = regex.E
ENHANCEMATCH = regex.ENHANCEMATCH
F = regex.F
FULLCASE = regex.FULLCASE
I = regex.I
IGNORECASE = regex.IGNORECASE
L = regex.L
LOCALE = regex.LOCALE
M = regex.M
MULTILINE = regex.MULTILINE
R = regex.R
REVERSE = regex.REVERSE
S = regex.S
DOTALL = regex.DOTALL
U = regex.U
UNICODE = regex.UNICODE
X = regex.X
VERBOSE = regex.VERBOSE
V0 = regex.V0
VERSION0 = regex.VERSION0
V1 = regex.V1
VERSION1 = regex.VERSION1
W = regex.W
WORD = regex.WORD
P = regex.P
POSIX = regex.POSIX
DEFAULT_VERSION = regex.DEFAULT_VERSION
REGEX_TYPE = type(regex.compile('', 0))
escape = regex.escape
purge = regex.purge
# Replace flags
FORMAT = 1
# Case upper or lower
_UPPER = 0
_LOWER = 1
utokens = {
"regex_flags": re.compile(
r'(?s)(\\.)|\(\?((?:[Laberuxp]|V0|V1|-?[imsfw])+)[):]|(.)'
),
"replace_group_ref": re.compile(
r'''(?x)
(\\)|
(
[0-7]{3}|
[1-9][0-9]?|
[cClLEabfrtnv]|
g<(?:[a-zA-Z]+[a-zA-Z\d_]*|0+|0*[1-9][0-9]?)>|
U[0-9a-fA-F]{8}|
u[0-9a-fA-F]{4}|
x[0-9a-fA-F]{2}
)
'''
),
"format_replace_ref": re.compile(
r'''(?x)
(\\)|
(
[cClLEabfrtnv]|
U[0-9a-fA-F]{8}|
u[0-9a-fA-F]{4}|
x[0-9a-fA-F]{2}|
[0-7]{1,3}|
(
g<(?:[a-zA-Z]+[a-zA-Z\d_]*|0+|0*[1-9][0-9]?)>
)
)|
(\{)'''
),
"regex_search_ref": re.compile(r'(\\)|([(EQ])'),
"regex_search_ref_verbose": re.compile(r'(\\)|([(EQ#])'),
"v0": 'V0',
"v1": 'V1'
}
btokens = {
"regex_flags": re.compile(
br'(?s)(\\.)|\(\?((?:[Laberuxp]|V0|V1|-?[imsfw])+)[):]|(.)'
),
"replace_group_ref": re.compile(
br'''(?x)
(\\)|
(
[0-7]{3}|
[1-9][0-9]?|
[cClLEabfrtnv]|
g<(?:[a-zA-Z]+[a-zA-Z\d_]*|0+|0*[1-9][0-9]?)>|
x[0-9a-fA-F]{2}
)
'''
),
"format_replace_ref": re.compile(
br'''(?x)
(\\)|
(
[cClLEabfrtnv]|
x[0-9a-fA-F]{2}|
[0-7]{1,3}|
(
g<(?:[a-zA-Z]+[a-zA-Z\d_]*|0+|0*[1-9][0-9]?)>
)
)|
(\{)'''
),
"regex_search_ref": re.compile(br'(\\)|([EQ])'),
"regex_search_ref_verbose": re.compile(br'(\\)|([EQ#])'),
"v0": b'V0',
"v1": b'V1'
}
class RegexSearchTokens(compat.Tokens):
"""Tokens."""
def __init__(self, string, verbose):
"""Initialize."""
if isinstance(string, compat.binary_type):
tokens = btokens
ctokens = ctok.btokens
else:
tokens = utokens
ctokens = ctok.utokens
self.string = string
if verbose:
self._regex_search_ref = tokens["regex_search_ref_verbose"]
else:
self._regex_search_ref = tokens["regex_search_ref"]
self._b_slash = ctokens["b_slash"]
self.max_index = len(string) - 1
self.index = 0
self.current = None
def __iter__(self):
"""Iterate."""
return self
def iternext(self):
"""
Iterate through characters of the string.
Count escaped Q, E and backslash as a single char.
"""
if self.index > self.max_index:
raise StopIteration
char = self.string[self.index:self.index + 1]
if char == self._b_slash:
m = self._regex_search_ref.match(self.string[self.index + 1:])
if m:
char += m.group(1) if m.group(1) else m.group(2)
self.index += len(char)
self.current = char
return self.current
# Break apart template patterns into char tokens
class ReplaceTokens(compat.Tokens):
"""Preprocess replace tokens."""
def __init__(self, string, use_format=False):
"""Initialize."""
if isinstance(string, compat.binary_type):
ctokens = ctok.btokens
tokens = btokens
else:
ctokens = ctok.utokens
tokens = utokens
self.string = string
self.use_format = use_format
if use_format:
self._replace_ref = tokens["format_replace_ref"]
else:
self._replace_ref = tokens["replace_group_ref"]
self._format_replace_group = ctokens["format_replace_group"]
self._lc_bracket = ctokens["lc_bracket"]
self._rc_bracket = ctokens["rc_bracket"]
self._b_slash = ctokens["b_slash"]
self.max_index = len(string) - 1
self.index = 0
self.current = None
def __iter__(self):
"""Iterate."""
return self
def iternext(self):
"""
Iterate through characters of the string.
Count escaped l, L, c, C, E and backslash as a single char.
"""
if self.index > self.max_index:
raise StopIteration
char = self.string[self.index:self.index + 1]
if char == self._b_slash:
m = self._replace_ref.match(self.string[self.index + 1:])
if m:
if self.use_format and (m.group(3) or m.group(4)):
char += self._b_slash
self.index -= 1
if not self.use_format or not m.group(4):
char += m.group(1) if m.group(1) else m.group(2)
elif self.use_format and char in (self._lc_bracket, self._rc_bracket):
m = self._format_replace_group.match(self.string[self.index:])
if m:
if m.group(2):
char = m.group(2)
else:
self.index += 1
else:
raise ValueError("Single unmatched curly bracket!")
self.index += len(char)
self.current = char
return self.current
class RegexSearchTemplate(object):
"""Search Template."""
def __init__(self, search, re_verbose=False, re_version=0):
"""Initialize."""
if isinstance(search, compat.binary_type):
self.binary = True
tokens = btokens
ctokens = ctok.btokens
else:
self.binary = False
tokens = utokens
ctokens = ctok.utokens
self._verbose_flag = ctokens["verbose_flag"]
self._empty = ctokens["empty"]
self._b_slash = ctokens["b_slash"]
self._ls_bracket = ctokens["ls_bracket"]
self._rs_bracket = ctokens["rs_bracket"]
self._esc_end = ctokens["esc_end"]
self._end = ctokens["end"]
self._quote = ctokens["quote"]
self._negate = ctokens["negate"]
self._regex_flags = tokens["regex_flags"]
self._nl = ctokens["nl"]
self._hashtag = ctokens["hashtag"]
self._V0 = tokens["v0"]
self._V1 = tokens["v1"]
self.search = search
if regex.DEFAULT_VERSION == V0:
self.groups, quotes = self.find_char_groups_v0(search)
else: # pragma: no cover
self.groups, quotes = self.find_char_groups_v1(search)
self.verbose, self.version = self.find_flags(search, quotes, re_verbose, re_version)
if self.version != regex.DEFAULT_VERSION:
if self.version == V0: # pragma: no cover
self.groups = self.find_char_groups_v0(search)[0]
else:
self.groups = self.find_char_groups_v1(search)[0]
if self.verbose:
self._verbose_tokens = ctokens["verbose_tokens"]
else:
self._verbose_tokens = tuple()
self.extended = []
def find_flags(self, s, quotes, re_verbose, re_version):
"""Find verbose and unicode flags."""
new = []
start = 0
verbose_flag = re_verbose
version_flag = re_version
avoid = quotes + self.groups
avoid.sort()
if version_flag and verbose_flag:
return bool(verbose_flag), version_flag
for a in avoid:
new.append(s[start:a[0] + 1])
start = a[1]
new.append(s[start:])
for m in self._regex_flags.finditer(self._empty.join(new)):
if m.group(2):
if self._verbose_flag in m.group(2):
verbose_flag = True
if self._V0 in m.group(2):
version_flag = V0
elif self._V1 in m.group(2):
version_flag = V1
if version_flag and verbose_flag:
break
return bool(verbose_flag), version_flag if version_flag else regex.DEFAULT_VERSION
def find_char_groups_v0(self, s):
"""Find character groups."""
pos = 0
groups = []
quotes = []
quote_found = False
quote_start = 0
escaped = False
found = False
first = None
for c in compat.iterstring(s):
if c == self._b_slash:
escaped = not escaped
elif escaped and not found and not quote_found and c == self._quote:
quote_found = True
quote_start = pos - 1
escaped = False
elif escaped and not found and quote_found and c == self._end:
quotes.append((quote_start + 2, pos - 2))
quote_found = False
escaped = False
elif escaped:
escaped = False
elif quote_found:
pass
elif c == self._ls_bracket and not found:
found = True
first = pos
elif c == self._negate and found and (pos == first + 1):
first = pos
elif c == self._rs_bracket and found and (pos != first + 1):
groups.append((first + 1, pos - 1))
found = False
pos += 1
if quote_found:
quotes.append((quote_start + 2, pos - 1))
return groups, quotes
def find_char_groups_v1(self, s):
"""Find character groups."""
pos = 0
groups = []
quotes = []
quote_found = False
quote_start = 0
escaped = False
found = 0
first = None
sub_first = None
for c in compat.iterstring(s):
if c == self._b_slash:
# Next char is escaped
escaped = not escaped
elif escaped and found == 0 and not quote_found and c == self._quote:
quote_found = True
quote_start = pos - 1
escaped = False
elif escaped and found == 0 and quote_found and c == self._end:
quotes.append((quote_start, pos))
quote_found = False
escaped = False
elif escaped:
# Escaped handled
escaped = False
elif quote_found:
pass
elif c == self._ls_bracket and not found:
# Start of first char set found
found += 1
first = pos
elif c == self._ls_bracket and found:
# Start of sub char set found
found += 1
sub_first = pos
elif c == self._negate and found == 1 and (pos == first + 1):
# Found ^ at start of first char set; adjust 1st char pos
first = pos
elif c == self._negate and found > 1 and (pos == sub_first + 1):
# Found ^ at start of sub char set; adjust 1st char sub pos
sub_first = pos
elif c == self._rs_bracket and found == 1 and (pos != first + 1):
# First char set closed; log range
groups.append((first, pos))
found = 0
elif c == self._rs_bracket and found > 1 and (pos != sub_first + 1):
# Sub char set closed; decrement depth counter
found -= 1
pos += 1
if quote_found:
quotes.append((quote_start, pos - 1))
return groups, quotes
def comments(self, i):
"""Handle comments in verbose patterns."""
parts = []
try:
t = next(i)
while t != self._nl:
parts.append(t)
t = next(i)
parts.append(self._nl)
except StopIteration:
pass
return parts
def quoted(self, i):
r"""Handle quoted block."""
quoted = []
raw = []
if not self.in_group(i.index - 1):
try:
t = next(i)
while t != self._esc_end:
raw.append(t)
t = next(i)
except StopIteration:
pass
if len(raw):
quoted.extend([escape(self._empty.join(raw))])
return quoted
def in_group(self, index):
"""Check if last index was in a char group."""
inside = False
for g in self.groups:
if g[0] <= index <= g[1]:
inside = True
break
return inside
def apply(self):
"""Apply search template."""
i = RegexSearchTokens(self.search, self.verbose)
iter(i)
for t in i:
if len(t) > 1:
# handle our stuff
c = t[1:]
if c[0:1] in self._verbose_tokens:
self.extended.append(t)
elif c == self._quote:
self.extended.extend(self.quoted(i))
elif c != self._end:
self.extended.append(t)
elif self.verbose and t == self._hashtag and not self.in_group(i.index - 1):
self.extended.append(t)
self.extended.extend(self.comments(i))
else:
self.extended.append(t)
return self._empty.join(self.extended)
class ReplaceTemplate(object):
"""Pre-replace template."""
def __init__(self, pattern, template, use_format=False):
"""Initialize."""
if isinstance(template, compat.binary_type):
self.binary = True
ctokens = ctok.btokens
else:
self.binary = False
ctokens = ctok.utokens
self.string_convert = compat.int2bytes if self.binary else compat.int2str
self.use_format = use_format
self._original = template
self._esc_end = ctokens["esc_end"]
self._end = ctokens["end"]
self._lc = ctokens["lc"]
self._ls_bracket = ctokens["ls_bracket"]
self._lc_bracket = ctokens["lc_bracket"]
self._lc_span = ctokens["lc_span"]
self._uc = ctokens["uc"]
self._uc_span = ctokens["uc_span"]
self._group = ctokens["group"]
self._empty = ctokens["empty"]
self._group_start = ctokens["group_start"]
self._group_end = ctokens["group_end"]
self._binary = ctokens["binary"]
self._octal = ctokens["octal"]
self._hex = ctokens["hex"]
self._minus = ctokens["minus"]
self._zero = ctokens["zero"]
self._unicode_narrow = ctokens["unicode_narrow"]
self._unicode_wide = ctokens["unicode_wide"]
self.end_found = False
self.group_slots = []
self.literal_slots = []
self.result = []
self.span_stack = []
self.single_stack = []
self.slot = 0
self.manual = False
self.auto = False
self.auto_index = 0
self.pattern_hash = hash(pattern)
self.parse_template(pattern)
def regex_parse_template(self, template, pattern):
"""
Parse template for the regex module.
Do NOT edit the literal list returned by
_compile_replacement_helper as you will edit
the original cached value. Copy the values
instead.
"""
groups = []
literals = []
replacements = regex._compile_replacement_helper(pattern, template)
count = 0
for part in replacements:
if isinstance(part, int):
literals.append(None)
groups.append((count, part))
else:
literals.append(part)
count += 1
return groups, literals
def parse_template(self, pattern):
"""Parse template."""
i = ReplaceTokens(self._original, use_format=self.use_format)
iter(i)
self.result = [self._empty]
for t in i:
if len(t) > 1:
if self.use_format and t[0:1] == self._lc_bracket:
self.handle_format_group(t[1:-1].strip())
else:
c = t[1:]
if c[0:1].isdigit() and (self.use_format or len(c) == 3):
value = int(c, 8)
if value > 0xFF:
if self.binary:
# Re fails on octal greater than 0o377 or 0xFF
raise ValueError("octal escape value outside of range 0-0o377!")
self.result.append('\\u%04x' % value)
else:
self.result.append(self.string_convert('\\%03o' % value))
elif not self.use_format and (c[0:1].isdigit() or c[0:1] == self._group):
self.handle_group(t)
elif c == self._lc:
self.single_case(i, _LOWER)
elif c == self._lc_span:
self.span_case(i, _LOWER)
elif c == self._uc:
self.single_case(i, _UPPER)
elif c == self._uc_span:
self.span_case(i, _UPPER)
elif c == self._end:
# This is here just as a reminder that \E is ignored
pass
else:
self.result.append(t)
else:
self.result.append(t)
if len(self.result) > 1:
self.literal_slots.append(self._empty.join(self.result))
del self.result[:]
self.result.append(self._empty)
self.slot += 1
self._template = self._empty.join(self.literal_slots)
self.groups, self.literals = self.regex_parse_template(self._template, pattern)
def span_case(self, i, case):
"""Uppercase or lowercase the next range of characters until end marker is found."""
attr = "lower" if case == _LOWER else "upper"
self.span_stack.append(attr)
try:
t = next(i)
while t != self._esc_end:
if len(t) > 1:
if self.use_format and t[0:1] == self._lc_bracket:
self.handle_format_group(t[1:-1].strip())
else:
c = t[1:]
first = c[0:1]
if first.isdigit() and (self.use_format or len(c) == 3):
value = int(c, 8)
if self.binary:
if value > 0xFF:
# Re fails on octal greater than 0o377 or 0xFF
raise ValueError("octal escape value outside of range 0-0o377!")
text = getattr(compat.uchr(value), attr)()
single = self.get_single_stack()
value = ord(getattr(text, single)()) if single is not None else ord(text)
self.result.append(self.string_convert('\\%03o' % value))
else:
text = getattr(compat.uchr(value), attr)()
single = self.get_single_stack()
value = ord(getattr(text, single)()) if single is not None else ord(text)
self.result.append(('\\%03o' if value <= 0xFF else '\\u%04x') % value)
elif not self.use_format and (c[0:1].isdigit() or c[0:1] == self._group):
self.handle_group(t)
elif c == self._uc:
self.single_case(i, _UPPER)
elif c == self._lc:
self.single_case(i, _LOWER)
elif c == self._uc_span:
self.span_case(i, _UPPER)
elif c == self._lc_span:
self.span_case(i, _LOWER)
elif (
not self.binary and
(first == self._unicode_narrow or (not NARROW and first == self._unicode_wide))
):
uc = compat.uchr(int(t[2:], 16))
text = getattr(uc, attr)()
single = self.get_single_stack()
value = ord(getattr(text, single)()) if single is not None else ord(text)
self.result.append(("\\u%04x" if value <= 0xFFFF else "\\U%08x") % value)
elif first == self._hex:
hc = chr(int(t[2:], 16))
text = getattr(hc, attr)()
single = self.get_single_stack()
value = ord(getattr(text, single)()) if single is not None else ord(text)
self.result.append(self.string_convert("\\x%02x" % value))
else:
self.get_single_stack()
self.result.append(t)
elif self.single_stack:
single = self.get_single_stack()
text = getattr(t, attr)()
if single is not None:
self.result.append(getattr(text[0:1], single)() + text[1:])
else:
self.result.append(getattr(t, attr)())
if self.end_found:
self.end_found = False
break
t = next(i)
except StopIteration:
pass
self.span_stack.pop()
def single_case(self, i, case):
"""Uppercase or lowercase the next character."""
attr = "lower" if case == _LOWER else "upper"
self.single_stack.append(attr)
try:
t = next(i)
if len(t) > 1:
if self.use_format and t[0:1] == self._lc_bracket:
self.handle_format_group(t[1:-1].strip())
else:
c = t[1:]
first = c[0:1]
if first.isdigit() and (self.use_format or len(c) == 3):
value = int(c, 8)
if self.binary:
if value > 0xFF:
# Re fails on octal greater than 0o377 or 0xFF
raise ValueError("octal escape value outside of range 0-0o377!")
value = ord(getattr(compat.uchr(value), self.get_single_stack())())
self.result.append(self.string_convert('\\%03o' % value))
else:
value = ord(getattr(compat.uchr(value), self.get_single_stack())())
self.result.append(('\\%03o' if value <= 0xFF else '\\u%04x') % value)
elif not self.use_format and (c[0:1].isdigit() or c[0:1] == self._group):
self.handle_group(t)
elif c == self._uc:
self.single_case(i, _UPPER)
elif c == self._lc:
self.single_case(i, _LOWER)
elif c == self._uc_span:
self.span_case(i, _UPPER)
elif c == self._lc_span:
self.span_case(i, _LOWER)
elif c == self._end:
self.end_found = True
elif (
not self.binary and
(first == self._unicode_narrow or (not NARROW and first == self._unicode_wide))
):
uc = compat.uchr(int(t[2:], 16))
value = ord(getattr(uc, self.get_single_stack())())
self.result.append(("\\u%04x" if value <= 0xFFFF else "\\U%08x") % value)
elif first == self._hex:
hc = chr(int(t[2:], 16))
self.result.append(
self.string_convert("\\x%02x" % ord(getattr(hc, self.get_single_stack())()))
)
else:
self.get_single_stack()
self.result.append(t)
else:
self.result.append(getattr(t, self.get_single_stack())())
except StopIteration:
pass
def get_single_stack(self):
"""Get the correct single stack item to use."""
single = None
while self.single_stack:
single = self.single_stack.pop()
return single
def handle_format_group(self, text):
"""Handle groups."""
capture = -1
base = 10
try:
index = text.index(self._ls_bracket)
capture = text[index + 1:-1]
text = text[:index]
prefix = capture[1:3] if capture[0:1] == self._minus else capture[:2]
if prefix[0:1] == self._zero:
char = prefix[-1:]
if char == self._binary:
base = 2
elif char == self._octal:
base = 8
elif char == self._hex:
base = 16
except ValueError:
pass
if not isinstance(capture, int):
try:
capture = int(capture, base)
except ValueError:
raise ValueError("Capture index must be an integer!")
# Handle auto or manual format
if text == self._empty:
if self.auto:
text = self.string_convert(self.auto_index)
self.auto_index += 1
elif not self.manual and not self.auto:
self.auto = True
text = self.string_convert(self.auto_index)
self.auto_index += 1
else:
raise ValueError("Cannot switch to auto format during manual format!")
elif not self.manual and not self.auto:
self.manual = True
elif not self.manual:
raise ValueError("Cannot switch to manual format during auto format!")
if len(self.result) > 1:
self.literal_slots.append(self._empty.join(self.result))
self.literal_slots.extend([self._group_start, text, self._group_end])
del self.result[:]
self.result.append(self._empty)
self.slot += 1
else:
self.literal_slots.extend([self._group_start, text, self._group_end])
single = self.get_single_stack()
self.group_slots.append(
(
self.slot,
(
self.span_stack[-1] if self.span_stack else None,
single,
capture
)
)
)
self.slot += 1
def handle_group(self, text):
"""Handle groups."""
if len(self.result) > 1:
self.literal_slots.append(self._empty.join(self.result))
self.literal_slots.append(text)
del self.result[:]
self.result.append(self._empty)
self.slot += 1
else:
self.literal_slots.append(text)
single = self.get_single_stack()
self.group_slots.append(
(
self.slot,
(
self.span_stack[-1] if self.span_stack else None,
single,
-1
)
)
)
self.slot += 1
def get_base_template(self):
"""Return the unmodified template before expansion."""
return self._original
def get_group_index(self, index):
"""Find and return the appropriate group index."""
g_index = None
for group in self.groups:
if group[0] == index:
g_index = group[1]
break
return g_index
def get_group_attributes(self, index):
"""Find and return the appropriate group case."""
g_case = (None, None, -1)
for group in self.group_slots:
if group[0] == index:
g_case = group[1]
break
return g_case
# Template expander
class ReplaceTemplateExpander(object):
"""Backrefereces."""
def __init__(self, match, template):
"""Initialize."""
if template.binary:
ctokens = ctok.btokens
else:
ctokens = ctok.utokens
self.template = template
self._esc_end = ctokens["esc_end"]
self._end = ctokens["end"]
self._lc = ctokens["lc"]
self._lc_span = ctokens["lc_span"]
self._uc = ctokens["uc"]
self._uc_span = ctokens["uc_span"]
self.index = -1
self.end_found = False
self.parent_span = []
self.match = match
def expand(self):
"""Using the template, expand the string."""
sep = self.match.string[:0]
text = []
# Expand string
for x in range(0, len(self.template.literals)):
index = x
l = self.template.literals[x]
if l is None:
g_index = self.template.get_group_index(index)
span_case, single_case, capture = self.template.get_group_attributes(index)
try:
l = self.match.captures(g_index)[capture]
except IndexError:
raise IndexError("'%d' is out of range!" % capture)
if span_case is not None:
l = getattr(l, span_case)()
if single_case is not None:
l = getattr(l[0:1], single_case)() + l[1:]
text.append(l)
return sep.join(text)
class Replace(namedtuple('Replace', ['func', 'use_format', 'pattern_hash'])):
"""Bregex compiled replace object."""
def __call__(self, *args, **kwargs):
"""Call."""
return self.func(*args, **kwargs)
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the ReplaceTemplate or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, Replace):
return repl(m)
elif isinstance(repl, ReplaceTemplate):
return ReplaceTemplateExpander(m, repl).expand()
elif isinstance(repl, (compat.string_type, compat.binary_type)):
return ReplaceTemplateExpander(m, ReplaceTemplate(m.re, repl, bool(flags & FORMAT))).expand()
def _is_replace(obj):
"""Check if object is a replace object."""
return isinstance(obj, (ReplaceTemplate, Replace))
def _apply_search_backrefs(pattern, flags=0):
"""Apply the search backrefs to the search pattern."""
if isinstance(pattern, (compat.string_type, compat.binary_type)):
re_verbose = VERBOSE & flags
if flags & V0:
re_version = V0
elif flags & V1:
re_version = V1
else:
re_version = 0
pattern = RegexSearchTemplate(pattern, re_verbose, re_version).apply()
elif isinstance(pattern, REGEX_TYPE):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern!")
else:
raise TypeError("Not a string or compiled pattern!")
return pattern
def compile_search(pattern, flags=0, **kwargs):
"""Compile with extended search references."""
return regex.compile(_apply_search_backrefs(pattern, flags), flags, **kwargs)
def compile_replace(pattern, repl, flags=0):
"""Construct a method that can be used as a replace method for sub, subn, etc."""
call = None
if pattern is not None and isinstance(pattern, REGEX_TYPE):
if isinstance(repl, (compat.string_type, compat.binary_type)):
repl = ReplaceTemplate(pattern, repl, bool(flags & FORMAT))
call = Replace(
functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash
)
elif isinstance(repl, Replace):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern!")
if repl.pattern_hash != hash(pattern):
raise ValueError("Pattern hash doesn't match hash in compiled replace!")
call = repl
elif isinstance(repl, ReplaceTemplate):
if flags:
raise ValueError("Cannot process flags argument with a ReplaceTemplate!")
call = Replace(
functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash
)
else:
raise TypeError("Not a valid type!")
else:
raise TypeError("Pattern must be a compiled regular expression!")
return call
# Convenience methods like re has, but slower due to overhead on each call.
# It is recommended to use compile_search and compile_replace
def expand(m, repl):
"""Expand the string using the replace pattern or function."""
if isinstance(repl, (Replace, ReplaceTemplate)):
if repl.use_format:
raise ValueError("Replace should not be compiled as a format replace!")
elif not isinstance(repl, (compat.string_type, compat.binary_type)):
raise TypeError("Expected string, buffer, or compiled replace!")
return _apply_replace_backrefs(m, repl)
def expandf(m, format): # noqa B002
"""Expand the string using the format replace pattern or function."""
if isinstance(format, (Replace, ReplaceTemplate)):
if not format.use_format:
raise ValueError("Replace not compiled as a format replace")
elif not isinstance(format, (compat.string_type, compat.binary_type)):
raise TypeError("Expected string, buffer, or compiled replace!")
return _apply_replace_backrefs(m, format, flags=FORMAT)
def match(pattern, string, flags=0, pos=None, endpos=None, partial=False, concurrent=None, **kwargs):
"""Wrapper for match."""
return regex.match(
_apply_search_backrefs(pattern, flags), string,
flags, pos, endpos, partial, concurrent, **kwargs
)
def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False, concurrent=None, **kwargs):
"""Wrapper for fullmatch."""
return regex.fullmatch(
_apply_search_backrefs(pattern, flags), string,
flags, pos, endpos, partial, concurrent, **kwargs
)
def search(pattern, string, flags=0, pos=None, endpos=None, partial=False, concurrent=None, **kwargs):
"""Wrapper for search."""
return regex.search(
_apply_search_backrefs(pattern, flags), string,
flags, pos, endpos, partial, concurrent, **kwargs
)
def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, concurrent=None, **kwargs):
"""Wrapper for sub."""
is_replace = _is_replace(repl)
is_string = isinstance(repl, (compat.string_type, compat.binary_type))
if is_replace and repl.use_format:
raise ValueError("Compiled replace cannot be a format object!")
pattern = compile_search(pattern, flags)
return regex.sub(
pattern, (compile_replace(pattern, repl) if is_replace or is_string else repl), string,
count, flags, pos, endpos, concurrent, **kwargs
)
def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None, concurrent=None, **kwargs): # noqa B002
"""Wrapper for subf."""
is_replace = _is_replace(format)
is_string = isinstance(format, (compat.string_type, compat.binary_type))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return regex.sub(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string,
count, flags, pos, endpos, concurrent, **kwargs
)
def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, concurrent=None, **kwargs):
"""Wrapper for subn."""
is_replace = _is_replace(repl)
is_string = isinstance(repl, (compat.string_type, compat.binary_type))
if is_replace and repl.use_format:
raise ValueError("Compiled replace cannot be a format object!")
pattern = compile_search(pattern, flags)
return regex.subn(
pattern, (compile_replace(pattern, repl) if is_replace or is_string else repl), string,
count, flags, pos, endpos, concurrent, **kwargs
)
def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None, concurrent=None, **kwargs): # noqa B002
"""Wrapper for subfn."""
is_replace = _is_replace(format)
is_string = isinstance(format, (compat.string_type, compat.binary_type))
if is_replace and not format.use_format:
raise ValueError("Compiled replace is not a format object!")
pattern = compile_search(pattern, flags)
rflags = FORMAT if is_string else 0
return regex.subn(
pattern, (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format), string,
count, flags, pos, endpos, concurrent, **kwargs
)
def split(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
"""Wrapper for split."""
return regex.split(
_apply_search_backrefs(pattern, flags), string,
maxsplit, flags, concurrent, **kwargs
)
def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
"""Wrapper for splititer."""
return regex.splititer(
_apply_search_backrefs(pattern, flags), string,
maxsplit, flags, concurrent, **kwargs
)
def findall(
pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
concurrent=None, **kwargs
):
"""Wrapper for findall."""
return regex.findall(
_apply_search_backrefs(pattern, flags), string,
flags, pos, endpos, overlapped, concurrent, **kwargs
)
def finditer(
pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
partial=False, concurrent=None, **kwargs
):
"""Wrapper for finditer."""
return regex.finditer(
_apply_search_backrefs(pattern, flags), string,
flags, pos, endpos, overlapped, partial, concurrent, **kwargs
)
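    # Hedged usage sketch (added for illustration, not part of the original
    # module): it exercises the case and quote backrefs described in the module
    # docstring. The pattern, replacement, and sample text are made up.
    if __name__ == '__main__':  # pragma: no cover
        _demo_pattern = compile_search(r'(\Qfile.name\E) (\w+)')
        _demo_replace = compile_replace(_demo_pattern, r'\C\1\E \L\2\E')
        # \Q...\E quotes the literal dot in the search pattern; in the replacement,
        # \C...\E uppercases group 1 and \L...\E lowercases group 2.
        print(_demo_pattern.sub(_demo_replace, 'file.name HELLO'))  # -> FILE.NAME hello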
# Classes for rsinc module
import subprocess
import os
from time import sleep
THESAME, UPDATED, DELETED, CREATED = tuple(range(4))
NOMOVE, MOVED, CLONE, NOTHERE = tuple(range(4, 8))
class File:
def __init__(self, name, uid, time, state, moved, is_clone, synced, ignore):
self.name = name
self.uid = uid
self.time = time
self.state = state
self.moved = moved
self.is_clone = is_clone
self.synced = synced
self.ignore = ignore
def dump(self):
return (
self.uid,
self.time,
self.state,
self.moved,
self.is_clone,
self.synced,
self.ignore,
)
class Flat:
def __init__(self, path):
self.path = path
self.names = {}
self.uids = {}
self.lower = set()
self.dirs = set()
def update(
self,
name,
uid,
time=0,
state=THESAME,
moved=False,
is_clone=False,
synced=False,
ignore=False,
):
self.names.update(
{
name: File(
name, uid, time, state, moved, is_clone, synced, ignore
)
}
)
self.lower.add(name.lower())
d = os.path.dirname(name)
d = os.path.join(self.path, d)
self.dirs.add(d)
if uid in self.uids:
self.names[name].is_clone = True
self.uids[uid].is_clone = True
self.uids.update({uid: self.names[name]})
else:
self.uids.update({uid: self.names[name]})
def clean(self):
for file in self.names.values():
file.synced = False
def rm(self, name):
if not self.names[name].is_clone:
del self.uids[self.names[name].uid]
del self.names[name]
self.lower.remove(name.lower())
def tag_ignore(self, regexs):
for name, file in self.names.items():
if any(r.match(os.path.join(self.path, name)) for r in regexs):
file.ignore = True
else:
file.ignore = False
def rm_ignore(self):
for name, file in tuple(self.names.items()):
if file.ignore:
self.rm(name)
class Struct:
def __init__(self):
self.count = 0
self.total = 0
self.lcl = None
self.rmt = None
self.dry = True
self.case = True
self.pool = None
self.rclone_flags = []
class SubPool:
def __init__(self, max_workers):
self.procs = []
self.max_workers = max_workers
def run(self, cmd):
if len(self.procs) < self.max_workers:
self.procs.append(subprocess.Popen(cmd))
return
else:
done = None
while done is None:
done = self._find_done_process()
self.procs.pop(done).terminate()
self.run(cmd)
def _find_done_process(self):
for c, proc in enumerate(self.procs):
poll = proc.poll()
if poll == 0:
return c
elif poll is None:
sleep(0.01)
continue
else:
print("Error polled:", poll, "with", proc.args)
return c
return None
def wait(self):
for proc in self.procs:
proc.wait()
proc.terminate()
self.procs = []
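# Hedged usage sketch (added): SubPool caps how many rclone subprocesses run
# concurrently. The command lists below are illustrative only.
#
#     pool = SubPool(max_workers=4)
#     for src, dst in jobs:
#         pool.run(["rclone", "copyto", src, dst])
#     pool.wait()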
import json
import csv
import argparse
import http.client
import base64
fieldnames = ("TenantID","First Name","Last Name","Extension","Voice DID","Fax DID","Caller ID","ID for MS Exchange","Home Phone","Cell Phone","Fax Number",
"E-mail","Alternate E-mail","User Name","Password","PIN","Pseudonym","User Profile","ID","Admin Profile","Paging Profile","Recording Profile","Home MX",
"Current MX", "Default Role","Assigned Device(s)","CallGroup","AA")
admin_endpoint = ""
def conn_to_admin(ahost,no_ssl):
if no_ssl:
return http.client.HTTPConnection(ahost,timeout=5)
else:
return http.client.HTTPSConnection(ahost,timeout=5)
def main(ahost, admin_name=None, admin_pass=None, no_ssl=False):
admin_conn = conn_to_admin(ahost,no_ssl)
headers = {"Content-type": "application/json"}
if admin_name is not None and admin_pass is not None:
userAndPass = base64.b64encode(str.encode(admin_name) + b":" + str.encode(admin_pass)).decode("ascii")
headers["Authorization"] = "Basic %s" % userAndPass
try:
admin_conn.request("GET", admin_endpoint + "/users", headers=headers)
except Exception as e:
print("Connection error")
print(e)
exit(1)
response = admin_conn.getresponse()
if response.status != 200:
print(response.status, response.reason)
admin_conn.close()
exit(2)
user_list = json.loads(response.read())['users']
with open("mxv_user_list.csv","w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames, dialect='excel')
writer.writeheader()
for user in user_list:
try:
admin_conn.request("GET", admin_endpoint + "/users/" + user, headers=headers)
except Exception as e:
print("Connection error")
print(e)
admin_conn.close()
exit(1)
response = admin_conn.getresponse()
user_data = json.loads(response.read())
mx = user_data['services']['MX']
# Write to CSV file
writer.writerow({"TenantID" : user_data['tenant'] if 'tenant' in user_data else None,
"First Name" : mx['first_name'],
"Last Name" : mx['last_name'],
"Cell Phone" : mx['mobile_number'],
"E-mail" : user,
"User Name" : mx['account_name'],
"Password" : mx['account_pwd'],
"PIN" : mx['account_pin'],
"Extension" : mx['extension'] if 'extension' in mx else None,
"ID" : mx['id'] if 'id' in mx else None
})
admin_conn.close()
exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--admin-name', dest='admin_name', help='Admin username for provisioning if configured', metavar='NAME')
parser.add_argument('--admin-pass', dest='admin_pass', help='Admin password for provisioning if configured', metavar='PASS')
parser.add_argument('--no-ssl', dest='no_ssl', action='store_true', help='If provided, connection is on unsecured HTTP. Default is False')
requiredArg = parser.add_argument_group('required arguments')
requiredArg.add_argument('--admin-host', dest='admin_host', help='Provisioning server administrator API host address', metavar='<example.com>', required=True)
args = parser.parse_args()
main(args.admin_host, args.admin_name, args.admin_pass, args.no_ssl)
import os
from itertools import product
import re
from numpy import append, array, bincount, diff, ma, sort #cumsum, nditer, roll, setdiff1d, where
from numpy import product as np_prod
seating_re = re.compile(r'[L.]')
workPath = os.path.expanduser("~/Documents/Code/Advent_of_code/2020")
os.chdir(workPath)
#with open("day-11_data.txt", "r") as in_file:
with open("test_data.txt", "r") as in_file:
data = array([list(row.strip()) for row in in_file])
empty_seats = ma.masked_where(data == 'L', data).mask
floor = ma.masked_where(data == '.', data).mask
occupied_seats = ma.masked_where(data == '#', data).mask
occupied = array([[False, False, False], [False, True, False], [False, False, False]])
# Part 1:
sorted_adapters = sort(data)
sorted_adapters = append(append(array([0]), sorted_adapters), sorted_adapters[-1]+3)
jolts = diff(sorted_adapters)
distribution = {k:v for k, v in zip(range(max(set(jolts))+4), bincount(jolts))}
print(f"The product of the counts of 1- and 3-jolt differences is {distribution[1]*distribution[3]}")
# Part 2:
def possible_permutations(n, m):
perms = (i for i in product(list(range(m + 1)), repeat=n) if sum(i) == n)
return set(tuple(n for n in sublist if n != 0) for sublist in perms)
max_step = 3
reps = re.findall('1{2,}', ''.join([str(i) for i in jolts]))
rep_lens = [len(i) for i in reps]
perm_dict = {s:len(possible_permutations(s, max_step)) for s in range(2, max(rep_lens) + 1)}
counts = np_prod([perm_dict[possibilities] for possibilities in rep_lens])
print(f"There are {counts} possible permutations of the adapters")
import re
import unittest
from rexlex import Lexer
from rexlex.lexer.itemclass import get_itemclass
class TestableLexer(Lexer):
"""Test tuple state transitions including #pop."""
LOGLEVEL = None
    re_skip = re.compile(r'\s+')
tokendefs = {
'root': [
('Root', 'a', 'bar'),
('Root', 'e'),
],
'foo': [
('Foo', 'd'),
],
'bar': [
('Bar', 'b', 'bar'),
('Bar', 'c', 'foo'),
],
}
class TupleTransTest(unittest.TestCase):
text = 'abcde'
Item = get_itemclass(text)
expected = [
Item(start=0, end=1, token='Root'),
Item(start=1, end=2, token='Bar'),
Item(start=2, end=3, token='Bar'),
Item(start=3, end=4, token='Foo'),
Item(start=4, end=5, token='Root')]
def test(self):
toks = list(TestableLexer(self.text))
self.assertEqual(toks, self.expected)
from unityagents import UnityEnvironment
from utils import dqn, get_env_spec
from dqn_agents import Agent
import os
import argparse
EXPS_ROOT_PATH = './data'
parser=argparse.ArgumentParser(description="train a RL agent in Unity Banana Navigation Environment")
parser.add_argument('-n', '--name', type=str, metavar='', default='no-name-exp', help="name of the training run (default no-name-exp)")
parser.add_argument('-s', '--save_trace', type=bool, metavar='', default=True, help='whether to save the training trace')
parser.add_argument('-M', '--max_score', type=float, metavar='', default=13.0, help="the pass score a trained agent should achieve")
parser.add_argument('-ra', '--seed', type=int, metavar='', default=0, help='random seed of the agent')
parser.add_argument('-dd', '--double_dqn', action='store_true', help='whether to use double dqn training')
parser.add_argument('-du', '--dueling_dqn', action='store_true', help='whether to use dueling dqn arch')
args=parser.parse_args()
if __name__ == "__main__":
# define and check environment information:
env = UnityEnvironment(file_name="./Banana_Linux/Banana.x86_64")
env_spec = get_env_spec(env)
# define our agent
agent = Agent(state_size=env_spec['state_size'],
action_size=env_spec['action_size'],
seed=args.seed,
double_dqn=args.double_dqn,
dueling_dqn=args.dueling_dqn,
dqn_way_update=False)
# create exp_dir for saving
exp_dir = os.path.join(EXPS_ROOT_PATH, args.name)
os.makedirs(exp_dir, exist_ok=True)
# the main algorithm
scores = dqn(agent, env,
max_score=args.max_score,
save_dir=exp_dir)
# save training trace
if args.save_trace:
with open(os.path.join(exp_dir, 'progress.txt'), 'w') as myfile:
myfile.write(str(scores))
myfile.close()
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
app_name='todoapp'
urlpatterns = [
path('',views.home, name='home'),
path('index',views.lhome, name='lhome'),
# Delete Paths
path('<int:todo_id>/delete', views.delete, name='delete'),
path('<int:cat_id>/deletecategory', views.deletecategory, name='deletecategory'),
#Update Paths
path('<int:todo_id>/update', views.update, name='update'),
# Add Paths
path('add', views.add, name='add'),
path('addcategory', views.addcategory, name='addcategory'),
path('permisions', views.permisions, name='permisions'),
path('addpermision/', views.addpermision, name='addpermision'),
path('filtertodos/', views.filtertodos, name='filtertodos'),
path('filtertodosdate/', views.filtertodosdate, name='filtertodosdate'),
#User Registration url
path('login', views.userloginurl, name='userloginurl'),
path('register', views.register, name='register'),
path('logind', views.logind, name='logind'),
path('logout', views.logout_view, name='logout'),
]
#!/usr/bin/env python3
# Transpose chroma matrix by nTransp semitones up (right rotation) where nTransp is 1st argument.
# If two additional arguments are present, those are input and output file paths, respectively.
# Otherwise, read/write on STDIN
import sys, csv
if __name__ == '__main__':
ntransp = (int(sys.argv[1]) + 12) % 12
instream = sys.stdin
outstream = sys.stdout
if len(sys.argv) == 4 :
instream = open(sys.argv[2],'r')
outstream = open(sys.argv[3],'w')
reader = csv.reader(instream)
writer = csv.writer(outstream)
for line in reader :
outline = line[-ntransp:] + line[:-ntransp]
writer.writerow(outline)
if len(sys.argv) == 4 :
outstream.close()
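# Illustrative example (added): with nTransp = 2 and chroma columns ordered
# C, C#, D, ..., A#, B, a row [c, c#, ..., a#, b] is rewritten as
# [a#, b, c, ..., a]: the last two values rotate to the front, shifting each
# pitch class up by two semitones.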
from collections import defaultdict
import networkx as nx
import numpy as np
import hashlib
from .solver_utils import root_finder, get_edge_length
def find_split(
nodes,
priors=None,
considered=set(),
fuzzy=False,
probabilistic=False,
minimum_allele_rep=1.0,
):
# Tracks frequency of states for each character in nodes
character_mutation_mapping = defaultdict(int)
# Tracks frequency of dropout for each character in nodes
missing_value_prop = defaultdict(float)
# Accounting for frequency of mutated states per character, in order to choose the best split
for node in nodes:
node_list = node.split("_")[0].split("|")
for i in range(0, len(node_list)):
char = node_list[i]
if char == "-":
missing_value_prop[str(i)] += 1.0 / len(nodes)
if (str(i), char) not in considered:
# you can't split on a missing value or a 'None' state
if char != "0" and char != "-":
if priors:
character_mutation_mapping[(str(i), char)] -= np.log(
priors[int(i)][char]
)
else:
character_mutation_mapping[(str(i), char)] += 1
# Choosing the best mutation to split on (ie character and state)
character, state = 0, 0
max_cost = 0
min_prior = 1
if priors:
for i in priors.keys():
for j in priors[i].keys():
min_prior = min(min_prior, priors[i][j])
if probabilistic:
entries, vals = (
list(character_mutation_mapping.keys()),
list(character_mutation_mapping.values()),
)
tot = np.sum([v for v in vals])
probs = [v / tot for v in vals]
entry = entries[np.random.choice(list(range(len(entries))), p=probs)]
character, state = int(entry[0]), entry[1]
else:
epsilon = 0
for i, j in character_mutation_mapping:
if fuzzy:
epsilon = np.random.normal()
if (
max_cost < (character_mutation_mapping[(i, j)] + epsilon)
and missing_value_prop[str(i)] < minimum_allele_rep
):
max_cost = character_mutation_mapping[(i, j)]
character, state = i, j
character = int(character)
return character, state
def classify_missing_value(
node,
left_split,
right_split,
knn_neighbors,
knn_distances,
theta=0.1,
kernel=True,
mode="knn",
lookahead_depth=3,
left_states=[],
right_states=[],
):
"""
Classifies a cell with a missing value as belonging in the left split or the right split of a character split. This function will return a
    boolean indicating whether or not the node belongs in the right split (i.e. has the character state).
:param node:
A node, represented as a character string: 'Ch1|Ch2|....|Chn'
:param left_split:
A list of nodes that are inferred not to have the character state (i.e. negatives)
:param right_split:
A list of nodes that are inferred to have the character state (i.e. positives)
:param knn_neighbors:
A dictionary storing for each node its closest neighbors
:param knn_distances:
A dictionary storing for each node the allele distances to its closest neighbors. These should be modified allele distances
:param theta:
Width of the Gaussian Kernel used to smooth the KNN distances. Only used if kernel = True and mode = 'knn' (default)
:param kernel:
        Apply a Gaussian kernel to smooth the KNN distances. Only used if mode = 'knn' (default)
:param mode:
Choose a mode to classify negative cells:
- 'knn': assign based on a k-nearest-neighbor approach
- 'avg': assign based on average similarity to either groups using a naive hamming distance
        - 'modified_avg': assign based on average similarity using a slightly more nuanced similarity function (A-A + 2, A-None + 1, None-None/Missing-A + 0)
:return:
Returns a boolean - True if the node belongs in the right split and False if it belongs in the left split.
"""
right_split_score = 0
left_split_score = 0
if mode == "knn":
for n_i, neighbor in zip(range(len(knn_neighbors[node])), knn_neighbors[node]):
if neighbor in right_split:
if not kernel:
right_split_score += 1
else:
right_split_score += np.exp(
                        -1 * knn_distances[node][n_i] / theta ** 2
)
if neighbor in left_split:
# if the neighbor isn't in the right split, by default we prefer to put it
# into the left split
if not kernel:
left_split_score += 1
else:
                    left_split_score += np.exp(-1 * knn_distances[node][n_i] / theta ** 2)
if not kernel:
normfact = len(knn_neighbors[node])
else:
normfact = np.sum(
[
np.exp(knn_distances[node][n_i])
for n_i in range(len(knn_neighbors[node]))
]
)
avg_right_split_score = right_split_score / normfact
avg_left_split_score = left_split_score / normfact
elif mode == "avg":
node_list = node.split("|")
num_not_missing = len([n for n in node_list if n != "-"])
for i in range(0, len(node_list)):
if node_list[i] != "0" and node_list[i] != "-":
for node_2 in left_split:
node2_list = node_2.split("|")
if node_list[i] == node2_list[i]:
left_split_score += 1
for node_2 in right_split:
node2_list = node_2.split("|")
if node_list[i] == node2_list[i]:
right_split_score += 1
avg_left_split_score = left_split_score / float(
len(left_split) * num_not_missing + 1
)
avg_right_split_score = right_split_score / float(
len(right_split) * num_not_missing + 1
)
elif mode == "modified_avg":
node_list = node.split("|")
for i in range(0, len(node_list)):
for node_2 in left_split:
node2_list = node_2.split("|")
                if node_list[i] == node2_list[i]:
left_split_score += 2
if node_list[i] == "0" or node2_list[i] == "0":
left_split_score += 1
for node_2 in right_split:
node2_list = node_2.split("|")
                if node_list[i] == node2_list[i]:
right_split_score += 2
if node_list[i] == "0" or node2_list[i] == "0":
right_split_score += 1
avg_left_split_score = left_split_score / float(len(left_split) + 1)
avg_right_split_score = right_split_score / float(len(right_split) + 1)
elif mode == "lookahead":
node_list = node.split("|")
left_score, right_score = 0, 0
for char in left_states:
if node_list[char] == left_states[char]:
left_score = left_score + 1
for char in right_states:
if node_list[char] == right_states[char]:
right_score = right_score + 1
avg_right_split_score = right_score
avg_left_split_score = left_score
else:
raise Exception(
"Classification method not recognized. Please choose from: lookahead, knn, avg, modified_avg"
)
if avg_right_split_score >= avg_left_split_score:
return True
return False
def perform_split(
nodes,
character,
state,
knn_neighbors,
knn_distances,
considered,
missing_data_mode="lookahead",
lookahead_depth=3,
):
"""
Performs a split on a given character and state, separating the set of targets into two mutually exclusive groups based on the
presence or absence of the character state. This procedure also will classify cells with missing values in the selected character,
using the `classify_missing_value` function.
    :param nodes:
A list of target nodes, where each node is in the form 'Ch1|Ch2|....|Chn'
:param character:
An integer indicating the position in the character array to consider.
:param state:
An integer indicating a particular state in the character on which to split.
:return:
Returns a set of two lists - right_split and left_split - segmenting the targets. Cells in the right split were inferred to have
the character state and those in the left split did not.
"""
# Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character
# Right split is where nodes with the mutation go, everyone else goes to left split or NA chars
left_split, right_split, NA_chars = [], [], []
for node in nodes:
node_list = node.split("|")
if node_list[character] == state:
right_split.append(node)
elif node_list[character] == "-":
NA_chars.append(node)
else:
left_split.append(node)
# order NA_chars by "strongest" candidates for imputation
if missing_data_mode == "knn":
NA_scores = []
for node in NA_chars:
score = 0
for neighbor in knn_neighbors[node]:
if neighbor in right_split or neighbor in left_split:
score += 1
NA_scores.append(score)
NA_dict = dict(zip(NA_chars, NA_scores))
else:
NA_dict = dict(zip(NA_chars, [1] * len(NA_chars)))
left_states, right_states = [], []
if missing_data_mode == "lookahead":
left_states = look_ahead_helper(
left_split, lookahead_depth, dict(), considered.copy()
)
right_states = look_ahead_helper(
right_split, lookahead_depth, dict(), considered.copy()
)
    # Separates all nodes with NA in the character chosen to be split upon
# Puts in right split or left split based on which list shares more mutated characters with this string
for node, score in sorted(NA_dict.items(), key=lambda kv: kv[1]):
if classify_missing_value(
node,
left_split,
right_split,
knn_neighbors,
knn_distances,
theta=0.1,
kernel=True,
mode=missing_data_mode,
lookahead_depth=lookahead_depth,
left_states=left_states,
right_states=right_states,
):
right_split.append(node)
else:
left_split.append(node)
return left_split, right_split
def look_ahead_helper(targets, depth, splits, considered):
if depth == 0 or len(targets) == 1 or len(targets) == 0:
splits_temp = splits.copy()
return splits_temp
else:
character, state = find_split(targets, considered=considered.copy())
splits[character] = state
considered.add((str(character), state))
left_split, right_split, NA_chars = [], [], []
for node in targets:
node_list = node.split("|")
if node_list[character] == state:
right_split.append(node)
elif node_list[character] == "-" or node_list[character] == "H":
NA_chars.append(node)
else:
left_split.append(node)
left_states = look_ahead_helper(
left_split, depth - 1, splits.copy(), considered.copy()
)
right_states = look_ahead_helper(
right_split, depth - 1, splits.copy(), considered.copy()
)
right_states.update(left_states)
return right_states
def greedy_build(
nodes,
knn_neighbors,
knn_distances,
priors=None,
cell_cutoff=200,
lca_cutoff=None,
considered=set(),
uniq="",
targets=[],
fuzzy=False,
probabilistic=False,
minimum_allele_rep=1.0,
missing_data_mode="lookahead",
lookahead_depth=3,
):
"""
Greedy algorithm which finds a probable mutation subgraph for given nodes.
This algorithm chooses splits within the tree based on which mutation occurs most frequently,
weighted by the prior probabilities of each mutation state for each character.
Strings with NA ('-') as a state in the split character are segregated with the
set of nodes which they most closely match to w.r.t. all other characters.
:param nodes:
A list of target nodes, where each node is in the form 'Ch1|Ch2|....|Chn'
:param knn_neighbors:
A dictionary storing for each node its closest neighbors
:param knn_distances:
A dictionary storing for each node the allele distances to its closest neighbors. These should be modified allele distances
:param priors:
A nested dictionary containing prior probabilities for [character][state] mappings
where characters are in the form of integers, and states are in the form of strings,
and values are the probability of mutation from the '0' state.
    :param cell_cutoff:
A cutoff that tells the greedy algorithm to stop, and return a partial sub-tree
Set to -1 to run through to the individual samples (ie return the full tree)
:param considered:
Internal parameter which keeps track of which mutations have been considered in a set
DO NOT MODIFY
:param uniq:
Internal parameter which keeps track of the path of mutations (1 = mutation taken, 0 = mutation not taken)
DO NOT MODIFY
:return:
Returns a graph which contains splits as nodes in the form "character state (uniq_identifier)", and leaves
as either samples, or the roots of the subsets of samples that need to be considered by another algorithm.
Edges are labeled with the corresponding mutation taken
AND
a list in the form [[sub_root, sub_samples],....] which is a list of subproblems still needed to be solved
"""
# G models the network that is returned recursively
G = nx.DiGraph()
root = root_finder(nodes)
if lca_cutoff is not None:
distances = [get_edge_length(root, t) for t in nodes]
# Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes
if lca_cutoff is not None:
if max(distances) <= lca_cutoff or len(nodes) == 1:
root = root_finder(nodes)
G.add_node(root)
return G, [[root, nodes]]
else:
if len(nodes) <= cell_cutoff or len(nodes) == 1:
root = root_finder(nodes)
G.add_node(root)
return G, [[root, nodes]]
character, state = find_split(
nodes,
priors=priors,
considered=considered.copy(),
fuzzy=fuzzy,
probabilistic=probabilistic,
minimum_allele_rep=minimum_allele_rep,
)
# If there is no good split left, stop the process and return a graph with the remainder of nodes
if character == 0 and state == 0:
if len(nodes) == 1:
G.add_node(nodes[0])
else:
for i in range(0, len(nodes)):
if nodes[i] != root:
G.add_edge(root, nodes[i])
return G, []
# Add character, state that split occurred to already considered mutations
considered.add((str(character), state))
left_split, right_split = perform_split(
nodes,
character,
state,
knn_neighbors,
knn_distances,
considered.copy(),
missing_data_mode,
lookahead_depth,
)
# Create new graph for storing results
G = nx.DiGraph()
splitter = root
# Recursively build left side of network (ie side that did not mutation at the character with the specific state)
G.add_node(splitter)
left_subproblems = []
left_network = None
if len(left_split) != 0:
left_root = root_finder(left_split)
left_network, left_subproblems = greedy_build(
left_split,
knn_neighbors,
knn_distances,
priors,
cell_cutoff,
lca_cutoff,
considered.copy(),
uniq + "0",
targets,
fuzzy,
probabilistic,
minimum_allele_rep,
missing_data_mode,
lookahead_depth,
)
left_nodes = [
node for node in left_network.nodes() if left_network.in_degree(node) == 0
]
dup_dict = {}
for n in left_network:
if n in list(G.nodes()) and n != left_root:
dup_dict[n] = (
n + "_" + str(hashlib.md5(left_root.encode("utf-8")).hexdigest())
)
left_network = nx.relabel_nodes(left_network, dup_dict)
G = nx.compose(G, left_network)
if root != left_root:
G.add_edge(splitter, left_root, weight=0, label="None")
# Recursively build right side of network
right_network, right_subproblems = greedy_build(
right_split,
knn_neighbors,
knn_distances,
priors,
cell_cutoff,
lca_cutoff,
considered.copy(),
uniq + "1",
targets,
fuzzy,
probabilistic,
minimum_allele_rep,
missing_data_mode,
lookahead_depth,
)
right_nodes = [
node for node in right_network.nodes() if right_network.in_degree(node) == 0
]
right_root = root_finder(right_split)
dup_dict = {}
for n in right_network:
if n in list(G.nodes()) and n != right_root:
dup_dict[n] = (
n + "_" + str(hashlib.md5(right_root.encode("utf-8")).hexdigest())
)
for n in dup_dict:
rename_dict = {n: dup_dict[n]}
if right_network.out_degree(n) != 0:
right_network = nx.relabel_nodes(right_network, rename_dict)
else:
rename_dict = {n: dup_dict[n]}
G = nx.relabel_nodes(G, rename_dict)
G = nx.compose(G, right_network)
if root != right_root:
if not priors:
G.add_edge(
splitter,
right_root,
weight=1,
label=str(character) + ": 0 -> " + str(state),
)
else:
G.add_edge(
splitter,
right_root,
weight=-np.log(priors[int(character)][state]),
label=str(character) + ": 0 -> " + str(state),
)
return G, left_subproblems + right_subproblems
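# Hedged usage sketch (added): the names below are illustrative. greedy_build
# returns the inferred mutation subgraph plus the subproblems left for a
# downstream solver, as described in its docstring.
#
#     G, subproblems = greedy_build(cells, knn_neighbors, knn_distances,
#                                   priors=None, cell_cutoff=200)
#     for sub_root, sub_cells in subproblems:
#         ...  # hand each subproblem to another solver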
def compute_entropy_of_split(cells):
C = len(cells[0].split("|"))
N = len(cells)
entropies = []
for c in range(C):
counts_per_state = defaultdict(int)
for cell in cells:
state = cell.split("|")[c]
counts_per_state[state] += 1
# convert counts to frequencies
counts_per_state = dict([(k, v / N) for k, v in counts_per_state.items()])
ent = -1 * np.sum([p * np.log(p) for p in counts_per_state.values()])
entropies.append(ent)
return np.mean(entropies)
import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
def DrawPlotOnPage(N, CanvasSize_W, CanvasSize_H, Lval, Tval, Wval, Hval, solNo):
#print("plotter called")
fig, ax = plt.subplots()
rectangles = []
for x in range(N):
myRect = mpatch.Rectangle((Lval[x], Tval[x]), Wval[x], Hval[x], edgecolor='0.5')
rectangles.append(myRect)
#print("Rectangles are:",rectangles)
x = 0
for r in rectangles:
#print("X is ",x,"At rectange",r)
ax.add_artist(r)
rx, ry = r.get_xy()
cx = rx + r.get_width() / 2.0
cy = ry + r.get_height() / 2.0
ax.annotate(str(x), (cx, cy), color='black', weight='bold', fontsize=6, ha='center', va='center')
x=x+1
ax.set_xlim((0, CanvasSize_W))
ax.set_ylim((0, CanvasSize_H))
ax.set_aspect('equal')
#plt.title("")
## New start
plt.axis([0, CanvasSize_W, 0, CanvasSize_H])
plt.grid(False) # set the grid
ax = plt.gca() # get the axis
ax.set_ylim(ax.get_ylim()[::-1]) # invert the axis
ax.xaxis.tick_top() # and move the X-Axis
    plt.savefig("output/Test" + str(solNo) + ".png")
    # plt.show() would have to be called before plt.close(); it is left out so batch saving does not block
    plt.close()
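# A minimal usage sketch with made-up rectangle coordinates; it assumes an
# "output/" directory exists for the PNG that DrawPlotOnPage saves.
def _example_draw_plot():
    Lval, Tval = [0, 30], [0, 20]
    Wval, Hval = [25, 40], [15, 35]
    DrawPlotOnPage(2, 100, 80, Lval, Tval, Wval, Hval, 1)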
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# This utility will generate the Swift code from the C Fit SDK.
# You can download the Fit SDK from https://developer.garmin.com/fit and update your local copy using the diffsdk.py script.
#
# In the python directory run: ./fitsdkparser.py generate Profile.xlsx
#
#
import re
import argparse
import json
import pprint
import openpyxl
import logging
import os
from inspect import currentframe,getframeinfo
def fix_variable_name( var_name ):
'''
fix for reserved names
'''
if var_name == 'switch':
return 'switch_'
return var_name
base_type_alignments = {
'uint16': 2,
'enum': 1,
'bool': 1,
'sint8': 1,
'uint8':1,
    'sint16': 2,
'sint32':4,
'uint32':4,
'string':1,
'uint8z':1,
'uint16z':2,
'uint32z':4,
'byte':1,
'sint64':8,
'uint64':8,
'uint64z':8,
'float32':4
}
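# A small illustrative helper (not part of the generator itself, name assumed): sorting
# type names by alignment mirrors how Message.fields_sorted_by_alignments orders struct members.
def _example_alignment_order(type_names):
    return sorted(type_names, key=lambda t: base_type_alignments.get(t, 1), reverse=True)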
def first_line_with_annotate_comment(prefix = '', annotate = True):
if not annotate:
return []
previous_frame = currentframe().f_back
(filename, line_number,function_name, lines, index) = getframeinfo(previous_frame)
return [ '{}//Generated by {} at {}:{}'.format( prefix, function_name, os.path.basename(filename), line_number ) ]
class Type :
'''
Represent a type and its values
name: type name (ex: mesg_num)
base_type: c type (ex: uint16)
type_num: internal number for the type
    values: list of name/value dicts (ex: [{'name': 'record', 'value': 20}, {'name': 'session', 'value': 18}])
values_map: dict name: value (ex: {'record':20, 'session':18 } )
'''
def __init__(self,name, base_type, type_num,annotate=False):
self.name = name
self.base_type = base_type
self.type_num = type_num
self.values = []
self.values_map = {}
self.annotate = annotate
def fit_type(self):
return 'FIT_{}'.format( self.name.upper() )
def add_row(self,row):
if len(row)>4 and row[0] is None and row[1] is None:
self.values.append( { 'name': row[2], 'value':row[3] } )
self.values_map[row[2]] = row[3]
return True
else:
return False
def __repr__(self):
return 'Type({}<{}>={}{{{}}})'.format( self.name, self.base_type, self.type_num, len(self.values) )
def description(self):
rv = [ '{}'.format( self ) ]
for d in self.values:
rv.append( ' {}: {}'.format( d['value'],d['name'] ) )
return '\n'.join(rv)
def value_for_string(self,val):
return self.values_map[val]
#--- swift type
def swift_stmt_extension(self,use_type):
rv = first_line_with_annotate_comment('', annotate = self.annotate)
rv.extend( [ 'public extension {} {{'.format( use_type ),
' func name() -> String {',
' return {}(self)'.format(self.swift_fname_to_string()),
' }',
' static let invalid : FitMessageType = 0xFFFF'
] )
for d in self.values:
rv.append( ' static let {} : {} = {}'.format( d['name'], use_type, d['value'] ) )
rv.append( '}' )
return rv
def swift_fname_to_string(self):
return f'rzfit_swift_string_from_{self.name}'
def swift_func_to_string(self,fileprivate=True):
rv = first_line_with_annotate_comment('',annotate = self.annotate)
rv.extend( [ '{}func {}(_ input : {}) -> String'.format( 'fileprivate ' if fileprivate else 'public ', self.swift_fname_to_string(), self.objc_type() ),
'{',
' switch input {{'.format( self.name ),
] )
for d in self.values:
rv.append( ' case {}: return "{}"'.format( d['value'], d['name'] ) )
rv.append( ' default: return "{}_\(input)"'.format( self.name) )
rv.extend( [ ' }',
'}',
''] )
return rv
def swift_fname_from_string(self):
return f'rzfit_swift_string_to_{self.name}'
def swift_func_from_string(self,fileprivate=True):
rv = first_line_with_annotate_comment(prefix = '', annotate=self.annotate)
rv.extend( [ '{}func {}(_ input : String) -> {}'.format( 'fileprivate ' if fileprivate else 'public ', self.swift_fname_from_string(), self.objc_type() ),
'{',
' switch input {'
] )
for d in self.values:
rv.append( ' case "{}": return {};'.format( d['name'], d['value'] ) )
rv.append( ' default: return {}_INVALID;'.format( self.objc_type() ) )
rv.extend( [ ' }',
'}',
'' ] )
return rv
def swift_stmt_case_type_function_call(self):
rv = first_line_with_annotate_comment(prefix = '', annotate=self.annotate)
rv.extend( [
' case {}: return {}( {}(val) )'.format(self.type_num, self.swift_fname_to_string(),self.objc_type() )
] )
return rv
def swift_fname_reverse_value(self):
return 'rzfit_swift_reverse_value_{}'.format( self.name )
def swift_func_reverse_value(self):
rv = first_line_with_annotate_comment(prefix = '', annotate=self.annotate)
rv.extend( [ 'fileprivate func {}(value : String) -> RzFitSwiftValue'.format( self.swift_fname_reverse_value() ),
'{',
' switch value {'
] )
for d in self.values:
rv.append( ' case "{}": return .string("{}")'.format( d['value'],d['name'] ) )
rv.append( ' case "{}": return .string("{}")'.format( d['name'],d['value'] ) )
rv.append( ' default: return .unknown'.format( self.objc_type() ) )
rv.extend( [ ' }',
'}',
'' ] )
return rv
#--- objc type
def objc_type(self):
return 'FIT_{}'.format( self.base_type.upper() )
def objc_typedef(self):
rv = first_line_with_annotate_comment('',annotate = self.annotate)
rv.extend( [ 'typedef {} {};'.format( self.objc_type(), self.fit_type() ) ] )
elems = []
sizes = (0,0)
for d in self.values:
one = ( '{}_{}'.format(self.fit_type(), d['name'].upper() ),
'(({}){})'.format(self.fit_type(), d['value'] ) )
elems.append( one )
sizes = ( max(sizes[0],len(one[0])), max( sizes[1],len(one[1]) ) )
rv.append( '#define {0: <{width0}} {1: <{width1}}'.format( f'{self.fit_type()}_INVALID', f'{self.objc_type()}_INVALID', width0=sizes[0], width1=sizes[1] ) )
for e in elems:
rv.append( '#define {0: <{width0}} {1: <{width1}}'.format( e[0], e[1], width0=sizes[0], width1=sizes[1] ) )
rv.append( '' )
return rv
def objc_fname_to_string(self):
return 'rzfit_objc_string_from_{}'.format( self.name )
def objc_func_to_string(self,fileprivate=True):
var_name = fix_variable_name( self.name )
rv = first_line_with_annotate_comment('',annotate = self.annotate)
rv.extend( [ '{}NSString * {}( {} {} ){{'.format( 'static ' if fileprivate else '', self.objc_fname_to_string(), self.objc_type(), var_name ),
' switch({}){{'.format( var_name )
] )
for d in self.values:
rv.append( ' case {}: return @"{}";'.format( d['value'], d['name'] ) )
rv.extend( [ ' default: return [NSString stringWithFormat:@"{}_%u", (unsigned int){}];'.format( self.name, var_name ),
' }',
'}',
'',
''
] )
return rv
def objc_stmt_case_type_function_call(self):
rv = first_line_with_annotate_comment(' ', annotate = self.annotate )
rv.extend( [
' case {}: return {}( ({}) val);'.format( self.type_num, self.objc_fname_to_string(), self.objc_type() ),
] )
return rv
class Field:
'''
field_num: field number (ex: 1)
name: field name (ex: 'manufacturer' or 'product')
type_name: type (ex: 'manufacturer' or 'uint16')
scale: None or value
offset: None or value
unit: None or str (ex: 'bpm')
member: member in a struct
    references: None or array of sub fields with reference_field/reference_field_value (ex: [ Field(garmin_product) ])
reference_field: None or array of field to check if should be used (ex: ['manufacturer','sport'] )
reference_field_value: None or array of value to check if should be used (ex: ['garmin','running'] )
'''
def __init__(self,ctx,row):
self.field_num = row[1]
self.annotate = ctx.annotate
self.name = row[2]
self.type_name = row[3]
self.objc_type = 'FIT_{}'.format(self.type_name.upper() )
self.scale = row[6]
self.offset = row[7]
self.unit = row[8]
self.unit_num = ctx.unit_num( self.unit )
if self.type_name in ctx.types:
self.base_type = ctx.types[self.type_name].base_type
else:
self.base_type = self.type_name
self.objc_base_type = 'FIT_{}'.format(self.base_type.upper() )
self.member = self.name
self.is_value = False
self.is_date = False
self.is_string = False
self.is_fit_type = False
self.is_array = False
self.fit_type = None
self.array_size = None
self.include = False
self.is_switched = False
self.switch_require_complete = False
        # some fields act as the default while others require the rest of the switch information to be present
        # keep track so the generated switch either defaults to the main field or waits for more information
if not self.unit:
self.switch_require_complete = True
if self.type_name.endswith( 'date_time' ):
self.is_date = True
elif self.type_name in ctx.types:
self.is_fit_type = True
self.fit_type = ctx.types[self.type_name]
elif self.type_name == 'string':
self.is_string = True
else:
self.is_value = True
if row[4]:
self.is_array = True
if row[4] != '[N]':
                # sometimes the size is given explicitly
digits = re.findall(r'\d+', row[4])
self.array_size = int( digits[0] )
if row[15]:
self.include = True
if (self.is_array or self.is_string) and not self.array_size:
self.array_size = int( row[15] )
self.reference_field = row[11]
if row[11]:
self.reference_field = row[11].replace( '\n','').split( ',' )
else:
self.reference_field = []
if row[12]:
self.reference_field_value = row[12].replace( '\n','').split( ',' )
else:
self.reference_field_value = []
if len(self.reference_field_value) != len(self.reference_field):
print( 'bug inconsistent reference_field {} {} {}'.format( self.name, row[11], row[12] ) )
self.references = []
def add_reference(self,ctx,row):
field = Field(ctx,row)
if field.is_fit_type:
if self.references and not self.is_fit_type:
                logging.debug( 'switched field {} has value and enum, assuming value'.format( self ) )
if not self.references:
self.is_fit_type = True
self.is_value = False
self.is_switched = True
self.references.append( field )
def type_category(self):
base = self.type_name
if self.is_date:
base = 'date'
elif self.is_string:
base = 'string'
elif self.is_value:
base = 'value'
elif self.is_fit_type:
if self.is_switched:
base = 'multi'
else:
base = '{}'.format( self.fit_type)
if self.array_size:
base = base + '[{}]'.format( self.array_size )
return base
def __repr__(self):
if self.is_switched:
return 'Field({}={}<{}>, {}, switch{{{}}})'.format(self.name, self.field_num, self.base_type, self.type_category(), len(self.references) )
else:
if self.field_num:
return 'Field({}={}<{}>, {})'.format(self.name, self.field_num, self.base_type, self.type_category() )
else:
return 'Field({}<{}>, {})'.format(self.name, self.base_type, self.type_category() )
def base_type_alignment(self):
if self.base_type in base_type_alignments:
alignment = base_type_alignments[self.base_type]
else:
print( "MISSING ALIGNMENT {}".format( self.base_type ) )
            exit(1)
if self.array_size and (self.is_array or self.is_string):
total_size = self.array_size * alignment
if total_size % 4 == 0:
alignment = 4
elif total_size % 2 == 0:
alignment = 2
else:
                alignment = 1
return alignment
def description(self):
rv = [ repr(self) ]
print( self.references)
if self.references:
for field in self.references:
refs = ','.join(list(set(field.reference_field)))
rv.append( 'switch({}): {}'.format( refs, field ) )
return '\n'.join( rv )
def formula(self):
if self.unit:
            return '({}x+{}) in [{}]'.format( self.scale, self.offset, self.unit )
else:
return ''
def name_to_units(self):
rv = {}
if self.unit:
rv = { self.name: self.unit}
for references in self.references:
sub = references.name_to_units()
for (k,v) in sub.items():
if k not in rv:
rv[k] = v
else:
if rv[k] != v:
print( 'inconsistent for {}: {} {}'.format( self.name, v, rv[k] ) )
return rv
#--- swift field
def swift_unit_case_statement(self,prefix=''):
if self.unit:
return [ prefix + 'case "{}": return "{}"'.format( self.member,self.unit ) ]
else:
return None
def swift_stmt_convert_value(self,ctx,message,prefix=''):
lines = []
member = self.member
array_access = ''
if self.is_array and self.array_size > 1:
array_access = '.0'
if self.is_value:
lines = first_line_with_annotate_comment(prefix,ctx.annotate)
lines.extend( [ prefix + 'if x.{}{} != {}_INVALID {{'.format( member, array_access, self.objc_base_type ) ] )
if self.is_switched:
lines.extend( self.swift_stmt_case_convert_to_value(ctx, message) )
else:
if self.is_array:
lines.append( prefix + ' // Array[{}]'.format( self.array_size ) )
formula = self.swift_expr_formula(ctx)
lines.extend( [ prefix + ' let val : Double = {}'.format( formula ),
prefix + ' rv[ "{}" ] = val'.format(self.name),
] )
lines.append( prefix + '}' )
return lines
def swift_stmt_convert_string(self,ctx,message,prefix=' '):
lines = []
if self.is_string or self.is_fit_type:
if self.is_fit_type and not self.is_array:
lines = first_line_with_annotate_comment(prefix,ctx.annotate)
lines.extend( [ prefix + 'if( x.{} != {}_INVALID ) {{'.format( self.member, self.objc_base_type ) ] )
if self.is_switched:
lines.extend( self.swift_stmt_case_convert_to_string(ctx,message) )
else:
type_obj = ctx.types[self.type_name]
lines.extend( [
prefix + ' rv[ "{}" ] = {}(x.{})'.format( self.member,type_obj.swift_fname_to_string(), self.member ),
])
lines.append( prefix + '}' )
elif self.is_string:
lines = first_line_with_annotate_comment(prefix,ctx.annotate)
lines.extend( [ prefix + 'let {} = withUnsafeBytes(of: &x.{}) {{ (rawPtr) -> String in'.format(self.member,self.member),
prefix + ' let ptr = rawPtr.baseAddress!.assumingMemoryBound(to: CChar.self)',
prefix + ' return String(cString: ptr)',
prefix + '}',
prefix + 'if !{}.isEmpty {{'.format( self.member, self.member ),
prefix + ' rv[ "{}" ] = {}'.format( self.member, self.member ),
prefix + '}',
] )
return lines
def swift_stmt_convert_date(self,ctx,message,prefix=''):
lines = []
member = self.member
if self.is_date:
lines = first_line_with_annotate_comment(prefix,ctx.annotate)
lines.extend( [ prefix + 'if x.{} != {}_INVALID {{'.format( member, self.objc_base_type ),
prefix + ' let val : Date = Date(timeIntervalSinceReferenceDate: Double(x.{})-347241600.0 )'.format( member ),
prefix + ' rv[ "{}" ] = val'.format(self.name),
prefix + '}'
] )
return lines
def swift_stmt_case_convert_to_value(self,ctx,message):
rv = []
if self.references:
rv = first_line_with_annotate_comment(' ',ctx.annotate)
if_statement = 'if'
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
for (onefield, oneval) in zip( r.reference_field, r.reference_field_value ):
ref_type_obj = message.type_for_field(ctx,onefield)
formula = self.swift_expr_formula(ctx)
rv.extend( [ ' {} x.{} == {} {{ // {}'.format( if_statement, onefield, ref_type_obj.value_for_string(oneval), oneval ),
' let val : Double = {}'.format( formula ),
' rv[ "{}" ] = val'.format( r.name ),
] )
if_statement = '}else if'
if if_statement != 'if':
rv.append( ' }else{' )
formula = self.swift_expr_formula(ctx)
rv.extend( [ ' let val : Double = {}'.format( formula ),
' rv[ "{}" ] = val'.format( self.name ),
' }',
] )
return rv
def swift_stmt_case_convert_to_string(self,ctx,message):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
if self.references:
if_statement = 'if'
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
if r.name in ctx.types:
r_type_obj = ctx.types[r.name]
for (onefield, oneval) in zip( r.reference_field, r.reference_field_value ):
ref_type_obj = message.type_for_field(ctx,onefield)
rv.extend( [ ' {} x.{} == {} {{ // {}'.format( if_statement, onefield, ref_type_obj.value_for_string(oneval), oneval ),
' rv[ "{}" ] = {}({}(truncatingIfNeeded: x.{}))'.format( r.name,r_type_obj.swift_fname_to_string(), r_type_obj.objc_type(), self.name ),
] )
if_statement = '}else if'
if if_statement != 'if':
rv.append( ' }' )
return rv
def swift_stmt_case_to_string(self,ctx,message):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
if self.references:
rv.extend( [ ' case {}:'.format( self.field_num )] )
if_statement = 'if'
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
for (onefield, oneval) in zip( r.reference_field, r.reference_field_value ):
rv.extend( [ ' {} strings["{}"] == "{}" {{'.format( if_statement, onefield, oneval ) ,
' return "{}"'.format( r.name ) ] )
if_statement = '}else if'
if if_statement == 'if':
rv.append( ' }' )
else:
if self.switch_require_complete:
rv.extend( [' }else{',
' return "__INCOMPLETE__"'.format( self.name ),
' }'
])
else:
rv.extend( [' }else{',
' return "{}"'.format( self.name ),
' }'
])
else:
rv.extend( [' case {}: return "{}"'.format(self.field_num, self.name ) ] )
return rv
def swift_stmt_case_reverse_value(self,ctx,message):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
rv.extend( [
' case "{}": // {}'.format( self.name, self.type_name),
])
if self.type_name == 'date_time':
rv.append( ' guard let dbl : Double = Double(value) else { return .unknown }' )
rv.append( ' let dat : Date = Date(timeIntervalSinceReferenceDate: dbl-347241600.0 )' )
rv.append( ' return .date(dat)' )
elif self.type_name in ctx.types:
ttype = ctx.types[ self.type_name ]
rv.append( ' return {}(value: value)'.format( ttype.swift_fname_reverse_value() ) )
else:
rv.append( ' guard let dbl : Double = Double(value) else { return .unknown }' )
rv.append( ' return .value(dbl)' )
if self.references:
for r in self.references:
rv.extend( r.swift_stmt_case_reverse_value(ctx,message) )
return rv
#---- objc field
def objc_stmt_build_references_variables(self,ctx,message):
all_var = dict()
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
for one in r.reference_field:
all_var[ one ] = message.fields_map[one].field_num
rv = first_line_with_annotate_comment(' ',ctx.annotate)
for one,field_num in all_var.items():
rv.append( ' FIT_UINT32 {} = fit_interp_string_value(interp, {});'.format( one, field_num ) )
return rv;
def objc_stmt_case_to_string(self,ctx,message):
if self.references:
rv = [ ' case {}:'.format( self.field_num ),
' {' ]
if_statement = 'if'
rv.extend( self.objc_stmt_build_references_variables(ctx,message) );
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
for (onefield, oneval) in zip( r.reference_field, r.reference_field_value ):
rv.extend( [ ' {}( {} == {} ){{ // {} '.format( if_statement, onefield, message.type_for_field(ctx,onefield).value_for_string(oneval),oneval ),
' return @"{}";'.format( r.name ),
] )
if_statement = '}else if'
if if_statement == 'if':
rv.append( ' }' )
else:
rv.extend( [ ' }else{',
' return @"{}";'.format( self.name ),
' }'
])
rv.append( ' }' )
return rv
else:
return [ ' case {}: return @"{}";'.format( self.field_num, self.name ) ]
def objc_expr_fit_field_info(self,ctx):
rv = None
scale = 0
offset = 0
unit = 0
fit_type = 0
flags = 0
report = False
if self.scale and isinstance(self.scale, int ):
scale = self.scale
report = True
if self.offset:
offset = self.offset
report = True
if self.type_name and self.type_name in ctx.types:
fit_type = ctx.types[ self.type_name ].type_num
if self.type_name == 'date_time' or self.type_name == 'local_date_time':
flags = 1
report = True
if self.unit and self.unit in ctx.units:
unit = ctx.units[ self.unit ]
report = True
if report:
rv = '(FIT_FIELD_INFO){{.scale = {}, .offset = {}, .fit_type = {}, .fit_unit = {}, .fit_flag = {} }}'.format( scale,offset,fit_type,unit,flags )
return rv
def objc_stmt_case_to_field_info(self,ctx,message):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
if self.references:
rv.extend( [ ' case {}: // {}'.format( self.field_num, self.name ),
' {',
] )
if_statement = 'if'
rv.extend( self.objc_stmt_build_references_variables(ctx,message) )
for r in self.references:
if not r.reference_field:
print( 'bug', self.name, r.name )
fit_field_info = r.objc_expr_fit_field_info(ctx)
if fit_field_info:
for (onefield, oneval) in zip( r.reference_field, r.reference_field_value ):
rv.extend( [ ' {}( {} == {} ){{ // {} '.format( if_statement, onefield, message.type_for_field(ctx,onefield).value_for_string(oneval), oneval ),
' return {};'.format( fit_field_info ),
] )
if_statement = '}else if'
if if_statement != 'if':
rv.append( ' }' )
if self.is_value:
rv.append( ' return {};'.format( self.objc_expr_fit_field_info(ctx) ) )
else:
rv.append( ' return (FIT_FIELD_INFO){.scale = 0, .offset = 0, .fit_type = FIT_TYPE_PENDING, .fit_unit = 0, .fit_flag = 0 };' )
rv.append( ' }' )
else:
fit_field_info = self.objc_expr_fit_field_info(ctx)
if fit_field_info:
rv.append( ' case {}: return {}; // {}'.format( self.field_num, fit_field_info, self.name ) )
return rv
def swift_expr_formula(self,ctx):
if self.is_array and self.array_size > 1:
formula = 'Double(x.{}.0)'.format(self.name)
else:
formula = 'Double(x.{})'.format(self.name)
        # ignore scales that span multiple fields, ex: compressed_speed_distance = 100,16
if self.scale and ',' not in str(self.scale) and float(self.scale) != 1.0:
formula = '({}/Double({}))'.format( formula, self.scale )
if self.offset and float(self.offset) != 0.0:
formula += '-Double({})'.format(self.offset)
return formula
class Message:
'''
A message name comes from the Messages tab of profile.xlsx
    It contains a name, which should match an entry in the mesg_num type,
    and a list of field definitions for the message
    name: name of the message (ex: file_id, record, ..)
    mesg_num: the mesg_num value from the type definition (ex: 18 (session), 20 (record)...
struct_name: objc type for structure (ex: FIT_FILE_ID_MESG, FIT_RECORD_MESG)
fields: array of fields in order of the Profile read [Field(file_id),...Field(record)...]
fields_map: name to field {'file_id': Field(file_id), ...}
'''
def __init__(self,ctx,name):
self.name = name
self.mesg_num = ctx.types['mesg_num'].value_for_string( name )
self.fields = []
self.fields_map = {}
self.struct_name = 'FIT_{}_MESG'.format( self.name.upper() )
def __repr__(self):
return( 'Message({}={})[{}]'.format( self.name, self.mesg_num, len( self.fields ) ) )
def add(self,ctx,row):
if row[1] is not None:
field = Field( ctx,row )
self.fields.append( field )
self.fields_map[ field.name ] = field
elif len(self.fields)>0:
self.fields[-1].add_reference(ctx,row)
def type_for_field(self,ctx,field_name):
# field ex: manufacturer
# return type for that field
field = self.fields_map[field_name]
return ctx.types[ field.type_name ]
def has_switched_field(self):
rv = False
for f in self.fields:
if f.is_switched:
rv = True
return rv
def has_included(self):
rv = False
for f in self.fields:
if f.include:
rv = True
return rv
def field_to_unit(self,all_fields):
for f in self.fields:
for (k,v) in f.name_to_units().items():
if k not in all_fields:
all_fields[k] = {}
all_fields[k][self.name] = v
def fields_sorted_by_alignments(self):
rv = sorted( self.fields, key=lambda x: x.base_type_alignment(), reverse=True )
return rv
def objc_type_mesg_def_struct(self):
return 'FIT_{}_MESG_DEF'.format( self.name.upper() )
def objc_var_mesg_def(self):
return '{}_mesg_def'.format( self.name )
def mesg_def_struct_type_name(self):
return 'FIT_{}_MESG_DEF'.format( self.name.upper() )
#--- Swift message
def swift_fname_field_num_to_string(self):
return 'rzfit_swift_field_num_to_string_for_{}'.format( self.name )
def swift_func_field_num_to_string(self,ctx):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
if self.has_switched_field():
rv.append( 'fileprivate func {}( field_num : FIT_UINT16 , strings : [String:String] ) -> String {{'.format( self.swift_fname_field_num_to_string() ) )
else:
rv.append( 'fileprivate func {}( field_num : FIT_UINT16 ) -> String {{'.format( self.swift_fname_field_num_to_string() ) )
rv.append( ' switch field_num {' )
for field in self.fields:
rv.extend( field.swift_stmt_case_to_string(ctx,self ) )
rv.extend( [ ' default: return "{}_field_num_\(field_num)"'.format( self.name ),
' }',
'}'
])
return rv
def swift_fname_value_dict(self):
return 'rzfit_swift_value_dict_for_{}'.format( self.name )
def swift_func_value_dict(self,ctx):
rv = first_line_with_annotate_comment('',ctx.annotate)
rv.extend( [ 'fileprivate func {}( ptr : UnsafePointer<{}>) -> [String:Double] {{'.format( self.swift_fname_value_dict(), self.struct_name ) ] )
elems = []
for field in self.fields_sorted_by_alignments():
if field.include:
elems += field.swift_stmt_convert_value(ctx, self, ' ')
if elems:
rv += [ ' var rv : [String:Double] = [:]',
' let x : {} = ptr.pointee'.format(self.struct_name)
]
rv += elems
rv += [ ' return rv',
'}' ]
else:
rv += [ ' return [:]',
'}' ]
return rv
def swift_fname_string_dict(self):
return 'rzfit_swift_string_dict_for_{}'.format( self.name )
def swift_func_string_dict(self,ctx):
rv = first_line_with_annotate_comment('',ctx.annotate)
rv.extend( [ 'fileprivate func {}( ptr : UnsafePointer<{}>) -> [String:String] {{'.format(self.swift_fname_string_dict(), self.struct_name ) ] )
elems = []
hasString = False
for field in self.fields_sorted_by_alignments():
if field.include:
if field.is_string:
hasString = True
elems += field.swift_stmt_convert_string(ctx,self)
if elems:
rv += [ ' var rv : [String:String] = [:]',
' {} x : {} = ptr.pointee'.format('var' if hasString else 'let', self.struct_name)
]
rv += elems
rv += [ ' return rv',
'}' ]
else:
rv += [ ' return [:]',
'}'
]
return( rv )
def swift_fname_date_dict(self):
return 'rzfit_swift_date_dict_for_{}'.format( self.name )
def swift_func_date_dict(self,ctx):
rv = first_line_with_annotate_comment('',ctx.annotate)
rv.extend( [ 'fileprivate func {}( ptr : UnsafePointer<{}>) -> [String:Date] {{'.format( self.swift_fname_date_dict(), self.struct_name ),
] )
elems = []
for field in self.fields:
if field.include:
elems += field.swift_stmt_convert_date(ctx, self, ' ')
if elems:
rv += [ ' var rv : [String:Date] = [:]',
' let x : {} = ptr.pointee'.format(self.struct_name)
]
rv += elems
rv += [ ' return rv',
'}' ]
else:
rv += [ ' return [:]',
'}' ]
return rv
def swift_stmt_case_fit_mesg(self,ctx):
rv = first_line_with_annotate_comment(' ',ctx.annotate)
rv.extend( [ ' case {}: // {}'.format( self.mesg_num, self.name ),
' uptr.withMemoryRebound(to: {}.self, capacity: 1) {{'.format( self.struct_name ),
' rv = FitMessage( mesg_num: {},'.format( self.mesg_num ),
' mesg_values: {}(ptr: $0),'.format( self.swift_fname_value_dict()),
' mesg_enums: {}(ptr: $0),'.format( self.swift_fname_string_dict()),
' mesg_dates: {}(ptr: $0))'.format( self.swift_fname_date_dict()),
' }'
] )
return rv
def swift_fname_reverse_value(self):
return 'rzfit_swift_reverse_value_{}'.format( self.name )
def swift_func_reverse_value(self,ctx):
rv = first_line_with_annotate_comment('', ctx.annotate)
rv.extend( [
'fileprivate func {}(field: String, value: String) -> RzFitSwiftValue {{'.format( self.swift_fname_reverse_value() ),
' switch field {'
])
for field in self.fields:
rv.extend( field.swift_stmt_case_reverse_value(ctx,self) )
rv.extend( [
' default:',
' return .unknown',
' }',
'}'
])
return rv
#--- objc message
def objc_fname_field_num_to_string(self):
return 'rzfit_objc_field_num_to_string_for_{}'.format( self.name )
def objc_func_field_num_to_string(self,ctx):
rv = first_line_with_annotate_comment('',ctx.annotate)
if self.has_switched_field():
rv.append( 'static NSString * {}( FIT_UINT8 field_num, FIT_INTERP_FIELD * interp ){{'.format( self.objc_fname_field_num_to_string() ) ),
else:
rv.append( 'static NSString * {}( FIT_UINT8 field_num ){{'.format( self.objc_fname_field_num_to_string() ) )
rv.append( ' switch( field_num ){' )
for field in self.fields:
rv.extend( field.objc_stmt_case_to_string(ctx,self) )
rv.extend( [ ' default: return [NSString stringWithFormat:@"{}_field_num_%u", (unsigned int)field_num];'.format( self.name) ,
' }',
'}',
'',
] )
return rv
def objc_fname_field_info(self):
return 'rzfit_objc_field_info_for_{}'.format( self.name )
def objc_func_field_info(self, ctx ):
needed = []
for f in self.fields:
one = f.objc_stmt_case_to_field_info(ctx,self)
if one:
needed.extend( one )
rv = first_line_with_annotate_comment('',ctx.annotate)
if needed:
if self.has_switched_field():
rv.append( 'static FIT_FIELD_INFO {}(FIT_UINT16 field, FIT_INTERP_FIELD * interp){{'.format( self.objc_fname_field_info() ) )
else:
rv.append( 'static FIT_FIELD_INFO {}(FIT_UINT16 field){{'.format( self.objc_fname_field_info() ) )
rv.append( ' switch( field ){' ),
rv.extend( needed )
rv.extend( [ ' default: return (FIT_FIELD_INFO){.scale = 0, .offset = 0, .fit_type = 0, .fit_unit = 0, .fit_flag = 0 };',
' }',
'}',
] )
return rv
def objc_mesg_struct(self,ctx):
rv = []
fields = []
for f in self.fields_sorted_by_alignments():
if f.include:
fields.append( f )
if not fields:
return rv
rv = first_line_with_annotate_comment('',ctx.annotate)
rv.extend( [ 'typedef struct {' ] )
for f in fields:
if f.array_size:
rv.append( ' {} {}[{}]; // {}'.format( f.objc_base_type, f.name, f.array_size, f.fit_type.name if f.fit_type else '' ) )
else:
rv.append( ' {} {}; // {}'.format( f.objc_base_type, f.name, f.fit_type.name if f.fit_type else '' ) )
rv.extend( ['}} {};'.format( self.struct_name ), '' ] )
return rv
def objc_mesg_def(self,ctx):
rv = []
fields = []
for f in self.fields_sorted_by_alignments():
if f.include:
fields.append( f )
if not fields:
return rv
rv = first_line_with_annotate_comment('',ctx.annotate)
rv.extend( [ 'typedef struct {',
' FIT_UINT8 reserved_1;',
' FIT_UINT8 arch;',
' FIT_UINT16 global_mesg_num;',
' FIT_UINT8 num_fields;',
' FIT_UINT8 fields[FIT_FIELD_DEF_SIZE * {:2}];'.format( len( fields ) ),
'}} {};'.format( self.objc_type_mesg_def_struct() ),
''
] )
rv.extend( [ 'static const {} {} = {{'.format(self.objc_type_mesg_def_struct(), self.objc_var_mesg_def()),
' 0, // reserved_1',
' FIT_ARCH_ENDIAN, // arch,',
' /* {} */{}, // mesg_num,'.format( self.name, self.mesg_num ),
' {},'.format( len(fields) ),
' {'
] )
sizes = (0, 0, 0)
entries = []
for f in fields:
base_type = 'FIT_BASE_TYPE_{}'.format( f.base_type.upper() )
if base_type == 'FIT_BASE_TYPE_BOOL':
base_type = 'FIT_BASE_TYPE_ENUM'
one = [ '/* {} */{},'.format( f.name, f.field_num ),
'(sizeof({})*{}),'.format( f.objc_base_type, f.array_size if f.array_size else 1 ),
'{},'.format( base_type )
]
entries.append( one )
            sizes = ( max(len(one[0]), sizes[0]), max(len(one[1]), sizes[1]), max(len(one[2]), sizes[2]) )
for one in entries:
rv.append( ' ' + '{0: <{width0}} {1: <{width1}} {2: <{width2}}'.format( one[0], one[1], one[2], width0 = sizes[0], width1 = sizes[1], width2 = sizes[2] ) )
rv.extend( [ ' }',
'};'
] )
return rv
class Profile:
'''
units: dict name to internal unit name (ex: { 'bpm': 1 } )
types: dict name to Type object (ex: { 'garmin_product': Type(garmin_product) } )
messages: dict name to Message object (ex: { 'mesg_num' : Message(mesg_num) } )
structs: dict of fit type to Struct defined in c (ex: { 'FIT_RECORD_MESG' : Struct(record) } )
'''
def __init__(self,path_to_profile, types=None, messages=None, fields=None, verbose=True, annotate=False):
'''
path_to_profile: path to Profile.xlsx from the Fit SDK
types: array of type names to focus on for generation function or None for all (ex: ['sport','mesg_num'...])
messages: array of messages to focus on or None for all (ex: ['record','session',...])
        verbose: flag controlling whether progress is logged
'''
self.profile = path_to_profile
self.verbose = verbose
self.annotate = annotate
self.focus_types = types
self.focus_messages = messages
self.focus_fields = fields
self.parse_profile_excel()
def parse_profile_excel(self):
logging.info( 'Parsing {}'.format( self.profile ) )
wb = openpyxl.load_workbook(filename=self.profile)
ws_types = list(wb['Types'].values)
self.types = {}
current = None
for row in ws_types[1:]:
if len(row)>0 and row[0] and row[1]:
# len+1 so 0 means no type
current = Type( row[0], row[1], len(self.types)+1, annotate=self.annotate )
self.types[ current.name ] = current
elif current:
# special case with duplicated number, breaks switch
if row[4] and row[4].startswith('Deprecated' ) and row[2] == 'forecast':
continue
current.add_row( row )
logging.info( 'Read {} types'.format( len(self.types ) ) )
ws_messages = list(wb['Messages'].values)
self.messages = {}
current = None
self.units = {}
for row in ws_messages[1:]:
if row[0]:
current = Message(self,row[0])
self.messages[ current.name ] = current
elif current and row[2]:
current.add( self,row )
if self.verbose:
logging.info( 'Read {} messages'.format( len(self.messages ) ) )
logging.info( 'Read {} units'.format( len(self.units ) ) )
def arg_types(self):
rv = []
        if self.focus_types:
for i in self.focus_types:
if i in self.types:
rv.append( self.types[i] )
elif int(i) > 0:
for t in self.types.values():
if int(i) == int(t.type_num):
rv.append( t )
else:
rv = [self.types[x] for x in self.ordered_types()]
return rv
def arg_messages(self):
rv = []
if self.focus_messages:
for i in self.focus_messages:
if i in self.messages:
rv.append( self.messages[i] )
elif int(i) > 0:
for m in self.messages.values():
if int(m.mesg_num) == int(i):
rv.append( m )
else:
rv = self.messages.values()
return rv
def arg_fields(self,message):
rv = []
if self.focus_fields:
for i in self.focus_fields:
if i in message.fields_map:
rv.append( message.fields_map[i] )
elif int(i) > 0:
for f in message.fields:
if int(f.field_num) == int(i):
rv.append( f )
else:
rv = message.fields
return rv
def unit_num( self, unit_name ):
if not unit_name:
return 0
if unit_name not in self.units:
self.units[ unit_name ] = len( self.units ) + 1
return self.units[ unit_name ]
def ordered_types(self):
ordered = sorted( self.types.keys(), key=lambda x: self.types[x].type_num )
return ordered
#---- objc context
def objc_fname_field_info(self):
return 'rzfit_objc_field_info'
def objc_func_field_info(self):
rv = first_line_with_annotate_comment('',self.annotate)
rv.extend( [ 'FIT_FIELD_INFO {}( FIT_UINT16 global_mesg_num, FIT_UINT16 field, FIT_INTERP_FIELD * interp ){{'.format( self.objc_fname_field_info() ),
' switch(global_mesg_num){',
] )
for message in self.messages.values():
mesg_num = message.mesg_num
mesg_name = message.name
if message.has_switched_field():
rv.append( ' case {}: return {}(field,interp);'.format( mesg_num, message.objc_fname_field_info() ) )
else:
rv.append( ' case {}: return {}(field);'.format( mesg_num, message.objc_fname_field_info() ) )
rv.extend( [ ' default: return (FIT_FIELD_INFO){.scale = 0, .offset = 0, .fit_type = 0, .fit_unit = 0, .fit_flag = 0 };',
' }',
'}'
] )
return rv
def objc_fname_unit_to_string(self):
return 'rzfit_objc_unit_to_string'
def objc_func_unit_to_string(self):
rv = first_line_with_annotate_comment('',self.annotate)
rv.extend( [ 'NSString * {}( FIT_UNIT fit_unit ){{'.format( self.objc_fname_unit_to_string() ),
' switch( fit_unit ){'
] )
ordered = sorted( self.units.keys(), key=lambda x: self.units[x] )
for k in ordered:
rv.append( ' case {}: return @"{}";'.format( self.units[k], k.replace( '\n','' ) ) )
rv.extend( [ ' default: return [NSString stringWithFormat:@"FIT_UNIT_%u", (unsigned int)fit_unit];' ,
' }',
'}',
''
] )
return rv
def objc_fname_type_to_string(self):
return 'rzfit_objc_type_to_string'
def objc_func_type_to_string(self):
rv = first_line_with_annotate_comment('',self.annotate)
rv.extend( [ 'NSString * {}( FIT_TYPE fit_type, FIT_UINT32 val ){{'.format( self.objc_fname_type_to_string() ),
' switch( fit_type ){'
] )
ordered = self.ordered_types()
for k in ordered:
rv.extend( self.types[k].objc_stmt_case_type_function_call() )
rv.extend( [ ' default: return [NSString stringWithFormat:@"FIT_TYPE_%u_VALUE_%u", (unsigned int)fit_type, (unsigned int)val];' ,
' }',
'}',
''
] )
return rv
def objc_fname_field_num_to_string(self):
return 'rzfit_objc_field_num_to_string'
def objc_func_field_num_to_string(self):
mesg_num = self.types['mesg_num']
rv = first_line_with_annotate_comment('',self.annotate)
rv.extend( [ 'NSString * {}( FIT_UINT16 global_mesg_num, FIT_UINT16 field, FIT_INTERP_FIELD * interp ){{'.format( self.objc_fname_field_num_to_string() ),
' switch( global_mesg_num ){'
] )
for t in mesg_num.values:
mesg_name = t['name']
if mesg_name not in self.messages:
if self.verbose:
logging.debug( 'Message {} in mesg_num type has no definition, skipping for objc'.format( mesg_name ) )
else:
mesg = self.messages[ mesg_name ]
if mesg.has_switched_field():
rv.append( ' case {}: return {}(field,interp);'.format( t['value'], mesg.objc_fname_field_num_to_string() ) )
else:
rv.append( ' case {}: return {}(field);'.format( t['value'], mesg.objc_fname_field_num_to_string() ) )
rv.extend( [ ' default: return [NSString stringWithFormat:@"MESG_NUM_%u_FIELD_%u", (unsigned int)global_mesg_num, (unsigned int)field];' ,
' }',
'}',
''
] )
return rv
def objc_var_fit_mesg_defs(self):
return 'reference_mesg_defs'
def objc_fit_mesg_defs_forward_declare(self):
rv = []
messages = []
for m in self.arg_messages():
if m.has_included():
messages.append( m )
rv.extend( [ 'extern void fit_set_{}();'.format( self.objc_var_fit_mesg_defs(), len(messages) ),
''
] )
return rv
def objc_fit_mesg_defs(self):
rv = first_line_with_annotate_comment('',self.annotate)
messages = []
for m in self.arg_messages():
if m.has_included():
messages.append( m )
if messages:
rv.extend( [ 'FIT_UINT8 {}_size = {};'.format( self.objc_var_fit_mesg_defs(), len( messages ) ),
'FIT_CONST_MESG_DEF_PTR {}[] = {{'.format( self.objc_var_fit_mesg_defs() )
] )
for m in messages:
rv.append( ' (FIT_CONST_MESG_DEF_PTR) &{},'.format( m.objc_var_mesg_def() ) )
rv.extend( [ '};', '' ] )
rv.extend( ['void fit_set_{}() {{'.format( self.objc_var_fit_mesg_defs(), len(messages) ),
' Fit_SetMesgDefs({}, {}_size);'.format( self.objc_var_fit_mesg_defs(), self.objc_var_fit_mesg_defs() ),
'}',
] )
return rv
#--- swift Profile
def swift_unit_functions(self):
rv = first_line_with_annotate_comment(' ',self.annotate)
        rv.extend( [ 'func rzfit_swift_known_units( ) -> [String] {' ,
        ' return ['
        ] )
for k in self.units.keys():
rv.append( ' "{}",'.format( k.replace( '\n', '' ) ) )
rv.extend( [ ' ]' ,
'}',
''
] )
rv.extend( [ 'func rzfit_swift_unit_for_field( mesg_num : FIT_UINT16, field : String ) -> String? {',
' switch field {'
] )
field_to_unit = {}
mesg_num = self.types['mesg_num']
for (name,message) in self.messages.items():
message.field_to_unit(field_to_unit)
for (f,defs) in field_to_unit.items():
units = set(defs.values())
if len( units ) == 1:
rv.append( ' case "{}": return "{}"'.format( f, next(iter(units))).replace( '\n','' ) )
else:
rv.extend( [ ' case "{}": '.format( f ),
' switch mesg_num {',
] )
for (m,u) in defs.items():
rv.append( ' case {}: return "{}" // {}'.format( mesg_num.values_map[m], u, m ) )
rv.extend( [ ' default: return nil',
' }'
] )
rv.extend( [ ' default: return nil',
' }',
'}' ] )
return rv
def swift_fname_type_to_string(self):
return 'rzfit_swift_string_for_type'
def swift_func_type_to_string(self):
rv = [ 'func {}(fit_type : FIT_UINT8, val : FIT_UINT32 ) -> String {{'.format( self.swift_fname_type_to_string() ),
' switch fit_type {'
]
ordered = self.ordered_types()
for k in ordered:
rv.extend( self.types[k].swift_stmt_case_type_function_call() )
rv.extend( [ ' default: return "fit_type_\(fit_type)_\(val)"',
' }',
'}' ] )
return rv
def swift_fname_reverse_value(self):
return 'rzfit_swift_reverse_value'
def swift_func_reverse_value(self):
rv = first_line_with_annotate_comment('', self.annotate)
rv.extend( [
'public enum RzFitSwiftValue {',
' case string(String)',
' case value(Double)',
' case date(Date)',
' case unknown',
'}',
'',
'public func {}(mesg: String, field: String, value: String) -> RzFitSwiftValue {{'.format( self.swift_fname_reverse_value() ),
' switch mesg {'
])
for mesg in self.types['mesg_num'].values:
if mesg['name'] in self.messages:
message = self.messages[ mesg['name'] ]
rv.extend( [ ' case "{}": // {}'.format( mesg['name'], mesg['value'] ),
' return {}(field: field, value: value )'.format(message.swift_fname_reverse_value()),
] )
rv.extend( [
' default:',
' return .unknown',
' }',
'}'
])
return rv
def swift_fname_field_num_to_string(self):
return 'rzfit_swift_field_num_to_string'
def swift_func_field_num_to_string(self):
rv = first_line_with_annotate_comment('',self.annotate)
mesg_num = self.types['mesg_num']
rv.extend( [ 'func {}( mesg_num : FIT_UINT16, field_num : FIT_UINT16, strings : [String:String]) -> String {{'.format( self.swift_fname_field_num_to_string() ),
' switch mesg_num {'
] )
for t in mesg_num.values:
mesg_name = t['name']
if mesg_name not in self.messages:
logging.debug( 'Message {} in mesg_num type not defined, skipping for swift'.format( mesg_name ) )
else:
mesg = self.messages[mesg_name]
if mesg.has_switched_field():
rv.append( ' case {}: return {}(field_num: field_num, strings: strings)'.format( t['value'], mesg.swift_fname_field_num_to_string() ) )
else:
rv.append( ' case {}: return {}(field_num: field_num)'.format( t['value'], mesg.swift_fname_field_num_to_string() ) )
rv.extend( [ ' default: return "mesg_num_\(mesg_num)_field_num_\(field_num)"',
' }',
'}' ] )
return rv
def swift_func_messages_dict(self):
rv = first_line_with_annotate_comment(' ',self.annotate)
messages = self.arg_messages()
for s in messages:
if s.has_included():
rv.extend( s.swift_func_value_dict(self) )
rv.extend( s.swift_func_string_dict(self) )
rv.extend( s.swift_func_date_dict(self) )
return rv
def swift_fname_build_mesg(self):
return 'rzfit_swift_build_mesg'
def swift_func_build_mesg(self):
rv = first_line_with_annotate_comment(' ',self.annotate)
rv.extend( [
'func {}(mesg_num : FIT_MESG_NUM, uptr : UnsafePointer<UInt8>) -> FitMessage? {{'.format(self.swift_fname_build_mesg()),
' var rv : FitMessage? = nil',
' switch mesg_num {',
])
ordered = sorted(self.arg_messages(), key=lambda x: x.mesg_num)
for message in ordered:
if message.has_included():
rv.extend( message.swift_stmt_case_fit_mesg(self) )
rv.extend( [
' default:',
' rv = FitMessage( mesg_num: mesg_num, mesg_values: [:], mesg_enums: [:], mesg_dates: [:])',
' }',
' return rv',
'}'
] )
return rv
class Command :
def __init__(self,args):
self.args = args
if self.args.quiet:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING )
else:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
self.context = Profile(args.profile,messages=args.message,fields=args.field,types=args.type,annotate=args.annotate)
def generate_swift_reverse_file(self):
swift_dir = self.args.swiftdir
swift_file_name = os.path.join( swift_dir, 'rzfit_swift_reverse_map.swift' )
logging.info( 'Writing {}'.format( swift_file_name ) )
oof = open( swift_file_name, 'w' )
rv = [
'// This file is auto generated, Do not edit',
'',
'import FitFileParserObjc'
]
rv.extend( self.context.swift_func_reverse_value() )
for (n,m) in self.context.messages.items():
rv.extend( m.swift_func_reverse_value(self.context) )
for one in self.context.types.values():
rv.extend( one.swift_func_reverse_value() )
oof.write( '\n'.join( rv ) )
def generate_swift_file(self):
swift_dir = self.args.swiftdir
swift_file_name = os.path.join( swift_dir, 'rzfit_swift_map.swift' )
logging.info( 'Writing {}'.format( swift_file_name ) )
oof = open( swift_file_name, 'w' )
rv = [
'// This file is auto generated, Do not edit',
'',
'import FitFileParserObjc'
]
if os.path.isfile( 'fitsdkversion.txt' ):
with open( 'fitsdkversion.txt', 'r' ) as vf:
version = vf.readline().rstrip()
rv.extend( [
'',
'extension FitFile {',
' public static let sdkVersion = "{}"'.format( version ),
'}'
] )
rv.extend( [
'',
'//MARK: - Module Entry Point Functions',
''
] )
mesg_num = self.context.types['mesg_num']
rv.extend( self.context.swift_func_build_mesg() )
rv.extend( self.context.swift_unit_functions() )
rv.extend( self.context.swift_func_type_to_string() )
rv.extend( mesg_num.swift_func_from_string(fileprivate=False) )
rv.extend( mesg_num.swift_func_to_string(fileprivate=False) )
rv.append( '// MARK: - Extension' )
rv.extend( mesg_num.swift_stmt_extension('FitMessageType' ) )
rv.extend( [
'',
            '//MARK: - conversion fit type to string functions',
''
] )
for one in self.context.types.values():
if one.name != 'mesg_num':
rv.extend( one.swift_func_to_string() )
rv.extend( [
'',
'//MARK: - fit convert structure to dict',
''
] )
for (n,m) in self.context.messages.items():
rv.extend( m.swift_func_field_num_to_string(self.context) )
rv.extend( self.context.swift_func_field_num_to_string() )
rv.extend( [
'',
'//MARK: - fit build messages dict ',
''
] )
rv.extend( self.context.swift_func_messages_dict() )
oof.write( '\n'.join( rv ) )
def generate_objc_mesg_def(self):
objc_dir = self.args.objcdir
objc_file_name = os.path.join( objc_dir, 'rzfit_objc_reference_mesg.m' )
objc_header = 'rzfit_objc_reference_mesg.h'
objc_header_name = os.path.join( objc_dir, objc_header )
logging.info( 'Writing {}'.format( objc_file_name ) )
oof = open( objc_file_name, 'w')
rv = [
'// This file is auto generated, Do not edit',
'',
'#include "{}"'.format( objc_header ),
'',
]
messages = self.context.arg_messages()
for m in messages:
rv.extend( m.objc_mesg_def(self.context) )
rv.extend( self.context.objc_fit_mesg_defs() )
oof.write( '\n'.join( rv ) )
logging.info( 'Writing {}'.format( objc_header_name ) )
ooh = open( objc_header_name, 'w')
rv = [
'// This file is auto generated, Do not edit',
'#pragma once',
'#include "fit.h"',
'',
'',
]
rv.extend( self.context.objc_fit_mesg_defs_forward_declare() )
# types that are useful to define
rv.extend( self.context.types['mesg_num'].objc_typedef() )
rv.extend( self.context.types['fit_base_type'].objc_typedef() )
messages = self.context.arg_messages()
for m in messages:
rv.extend( m.objc_mesg_struct(self.context) )
ooh.write( '\n'.join( rv ) )
def generate_objc_file(self):
objc_dir = self.args.objcdir
objc_file_name = os.path.join( objc_dir, 'rzfit_objc_map.m' )
objc_header = 'rzfit_objc_map.h'
logging.info( 'Writing {}'.format( objc_file_name ) )
oof = open( objc_file_name, 'w')
rv = [
'// This file is auto generated, Do not edit',
'',
'@import Foundation;',
'#include "{}"'.format( objc_header ),
            '',
            '#pragma mark - types conversion section\n',
]
for (n,t) in self.context.types.items():
if t.name != 'mesg_num':
rv.extend( t.objc_func_to_string() )
rv.append( '#pragma mark - message field info' )
for (n,m) in self.context.messages.items():
rv.extend( m.objc_func_field_info(self.context) )
rv.append( '#pragma mark - message field name conversion section' )
for (n,m) in self.context.messages.items():
rv.extend( m.objc_func_field_num_to_string(self.context) )
rv.append( '#pragma mark - public section' )
rv.extend( self.context.types['mesg_num'].objc_func_to_string(fileprivate=False) )
rv.extend( self.context.objc_func_unit_to_string() )
rv.extend( self.context.objc_func_field_num_to_string() )
rv.extend( self.context.objc_func_type_to_string() )
rv.extend( self.context.objc_func_field_info() )
oof.write( '\n'.join( rv ) )
def cmd_generate(self):
self.generate_objc_mesg_def()
self.generate_objc_file()
self.generate_swift_file()
self.generate_swift_reverse_file()
def cmd_message(self):
messages = self.context.arg_messages()
for m in messages:
print( m )
if self.args.message:
fields = self.context.arg_fields(m)
for f in fields:
if self.args.field:
print( f.description() )
else:
print( f )
def cmd_type(self):
types = self.context.arg_types()
for t in types:
if self.args.type:
print( t.description() )
else:
print( t )
if __name__ == "__main__":
commands = {
'message':{'attr':'cmd_message','help':'Show message information'},
'type':{'attr':'cmd_type','help':'Show type information'},
'generate':{'attr':'cmd_generate','help':'Generate swift and objective c files'},
}
description = "\n".join( [ ' {}: {}'.format( k,v['help'] ) for (k,v) in commands.items() ] )
parser = argparse.ArgumentParser( description='Auto Generate Parser files', formatter_class=argparse.RawTextHelpFormatter )
parser.add_argument( 'command', metavar='Command', help = 'command to execute:\n' + description )
parser.add_argument( 'profile', default = 'Profile.xlsx' )
parser.add_argument( '-a', '--annotate', action='store_true', default=False, help = 'Annotate source code with generating code location info' )
parser.add_argument( '-o', '--objcdir', default = '../Sources/FitFileParserObjc' )
parser.add_argument( '-s', '--swiftdir', default = '../Sources/FitFileParser' )
parser.add_argument( '-m', '--message', default = None )
parser.add_argument( '-t', '--type', default = None )
parser.add_argument( '-f', '--field', default = None )
parser.add_argument( '-q', '--quiet', default=False, action='store_true' )
args = parser.parse_args()
command = Command( args )
if args.command in commands:
getattr(command,commands[args.command]['attr'])()
else:
logging.error( 'Invalid command "{}"'.format( args.command) )
parser.print_help()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, unicode_literals
import base64
import cgi
import contextlib
import datetime
import decimal
import json
import time
from mock import Mock, patch
import pytest
import six
from six.moves import range, urllib
import mixpanel
class LogConsumer(object):
def __init__(self):
self.log = []
def send(self, endpoint, event, api_key=None):
if api_key:
self.log.append((endpoint, json.loads(event), api_key))
else:
self.log.append((endpoint, json.loads(event)))
# Convert a query string with base64 data into a dict for safe comparison.
def qs(s):
if isinstance(s, six.binary_type):
s = s.decode('utf8')
blob = cgi.parse_qs(s)
if len(blob['data']) != 1:
pytest.fail('found multi-item data: %s' % blob['data'])
json_bytes = base64.b64decode(blob['data'][0])
blob['data'] = json.loads(json_bytes.decode('utf8'))
return blob
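# A small illustrative round-trip (made-up payload, helper name assumed): build a query
# string whose "data" field is base64-encoded JSON, then decode it back with qs().
def _example_qs_roundtrip():
    payload = base64.b64encode(json.dumps({'event': 'demo'}).encode('utf8')).decode('utf8')
    return qs('ip=0&data=' + urllib.parse.quote(payload) + '&verbose=1')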
class TestMixpanel:
TOKEN = '12345'
def setup_method(self, method):
self.consumer = LogConsumer()
self.mp = mixpanel.Mixpanel('12345', consumer=self.consumer)
self.mp._now = lambda: 1000.1
def test_track(self):
self.mp.track('ID', 'button press', {'size': 'big', 'color': 'blue'})
assert self.consumer.log == [(
'events', {
'event': 'button press',
'properties': {
'token': self.TOKEN,
'size': 'big',
'color': 'blue',
'distinct_id': 'ID',
'time': int(self.mp._now()),
'mp_lib': 'python',
'$lib_version': mixpanel.__version__,
}
}
)]
def test_import_data(self):
timestamp = time.time()
self.mp.import_data('MY_API_KEY', 'ID', 'button press', timestamp, {'size': 'big', 'color': 'blue'})
assert self.consumer.log == [(
'imports', {
'event': 'button press',
'properties': {
'token': self.TOKEN,
'size': 'big',
'color': 'blue',
'distinct_id': 'ID',
'time': int(timestamp),
'mp_lib': 'python',
'$lib_version': mixpanel.__version__,
},
},
'MY_API_KEY'
)]
def test_track_meta(self):
self.mp.track('ID', 'button press', {'size': 'big', 'color': 'blue'},
meta={'ip': 0})
assert self.consumer.log == [(
'events', {
'event': 'button press',
'properties': {
'token': self.TOKEN,
'size': 'big',
'color': 'blue',
'distinct_id': 'ID',
'time': int(self.mp._now()),
'mp_lib': 'python',
'$lib_version': mixpanel.__version__,
},
'ip': 0,
}
)]
def test_people_set(self):
self.mp.people_set('amq', {'birth month': 'october', 'favorite color': 'purple'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$set': {
'birth month': 'october',
'favorite color': 'purple',
},
}
)]
def test_people_set_once(self):
self.mp.people_set_once('amq', {'birth month': 'october', 'favorite color': 'purple'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$set_once': {
'birth month': 'october',
'favorite color': 'purple',
},
}
)]
def test_people_increment(self):
self.mp.people_increment('amq', {'Albums Released': 1})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$add': {
'Albums Released': 1,
},
}
)]
def test_people_append(self):
self.mp.people_append('amq', {'birth month': 'october', 'favorite color': 'purple'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$append': {
'birth month': 'october',
'favorite color': 'purple',
},
}
)]
def test_people_union(self):
self.mp.people_union('amq', {'Albums': ['Diamond Dogs']})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$union': {
'Albums': ['Diamond Dogs'],
},
}
)]
def test_people_unset(self):
self.mp.people_unset('amq', ['Albums', 'Singles'])
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$unset': ['Albums', 'Singles'],
}
)]
def test_people_remove(self):
self.mp.people_remove('amq', {'Albums': 'Diamond Dogs'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$remove': {'Albums': 'Diamond Dogs'},
}
)]
def test_people_track_charge(self):
self.mp.people_track_charge('amq', 12.65, {'$time': '2013-04-01T09:02:00'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$append': {
'$transactions': {
'$time': '2013-04-01T09:02:00',
'$amount': 12.65,
},
},
}
)]
def test_people_track_charge_without_properties(self):
self.mp.people_track_charge('amq', 12.65)
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$append': {
'$transactions': {
'$amount': 12.65,
},
},
}
)]
def test_people_clear_charges(self):
self.mp.people_clear_charges('amq')
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$unset': ['$transactions'],
}
)]
def test_people_set_created_date_string(self):
created = '2014-02-14T01:02:03'
self.mp.people_set('amq', {'$created': created, 'favorite color': 'purple'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$set': {
'$created': created,
'favorite color': 'purple',
},
}
)]
def test_people_set_created_date_datetime(self):
created = datetime.datetime(2014, 2, 14, 1, 2, 3)
self.mp.people_set('amq', {'$created': created, 'favorite color': 'purple'})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$set': {
'$created': '2014-02-14T01:02:03',
'favorite color': 'purple',
},
}
)]
def test_alias(self):
mock_response = Mock()
mock_response.read.return_value = six.b('{"status":1, "error": null}')
with patch('six.moves.urllib.request.urlopen', return_value=mock_response) as urlopen:
self.mp.alias('ALIAS', 'ORIGINAL ID')
assert self.consumer.log == []
assert urlopen.call_count == 1
((request,), _) = urlopen.call_args
assert request.get_full_url() == 'https://api.mixpanel.com/track'
assert qs(request.data) == \
qs('ip=0&data=eyJldmVudCI6IiRjcmVhdGVfYWxpYXMiLCJwcm9wZXJ0aWVzIjp7ImFsaWFzIjoiQUxJQVMiLCJ0b2tlbiI6IjEyMzQ1IiwiZGlzdGluY3RfaWQiOiJPUklHSU5BTCBJRCJ9fQ%3D%3D&verbose=1')
def test_people_meta(self):
self.mp.people_set('amq', {'birth month': 'october', 'favorite color': 'purple'},
meta={'$ip': 0, '$ignore_time': True})
assert self.consumer.log == [(
'people', {
'$time': int(self.mp._now() * 1000),
'$token': self.TOKEN,
'$distinct_id': 'amq',
'$set': {
'birth month': 'october',
'favorite color': 'purple',
},
'$ip': 0,
'$ignore_time': True,
}
)]
def test_custom_json_serializer(self):
decimal_string = '12.05'
with pytest.raises(TypeError) as excinfo:
self.mp.track('ID', 'button press', {'size': decimal.Decimal(decimal_string)})
assert "not JSON serializable" in str(excinfo.value)
class CustomSerializer(mixpanel.DatetimeSerializer):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return obj.to_eng_string()
self.mp._serializer = CustomSerializer
self.mp.track('ID', 'button press', {'size': decimal.Decimal(decimal_string)})
assert self.consumer.log == [(
'events', {
'event': 'button press',
'properties': {
'token': self.TOKEN,
'size': decimal_string,
'distinct_id': 'ID',
'time': int(self.mp._now()),
'mp_lib': 'python',
'$lib_version': mixpanel.__version__,
}
}
)]
class TestConsumer:
@classmethod
def setup_class(cls):
cls.consumer = mixpanel.Consumer(request_timeout=30)
@contextlib.contextmanager
def _assertSends(self, expect_url, expect_data):
mock_response = Mock()
mock_response.read.return_value = six.b('{"status":1, "error": null}')
with patch('six.moves.urllib.request.urlopen', return_value=mock_response) as urlopen:
yield
assert urlopen.call_count == 1
(call_args, kwargs) = urlopen.call_args
(request,) = call_args
timeout = kwargs.get('timeout', None)
assert request.get_full_url() == expect_url
assert qs(request.data) == qs(expect_data)
assert timeout == self.consumer._request_timeout
def test_send_events(self):
with self._assertSends('https://api.mixpanel.com/track', 'ip=0&data=IkV2ZW50Ig%3D%3D&verbose=1'):
self.consumer.send('events', '"Event"')
def test_send_people(self):
with self._assertSends('https://api.mixpanel.com/engage', 'ip=0&data=IlBlb3BsZSI%3D&verbose=1'):
self.consumer.send('people', '"People"')
def test_unknown_endpoint(self):
with pytest.raises(mixpanel.MixpanelException):
self.consumer.send('unknown', '1')
class TestBufferedConsumer:
@classmethod
def setup_class(cls):
cls.MAX_LENGTH = 10
cls.consumer = mixpanel.BufferedConsumer(cls.MAX_LENGTH)
cls.consumer._consumer = LogConsumer()
cls.log = cls.consumer._consumer.log
def setup_method(self):
del self.log[:]
def test_buffer_hold_and_flush(self):
self.consumer.send('events', '"Event"')
assert len(self.log) == 0
self.consumer.flush()
assert self.log == [('events', ['Event'])]
def test_buffer_fills_up(self):
for i in range(self.MAX_LENGTH - 1):
self.consumer.send('events', '"Event"')
assert len(self.log) == 0
self.consumer.send('events', '"Last Event"')
assert len(self.log) == 1
assert self.log == [('events', [
'Event', 'Event', 'Event', 'Event', 'Event',
'Event', 'Event', 'Event', 'Event', 'Last Event',
])]
def test_unknown_endpoint_raises_on_send(self):
# Ensure the exception isn't hidden until a flush.
with pytest.raises(mixpanel.MixpanelException):
self.consumer.send('unknown', '1')
def test_useful_reraise_in_flush_endpoint(self):
error_mock = Mock()
error_mock.read.return_value = six.b('{"status": 0, "error": "arbitrary error"}')
broken_json = '{broken JSON'
consumer = mixpanel.BufferedConsumer(2)
with patch('six.moves.urllib.request.urlopen', return_value=error_mock):
consumer.send('events', broken_json)
with pytest.raises(mixpanel.MixpanelException) as excinfo:
consumer.flush()
assert excinfo.value.message == '[%s]' % broken_json
assert excinfo.value.endpoint == 'events'
def test_send_remembers_api_key(self):
self.consumer.send('imports', '"Event"', api_key='MY_API_KEY')
assert len(self.log) == 0
self.consumer.flush()
assert self.log == [('imports', ['Event'], 'MY_API_KEY')]
class TestFunctional:
@classmethod
def setup_class(cls):
cls.TOKEN = '12345'
cls.mp = mixpanel.Mixpanel(cls.TOKEN)
cls.mp._now = lambda: 1000
@contextlib.contextmanager
def _assertRequested(self, expect_url, expect_data):
mock_response = Mock()
mock_response.read.return_value = six.b('{"status":1, "error": null}')
with patch('six.moves.urllib.request.urlopen', return_value=mock_response) as urlopen:
yield
assert urlopen.call_count == 1
((request,), _) = urlopen.call_args
assert request.get_full_url() == expect_url
data = urllib.parse.parse_qs(request.data.decode('utf8'))
assert len(data['data']) == 1
payload_encoded = data['data'][0]
payload_json = base64.b64decode(payload_encoded).decode('utf8')
payload = json.loads(payload_json)
assert payload == expect_data
def test_track_functional(self):
expect_data = {'event': {'color': 'blue', 'size': 'big'}, 'properties': {'mp_lib': 'python', 'token': '12345', 'distinct_id': 'button press', '$lib_version': mixpanel.__version__, 'time': 1000}}
with self._assertRequested('https://api.mixpanel.com/track', expect_data):
self.mp.track('button press', {'size': 'big', 'color': 'blue'})
def test_people_set_functional(self):
expect_data = {'$distinct_id': 'amq', '$set': {'birth month': 'october', 'favorite color': 'purple'}, '$time': 1000000, '$token': '12345'}
with self._assertRequested('https://api.mixpanel.com/engage', expect_data):
self.mp.people_set('amq', {'birth month': 'october', 'favorite color': 'purple'})
|
nilq/baby-python
|
python
|
_architecture_template = r'''#!/usr/bin/env bash
EXPERIMENT_NAME="$(basename $(realpath $(pwd)/..))"
SETUP_ID="$(basename $(pwd))"
NAME="${EXPERIMENT_NAME}.${SETUP_ID}-mknet"
USER_ID=${UID}
docker rm -f $NAME
#rm snapshots/*
echo "Starting as user ${USER_ID}"
CONTAINER='%(container)s'
nvidia-docker run --rm \
-u ${USER_ID} \
-v /groups/turaga:/groups/turaga \
-v /groups/saalfeld:/groups/saalfeld \
-v /nrs/saalfeld:/nrs/saalfeld \
-w ${PWD} \
--name ${NAME} \
"${CONTAINER}" \
/bin/bash -c "export CUDA_VISIBLE_DEVICES=0; %(command)s %(args)s"
'''
_training_template = r'''#!/usr/bin/env bash
WD=$(pwd)
EXPERIMENT_NAME="$(basename $(realpath $(pwd)/..))"
SETUP_ID="$(basename $(pwd))"
NAME="${EXPERIMENT_NAME}.${SETUP_ID}-training"
USER_ID=${UID}
docker rm -f $NAME
#rm snapshots/*
echo "Starting as user ${USER_ID}"
cd /groups/turaga
cd /groups/saalfeld
cd /nrs/saalfeld
cd $WD
CONTAINER='%(container)s'
nvidia-docker run --rm \
-u ${USER_ID} \
-v /groups/turaga:/groups/turaga:rshared \
-v /groups/saalfeld:/groups/saalfeld:rshared \
-v /nrs/saalfeld:/nrs/saalfeld:rshared \
-w ${PWD} \
--name ${NAME} \
"${CONTAINER}" \
/bin/bash -c "export CUDA_VISIBLE_DEVICES=$1; %(command)s %(args)s 2>&1 | tee -a logfile"
'''
_architecture_template_no_docker = r'''#!/usr/bin/env bash
if [ -d "${PWD}/conda-env" ]; then
echo 'activating conda'
. $HOME/miniconda3/etc/profile.d/conda.sh
conda activate "${PWD}/conda-env"
# conda command not exported to subshell
# https://github.com/conda/conda/issues/7753
fi
echo "Make networks"
%(command)s %(args)s
'''
_training_template_no_docker = r'''#!/usr/bin/env bash
if [ -d "${PWD}/conda-env" ]; then
echo 'activating conda'
. $HOME/miniconda3/etc/profile.d/conda.sh
conda activate "${PWD}/conda-env"
# conda command not exported to subshell
# https://github.com/conda/conda/issues/7753
fi
export CUDA_VISIBLE_DEVICES=$1;
echo "Start training with GPU ${CUDA_VISIBLE_DEVICES}"
%(command)s %(args)s 2>&1| tee -a logfile
'''
def make_architecture(container, command, args):
return _architecture_template % (dict(container=container, command=command, args=args))
def make_training(container, command, args):
return _training_template % (dict(container=container, command=command, args=args))
def make_architecture_no_docker(command, args):
return _architecture_template_no_docker % (dict(command=command, args=args))
def make_training_no_docker(command, args):
return _training_template_no_docker % (dict(command=command, args=args))
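# Illustrative sketch (added for clarity, not part of the original module):
# rendering one of the templates above with the '%' dict substitution used by
# the make_* helpers. The container image, command, and args are hypothetical.
if __name__ == '__main__':
    script = make_training(
        container='my-registry/my-image:latest',  # hypothetical image name
        command='python train.py',
        args='--iterations 100000')
    print(script)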
|
nilq/baby-python
|
python
|
import pathlib
from pw_manager.utils import constants, utils
from colorama import Fore, Style
def require_valid_db(enter_confirmation=False):
def decorator(func):
def inner(*args, **kwargs):
if constants.db_file is None:
print(f"{Fore.RED}You need to select a database first!{Style.RESET_ALL}")
if enter_confirmation:
utils.enter_confirmation()
return
else:
func(*args, **kwargs)
return inner
return decorator
def require_valid_sync_config(enter_confirmation=False):
def decorator(func):
def inner(*args, **kwargs):
if not pathlib.Path(utils.get_sync_file()).exists():
print(f"{Fore.RED}You need to setup your sync settings first!{Style.RESET_ALL}")
if enter_confirmation:
utils.enter_confirmation()
return
func(*args, **kwargs)
return inner
return decorator
def catch_ctrl_c(func):
def inner(*args, **kwargs):
try:
func(*args, **kwargs)
except KeyboardInterrupt:
return
return inner
|
nilq/baby-python
|
python
|
import os
import numpy as np
import logging
from app_globals import *
from alad_support import *
from r_support import matrix, cbind
from forest_aad_detector import *
from forest_aad_support import prepare_forest_aad_debug_args
from results_support import write_sequential_results_to_csv
from data_stream import *
"""
To debug:
pythonw pyalad/forest_aad_stream.py
"""
logger = logging.getLogger(__name__)
class StreamingAnomalyDetector(object):
"""
Attributes:
model: AadForest
stream: DataStream
max_buffer: int
Determines the window size
buffer_instances_x: list
"""
def __init__(self, stream, model, labeled_x=None, labeled_y=None,
unlabeled_x=None, unlabeled_y=None, opts=None, max_buffer=512):
self.model = model
self.stream = stream
self.max_buffer = max_buffer
self.opts = opts
self.buffer_x = None
self.buffer_y = None
self.unlabeled_x = unlabeled_x
self.unlabeled_y = unlabeled_y
self.labeled_x = labeled_x
self.labeled_y = labeled_y
self.qstate = None
def reset_buffer(self):
self.buffer_x = None
self.buffer_y = None
def add_buffer_xy(self, x, y):
if self.buffer_x is None:
self.buffer_x = x
else:
self.buffer_x = rbind(self.buffer_x, x)
if self.buffer_y is None:
self.buffer_y = y
else:
if y is not None:
self.buffer_y = append(self.buffer_y, y)
def move_buffer_to_unlabeled(self):
self.unlabeled_x = self.buffer_x
self.unlabeled_y = self.buffer_y
self.reset_buffer()
def get_num_instances(self):
"""Returns the total number of labeled and unlabeled instances that will be used for weight inference"""
n = 0
if self.unlabeled_x is not None:
n += nrow(self.unlabeled_x)
if self.labeled_x is not None:
# logger.debug("labeled_x: %s" % str(self.labeled_x.shape))
n += nrow(self.labeled_x)
return n
def init_query_state(self, opts):
n = self.get_num_instances()
bt = get_budget_topK(n, opts)
self.qstate = Query.get_initial_query_state(opts.qtype, opts=opts, qrank=bt.topK,
a=1., b=1., budget=bt.budget)
def get_next_from_stream(self, n=0):
if n == 0:
n = self.max_buffer
x, y = self.stream.read_next_from_stream(n)
if x is None:
return x, y
if False:
if self.buffer_x is not None:
logger.debug("buffer shape: %s" % str(self.buffer_x.shape))
logger.debug("x.shape: %s" % str(x.shape))
self.add_buffer_xy(x, y)
self.model.add_samples(x, current=False)
return x, y
def update_model_from_buffer(self):
self.model.update_model_from_stream_buffer()
def get_next_transformed(self, n=1):
x, y = self.get_next_from_stream(n)
if x is None:
return x, y
x_new = self.model.transform_to_region_features(x, dense=False)
return x_new, y
def stream_buffer_empty(self):
return self.stream.empty()
def get_anomaly_scores(self, x):
x_new = self.model.transform_to_region_features(x, dense=False)
scores = self.model.get_score(x_new)
return scores
def setup_data_for_feedback(self):
"""
Prepares the input matrices/data structures for weight update. The format
is such that the top rows of data matrix are labeled and below are unlabeled.
:return: (np.ndarray, np.array, np.array, np.array)
(x, y, ha, hn)
x - data matrix, y - labels (np.nan for unlabeled),
ha - indexes of labeled anomalies, hn - indexes of labeled nominals
"""
x = None
y = None
if self.labeled_x is not None:
x = self.labeled_x.copy()
y = self.labeled_y.copy()
ha = np.where(self.labeled_y == 1)[0]
hn = np.where(self.labeled_y == 0)[0]
else:
ha = np.zeros(0, dtype=int)
hn = np.zeros(0, dtype=int)
if self.unlabeled_x is not None:
if x is None:
x = self.unlabeled_x.copy()
else:
x = np.append(x, self.unlabeled_x, axis=0)
if self.unlabeled_y is not None:
if y is not None:
y = np.append(y, self.unlabeled_y)
else:
y = self.unlabeled_y.copy()
else:
if y is not None:
y = np.append(y, np.ones(nrow(self.unlabeled_x), dtype=int) * -1)
else:
y = np.ones(nrow(self.unlabeled_x), dtype=int) * -1
if False:
logger.debug("x: %d, y: %d, ha: %d, hn:%d" % (nrow(x), len(y), len(ha), len(hn)))
return x, y, ha, hn
def get_instance_stats(self):
nha = nhn = nul = 0
if self.labeled_y is not None:
nha = len(np.where(self.labeled_y == 1)[0])
nhn = len(np.where(self.labeled_y == 0)[0])
if self.unlabeled_x is not None:
nul = nrow(self.unlabeled_x)
return nha, nhn, nul
def get_num_labeled(self):
"""Returns the number of instances for which we already have label feedback"""
if self.labeled_y is not None:
return len(self.labeled_y)
return 0
def get_query_data(self, x=None, y=None, ha=None, hn=None, unl=None, w=None, n_query=1):
"""Returns the best instance that should be queried, along with other data structures
Args:
x: np.ndarray
input instances (labeled + unlabeled)
y: np.array
labels for instances which are already labeled, else some dummy values
ha: np.array
indexes of labeled anomalies
hn: np.array
indexes of labeled nominals
unl: np.array
unlabeled instances that should be ignored for query
w: np.array
current weight vector
n_query: int
number of instances to query
"""
n = self.get_num_instances()
n_feedback = self.get_num_labeled()
if False:
logger.debug("get_query_data() n: %d, n_feedback: %d" % (n, n_feedback))
if n == 0:
raise ValueError("No instances available")
if x is None:
x, y, ha, hn = self.setup_data_for_feedback()
if w is None:
w = self.model.w
if unl is None:
unl = np.zeros(0, dtype=int)
# the top n_feedback instances in the instance list are the labeled items
queried_items = append(np.arange(n_feedback), unl)
x_transformed = self.model.transform_to_region_features(x, dense=False)
order_anom_idxs, anom_score = self.model.order_by_score(x_transformed)
xi = self.qstate.get_next_query(maxpos=n, ordered_indexes=order_anom_idxs,
queried_items=queried_items,
x=x_transformed, lbls=y, anom_score=anom_score,
w=w, hf=append(ha, hn),
remaining_budget=self.opts.budget - n_feedback,
n=n_query)
if False:
logger.debug("ordered instances[%d]: %s\nha: %s\nhn: %s\nxi: %s" %
(self.opts.budget, str(list(order_anom_idxs[0:self.opts.budget])),
str(list(ha)), str(list(hn)), str(list(xi))))
return xi, x, y, x_transformed, ha, hn, order_anom_idxs, anom_score
def move_unlabeled_to_labeled(self, xi, yi):
unlabeled_idx = xi - self.get_num_labeled()
self.labeled_x = rbind(self.labeled_x, matrix(self.unlabeled_x[unlabeled_idx], nrow=1))
if self.labeled_y is None:
self.labeled_y = np.array([yi], dtype=int)
else:
self.labeled_y = np.append(self.labeled_y, [yi])
mask = np.ones(self.unlabeled_x.shape[0], dtype=bool)
mask[unlabeled_idx] = False
self.unlabeled_x = self.unlabeled_x[mask]
self.unlabeled_y = self.unlabeled_y[mask]
def update_weights_with_feedback(self, xi, yi, x, y, x_transformed, ha, hn, opts):
"""Relearns the optimal weights from feedback and updates internal labeled and unlabeled matrices
IMPORTANT:
This API assumes that the input x, y, x_transformed are consistent with
the internal labeled/unlabeled matrices, i.e., the top rows/values in
these matrices are from labeled data and bottom ones are from internally
stored unlabeled data.
"""
# Add the newly labeled instance to the corresponding list of labeled
# instances and remove it from the unlabeled set.
self.move_unlabeled_to_labeled(xi, yi)
if yi == 1:
ha = append(ha, [xi])
else:
hn = append(hn, [xi])
self.model.update_weights(x_transformed, y, ha, hn, opts)
def get_score_variance(self, x, n_instances, opts, transform=False):
"""Computes variance in scores of top ranked instances
"""
w = self.model.w
if w is None:
raise ValueError("Model not trained")
if transform:
x = self.model.transform_to_region_features(x, dense=False)
ordered_indexes, scores = self.model.order_by_score(x, w=w)
bt = get_budget_topK(n_instances, opts)
tn = min(10, nrow(x))
vars = np.zeros(tn, dtype=float)
for i in np.arange(tn):
vars[i] = get_linear_score_variance(x[ordered_indexes[i], :], w)
# logger.debug("top %d vars:\n%s" % (tn, str(list(vars))))
return vars
def get_rearranging_indexes(add_pos, move_pos, n):
"""Creates an array 0...n-1 and moves value at 'move_pos' to 'add_pos', and shifts others back
Useful to reorder data when we want to move instances from unlabeled set to labeled.
TODO:
Use this to optimize the API StreamingAnomalyDetector.get_query_data()
since it needs to repeatedly convert the data to transformed [node] features.
Example:
get_rearranging_indexes(2, 2, 10):
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
get_rearranging_indexes(0, 1, 10):
array([1, 0, 2, 3, 4, 5, 6, 7, 8, 9])
get_rearranging_indexes(2, 9, 10):
array([0, 1, 9, 2, 3, 4, 5, 6, 7, 8])
:param add_pos:
:param move_pos:
:param n:
:return:
"""
if add_pos > move_pos:
raise ValueError("add_pos must be less or equal to move_pos")
rearr_idxs = np.arange(n)
if add_pos == move_pos:
return rearr_idxs
rearr_idxs[(add_pos + 1):(move_pos + 1)] = rearr_idxs[add_pos:move_pos]
rearr_idxs[add_pos] = move_pos
return rearr_idxs
def read_data(opts):
data = DataFrame.from_csv(opts.datafile, header=0, sep=',', index_col=None)
X_train = np.zeros(shape=(data.shape[0], data.shape[1] - 1))
for i in range(X_train.shape[1]):
X_train[:, i] = data.iloc[:, i + 1]
labels = np.array([1 if data.iloc[i, 0] == "anomaly" else 0 for i in range(data.shape[0])], dtype=int)
return X_train, labels
def train_aad_model(opts, X_train):
rng = np.random.RandomState(opts.randseed + opts.fid * opts.reruns + opts.runidx)
# fit the model
model = AadForest(n_estimators=opts.forest_n_trees,
max_samples=min(opts.forest_n_samples, X_train.shape[0]),
score_type=opts.forest_score_type, random_state=rng,
add_leaf_nodes_only=opts.forest_add_leaf_nodes_only,
max_depth=opts.forest_max_depth,
ensemble_score=opts.ensemble_score,
detector_type=opts.detector_type, n_jobs=opts.n_jobs)
model.fit(X_train)
return model
def prepare_aad_model(X, y, opts):
if opts.load_model and opts.modelfile != "" and os.path.isfile(opts.modelfile):
logger.debug("Loading model from file %s" % opts.modelfile)
model = load_aad_model(opts.modelfile)
else:
model = train_aad_model(opts, X)
logger.debug("total #nodes: %d" % (len(model.all_regions)))
if False:
if model.w is not None:
logger.debug("w:\n%s" % str(list(model.w)))
else:
logger.debug("model weights are not set")
return model
def run_feedback(sad, min_feedback, max_feedback, opts):
"""
:param sad: StreamingAnomalyDetector
:param max_feedback: int
:param opts: Opts
:return:
"""
if False:
# get baseline metrics
x_transformed = sad.model.transform_to_region_features(sad.unlabeled_x, dense=False)
ordered_idxs, _ = sad.model.order_by_score(x_transformed)
seen_baseline = sad.unlabeled_y[ordered_idxs[0:max_feedback]]
num_seen_baseline = np.cumsum(seen_baseline)
logger.debug("num_seen_baseline:\n%s" % str(list(num_seen_baseline)))
# baseline scores
w_unif = sad.model.get_uniform_weights()
x_transformed_baseline = sad.model.transform_to_region_features(sad.unlabeled_x, dense=False)
order_baseline, scores_baseline = sad.model.order_by_score(x_transformed_baseline, w_unif)
n_seen_baseline = min(max_feedback, len(sad.unlabeled_y))
queried_baseline = order_baseline[0:n_seen_baseline]
seen_baseline = sad.unlabeled_y[queried_baseline]
# seen_baseline = min(max_feedback, len(sad.unlabeled_y))
# found_baseline = np.sum(sad.unlabeled_y[order_baseline[0:seen_baseline]])
seen = np.zeros(0, dtype=int)
queried = np.zeros(0, dtype=int)
unl = np.zeros(0, dtype=int)
i = 0
while i < max_feedback:
i += 1
# scores based on current weights
xi_, x, y, x_transformed, ha, hn, order_anom_idxs, anom_score = \
sad.get_query_data(unl=unl, n_query=max_feedback)
order_anom_idxs_minus_ha_hn = get_first_vals_not_marked(
order_anom_idxs, append(ha, hn), n=len(order_anom_idxs))
bt = get_budget_topK(x_transformed.shape[0], opts)
        # Note: We will ensure that the tau-th instance is at least 10th (or lower) ranked
tau_rank = min(max(bt.topK, 10), x.shape[0])
xi = xi_[0]
means = vars = qpos = m_tau = v_tau = None
if opts.query_confident:
# get the mean score and its variance for the top ranked instances
# excluding the instances which have already been queried
means, vars, test, v_eval, _ = get_score_variances(x_transformed, sad.model.w,
n_test=tau_rank,
ordered_indexes=order_anom_idxs,
queried_indexes=append(ha, hn))
# get the mean score and its variance for the tau-th ranked instance
m_tau, v_tau, _, _, _ = get_score_variances(x_transformed[order_anom_idxs_minus_ha_hn[tau_rank]],
sad.model.w, n_test=1,
test_indexes=np.array([0], dtype=int))
qpos = np.where(test == xi)[0] # top-most ranked instance
if False and opts.query_confident:
logger.debug("tau score:\n%s (%s)" % (str(list(m_tau)), str(list(v_tau))))
strmv = ",".join(["%f (%f)" % (means[j], vars[j]) for j in np.arange(len(means))])
logger.debug("scores:\n%s" % strmv)
# check if we are confident that this is larger than the tau-th ranked instance
if (not opts.query_confident) or (i <= min_feedback or
means[qpos] - 3. * np.sqrt(vars[qpos]) >= m_tau):
seen = append(seen, [y[xi]])
queried = append(queried, xi)
# seen += 1
# found += y[xi]
tm_update = Timer()
sad.update_weights_with_feedback(xi, y[xi], x, y, x_transformed, ha, hn, opts)
tm_update.end()
# reset the list of queried test instances because their scores would have changed
unl = np.zeros(0, dtype=int)
if True:
nha, nhn, nul = sad.get_instance_stats()
# logger.debug("xi:%d, test indxs: %s, qpos: %d" % (xi, str(list(test)), qpos))
# logger.debug("orig scores:\n%s" % str(list(anom_score[order_anom_idxs[0:tau_rank]])))
logger.debug("[%d] #feedback: %d; ha: %d; hn: %d, mnw: %d, mxw: %d; update: %f sec(s)" %
(i, nha + nhn, nha, nhn, min_feedback, max_feedback, tm_update.elapsed()))
else:
# ignore this instance from query
unl = append(unl, [xi])
# logger.debug("skipping feedback for xi=%d at iter %d; unl: %s" % (xi, i, str(list(unl))))
continue
# logger.debug("y:\n%s" % str(list(y)))
# logger.debug("w:\n%s" % str(list(sad.model.w)))
# logger.debug("\nseen : %s\nqueried: %s" % (str(list(seen)), str(list(queried))))
return seen, seen_baseline, None, None
def main():
if False:
# DEBUG
args = prepare_forest_aad_debug_args()
else:
# PRODUCTION
args = get_command_args(debug=False)
# print "log file: %s" % args.log_file
configure_logger(args)
opts = Opts(args)
# print opts.str_opts()
logger.debug(opts.str_opts())
if not opts.streaming:
raise ValueError("Only streaming supported")
X_full, y_full = read_data(opts)
# X_train = X_train[0:10, :]
# labels = labels[0:10]
logger.debug("loaded file: (%s) %s" % (str(X_full.shape), opts.datafile))
logger.debug("results dir: %s" % opts.resultsdir)
all_num_seen = None
all_num_seen_baseline = None
all_window = None
all_window_baseline = None
aucs = np.zeros(0, dtype=float)
opts.fid = 1
for runidx in opts.get_runidxs():
tm_run = Timer()
opts.set_multi_run_options(opts.fid, runidx)
stream = DataStream(X_full, y_full)
X_train, y_train = stream.read_next_from_stream(opts.stream_window)
# logger.debug("X_train:\n%s\nlabels:\n%s" % (str(X_train), str(list(labels))))
model = prepare_aad_model(X_train, y_train, opts) # initial model training
sad = StreamingAnomalyDetector(stream, model, unlabeled_x=X_train, unlabeled_y=y_train,
max_buffer=opts.stream_window, opts=opts)
sad.init_query_state(opts)
if False:
# use for DEBUG only
run_feedback(sad, 0, opts.budget, opts)
print "This is experimental/demo code for streaming integration and will be application specific." + \
" Exiting after reading max %d instances from stream and iterating for %d feedback..." % \
(opts.stream_window, opts.budget)
exit(0)
all_scores = np.zeros(0)
all_y = np.zeros(0, dtype=int)
scores = sad.get_anomaly_scores(X_train)
# auc = fn_auc(cbind(y_train, -scores))
all_scores = np.append(all_scores, scores)
all_y = np.append(all_y, y_train)
iter = 0
seen = np.zeros(0, dtype=int)
seen_baseline = np.zeros(0, dtype=int)
stream_window_tmp = np.zeros(0, dtype=int)
stream_window_baseline = np.zeros(0, dtype=int)
stop_iter = False
while not stop_iter:
iter += 1
tm = Timer()
seen_, seen_baseline_, queried_, queried_baseline_ = run_feedback(sad,
opts.min_feedback_per_window,
opts.max_feedback_per_window,
opts)
seen = append(seen, seen_)
seen_baseline = append(seen_baseline, seen_baseline_)
stream_window_tmp = append(stream_window_tmp, np.ones(len(seen_)) * iter)
stream_window_baseline = append(stream_window_baseline, np.ones(len(seen_baseline_)) * iter)
# queried = append(queried, queried_)
# queried_baseline = append(queried_baseline, queried_baseline_)
# logger.debug("seen:\n%s;\nbaseline:\n%s" % (str(list(seen)), str(list(seen_baseline))))
x_eval, y_eval = sad.get_next_from_stream(sad.max_buffer)
if x_eval is None or iter >= opts.max_windows:
if iter >= opts.max_windows:
logger.debug("Exceeded %d iters; exiting stream read..." % opts.max_windows)
stop_iter = True
else:
scores = sad.get_anomaly_scores(x_eval) # compute scores before updating the model
all_scores = np.append(all_scores, scores)
all_y = np.append(all_y, y_eval)
if opts.allow_stream_update:
sad.update_model_from_buffer()
sad.move_buffer_to_unlabeled()
logger.debug(tm.message("Stream window [%d]: algo [%d/%d]; baseline [%d/%d]: " %
(iter, np.sum(seen), len(seen), np.sum(seen_baseline), len(seen_baseline))))
auc = fn_auc(cbind(all_y, -all_scores))
# logger.debug("AUC: %f" % auc)
aucs = append(aucs, [auc])
# queried_baseline = order(all_scores, decreasing=True)[0:opts.budget]
num_seen_tmp = np.cumsum(seen) # np.cumsum(all_y[queried])
# logger.debug("\nnum_seen : %s" % (str(list(num_seen_tmp)),))
num_seen_baseline = np.cumsum(seen_baseline) # np.cumsum(all_y[queried_baseline])
# logger.debug("Numseen in %d budget (overall):\n%s" % (opts.budget, str(list(num_seen_baseline))))
stream_window_baseline = append(np.array([opts.fid, opts.runidx],
dtype=stream_window_baseline.dtype),
stream_window_baseline)
stream_window = np.ones(len(stream_window_baseline) + 2, dtype=stream_window_tmp.dtype) * -1
stream_window[0:2] = [opts.fid, opts.runidx]
stream_window[2:(2+len(stream_window_tmp))] = stream_window_tmp
# queried = append(np.array([opts.fid, opts.runidx], dtype=queried.dtype), queried)
# queried_baseline = append(np.array([opts.fid, opts.runidx], dtype=queried_baseline.dtype), queried_baseline)
# num_seen_baseline has the uniformly maximum number of queries.
# the number of queries in num_seen will vary under the query confidence mode
num_seen = np.ones(len(num_seen_baseline) + 2, dtype=num_seen_tmp.dtype) * -1
num_seen[0:2] = [opts.fid, opts.runidx]
num_seen[2:(2+len(num_seen_tmp))] = num_seen_tmp
num_seen_baseline = append(np.array([opts.fid, opts.runidx], dtype=num_seen_baseline.dtype), num_seen_baseline)
# all_queried = rbind(all_queried, matrix(queried, nrow=1))
# all_queried_baseline = rbind(all_queried_baseline, matrix(queried_baseline, nrow=1))
all_num_seen = rbind(all_num_seen, matrix(num_seen, nrow=1))
all_num_seen_baseline = rbind(all_num_seen_baseline, matrix(num_seen_baseline, nrow=1))
all_window = rbind(all_window, matrix(stream_window, nrow=1))
all_window_baseline = rbind(all_window_baseline, matrix(stream_window_baseline, nrow=1))
logger.debug(tm_run.message("Completed runidx: %d" % runidx))
results = SequentialResults(num_seen=all_num_seen,
# true_queried_indexes=all_queried,
num_seen_baseline=all_num_seen_baseline,
# true_queried_indexes_baseline=all_queried_baseline,
stream_window=all_window,
stream_window_baseline=all_window_baseline,
aucs=aucs)
write_sequential_results_to_csv(results, opts)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import requests
class AppClient:
def __init__(self, endpoint: str = 'http://localhost:5000'):
self._endpoint = endpoint
def get_index(self):
return requests.get(self._endpoint).text
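# Illustrative sketch (added for clarity, not part of the original module):
# minimal use of AppClient against a locally running app on the default
# endpoint assumed by the constructor above.
if __name__ == '__main__':
    client = AppClient()
    print(client.get_index())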
|
nilq/baby-python
|
python
|
import rng
import socket
import pytest
@pytest.fixture
def index_test():
return rng.index()
def test_index_content(index_test):
hostname = socket.gethostname()
assert "RNG running on {}\n".format(hostname) in index_test
def test_rng_status():
statuscode = rng.rng(32).status_code
assert statuscode == 200
|
nilq/baby-python
|
python
|
from PyCA.Core import *
import PyCA.Common as common
import PyCA.Display as display
import numpy as np
import matplotlib.pyplot as plt
def PrimalDualTV(I0, \
DataFidC, \
TVC = 1.0, \
nIters = 5000, \
stepP = None, \
stepI = None, \
disp = False, \
dispEvery = 0):
#
# Initialize data
#
mType = I0.memType()
grid = I0.grid().copy()
    if stepP is None:
        stepP = 1.0/8.0
    if stepI is None:
        stepI = min(stepP, 1.0/DataFidC)
bc = BC_CLAMP
# bc = BC_WRAP
# primal var
I = I0.copy()
# I = Image3D(grid, mType)
# SetMem(I, 0.0)
# dual var
p = Field3D(grid, mType)
# zerovec = Vec3Df(0.0,0.0,0.0)
# SetMem(p, zerovec)
Gradient(p, I0, DIFF_FORWARD, bc)
ReprojectToUnitVec(p)
# Initialize other data
energy = [[] for _ in xrange(2)]
#
# Allocate all necessary data
#
scratchI = Image3D(grid, mType)
scratchI2 = Image3D(grid, mType)
scratchV = Field3D(grid, mType)
EnergyFig = plt.figure('PrimalDual Energy');
plt.clf();
ResultFig = plt.figure('PrimalDual Results');
plt.clf();
# overwrites LDefSum
def plotResults(fig,cmap='gray',rng=[0,1]):
plt.figure(fig)
plt.subplot(1,3,1)
display.DispImage(I0, 'Orig', cmap=cmap, \
newFig=False, rng=rng, t=False)
plt.subplot(1,3,2)
display.DispImage(I, 'Denoised', cmap=cmap, \
newFig=False, rng=rng, t=False)
Sub(scratchI, I, I0)
plt.subplot(1,3,3)
display.DispImage(scratchI, 'diff', cmap=cmap, \
newFig=False, rng=None, t=False)
plt.draw()
plt.show()
def plotEnergy(en, fig):
plt.figure(fig)
plt.plot(en[0][1:],'r')
plt.hold(True)
plt.plot(en[1][1:],'g')
plt.hold(False)
plt.draw()
plt.show()
for k in range(nIters+1):
print 'iteration %d...'%k
#
# Display images
#
if disp and dispEvery > 0 and k%dispEvery == 0:
plotResults(ResultFig.number)
#
# Compute energy
#
# primal energy
Sub(scratchI, I, I0)
primalEnergy = (DataFidC/2.0)*Sum2(scratchI)
GradientMag(scratchI, I, DIFF_FORWARD, bc)
primalEnergy += TVC*Sum(scratchI)
# dual energy
Divergence(scratchI, p, DIFF_BACKWARD, bc)
MulC_I(scratchI, TVC/DataFidC)
Sqr_I(scratchI)
Divergence(scratchI2, p, DIFF_BACKWARD, bc)
MulC_I(scratchI2, 2.0*(TVC/DataFidC))
Mul_I(scratchI2, I0)
Add_I(scratchI, scratchI2)
dualEnergy = (-DataFidC/2.0)*Sum(scratchI)
energy[0].append(primalEnergy)
energy[1].append(dualEnergy)
if disp and dispEvery > 0 and k%dispEvery == 0:
plotEnergy(energy, EnergyFig.number)
# just compute energy on final iteration
if k >= nIters:
break
# primal step
# scratchI = I - I0 - (TVC/DataFidC)*div(p)
Divergence(scratchI, p, DIFF_BACKWARD, bc)
MulC_I(scratchI, -TVC/DataFidC)
Sub(scratchI2, I, I0)
Add_I(scratchI, scratchI2)
# I = I - stepI*gI
Add_MulC_I(I, scratchI, -stepI)
# dual step
Gradient(scratchV, I, DIFF_FORWARD, bc)
# weighting update by 1/TVC to speed convergence
#Add_MulC_I(p, scratchV, stepP*TVC)
Add_MulC_I(p, scratchV, stepP)
# reproject onto constraint
ReprojectToUnitVec(p)
if disp:
plotResults(ResultFig.number)
plotEnergy(energy, EnergyFig.number)
return (I, energy)
#
# End function
#
if __name__ == '__main__':
plt.close('all')
# number of iterations
nIters = 2000
disp = True
dispEvery = 1000
if GetNumberOfCUDADevices() > 0:
mType = MEM_DEVICE
else:
print "No CUDA devices found, running on CPU"
mType = MEM_HOST
# data fidelity modifier
DataFidC = 1.0
TVC = 0.05
imagedir='./Images/'
#
# Run lena images
#
I0 = common.LoadPNGImage(imagedir + 'lena_orig.png', mType)
imSz = I0.size()
sz = imSz.tolist()[0:2]
(I,energy) = \
PrimalDualTV(I0, \
DataFidC, \
TVC = TVC, \
nIters = nIters, \
stepP = 1.0, \
stepI = 1.0/16.0, \
disp = disp, \
dispEvery = dispEvery)
|
nilq/baby-python
|
python
|
# Jan28Report on General Accuracy #####################################################################################
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-23-2020-21-N-UseSpark-R0-noOpt'
# notes = 'UseSpark-R0-noOpt'
# date = 'Jan-24-2020-2-N-UseSpark-R1-noOpt'
# notes = 'UseSpark-R1-noOpt'
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-24-2020-3-N-UseSpark-R1-bsfKimOnly'
# notes = 'UseSpark-R1-bsfKimOnly'
# Jan31Report on TraditionalDTW+LBOpt vs. FastDTW+NoOpt#################################################################
# date = 'Jan-30-2020-12-N-UseSpark-R1-noOptFastDTW_numSample400'
# notes = 'UseSpark-R1-noOptFastDTW_numSample400'
date = 'Jan-30-2020-15-N-UseSpark-R1-LBOptNormalDTW_numSample400'
notes = 'UseSpark-R1-LBOptNormalDTW_numSample400'
# paa_data folder is /home/apocalyvec/PycharmProjects/Genex/genex/experiments/results/
|
nilq/baby-python
|
python
|
# noinspection PyShadowingBuiltins,PyUnusedLocal
def sum(x, y):
if not 0 <= x <= 100:
raise ValueError('arg x must be between 0 and 100')
    if not 0 <= y <= 100:
raise ValueError('arg y must be between 0 and 100')
return x + y
|
nilq/baby-python
|
python
|
"""
Fabric tools for managing users
"""
from __future__ import with_statement
from fabric.api import *
def exists(name):
"""
Check if user exists
"""
with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
return sudo('getent passwd %(name)s' % locals()).succeeded
def create(name, home=None, shell=None, uid=None, gid=None, groups=None):
"""
Create a new user
"""
options = []
if gid:
options.append('--gid "%s"' % gid)
if groups:
if not isinstance(groups, basestring):
groups = ','.join('"%s"' % group for group in groups)
options.append('--groups %s' % groups)
if home:
options.append('--home-dir "%s"' % home)
if shell:
options.append('--shell "%s"' % (shell))
if uid:
options.append('--uid %s' % uid)
options = " ".join(options)
sudo('useradd %(options)s %(name)s' % locals())
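# Illustrative usage sketch (added for clarity, not part of the original
# module): how exists()/create() above might be combined in a fabric task.
# The user name, home, shell, and groups below are hypothetical.
#
#     @task
#     def bootstrap():
#         if not exists('deploy'):
#             create('deploy', home='/home/deploy', shell='/bin/bash',
#                    groups=['www-data'])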
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding: utf8
from __future__ import print_function
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from enum import Enum
try:
    from collections.abc import MutableSequence
except ImportError:  # Python 2
    from collections import MutableSequence
from collections import namedtuple
from collections import OrderedDict
from itertools import chain
import bisect
import logging
import struct
import array
import hashlib
import json
from .helper import ts_daily_left, ts_daily_right
from .helper import ts_hourly_left, ts_hourly_right
from .helper import ts_weekly_left, ts_weekly_right
from .helper import ts_monthly_left, ts_monthly_right
Aggregation = namedtuple('Aggregation', ['min', 'max', 'sum', 'count'])
class BucketType(Enum):
dynamic = 1
hourly = 2
daily = 3
weekly = 4
monthly = 5
resultset = 6
class ItemType(Enum):
raw_float = 1
raw_int = 2
tuple_float_2 = 3
tuple_float_3 = 4
tuple_float_4 = 5
basic_aggregation = 6
class TupleArray(MutableSequence):
def __init__(self, data_type="f", tuple_size=2):
if tuple_size < 2 or tuple_size > 20:
raise ValueError("invalid tuple size (2-20)")
super(TupleArray, self).__init__()
self.data_type = data_type
self.tuple_size = tuple_size
self._arrays = [array.array(data_type) for i in range(tuple_size)]
def __len__(self):
return len(self._arrays[0])
def __getitem__(self, ii):
return tuple(item[ii] for item in self._arrays)
def __delitem__(self, ii):
for a in self._arrays:
del a[ii]
def __setitem__(self, ii, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i][ii] = v
return tuple(item[ii] for item in self._arrays)
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<TupleArray {} x {}>".format(self.data_type, self.tuple_size)
def insert(self, ii, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i].insert(ii, v)
def append(self, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i].append(v)
def tostring(self):
return b"".join([x.tostring() for x in self._arrays])
def fromstring(self, string):
s = len(string) / len(self._arrays)
for i, a in enumerate(self._arrays):
f = int(i * s)
t = int(i * s + s)
a.fromstring(string[f:t])
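# Illustrative sketch (added for clarity, not part of the original module):
# TupleArray stores fixed-size tuples column-wise, one array.array per
# component. For example, (min, max) float pairs:
#
#     pairs = TupleArray("f", 2)
#     pairs.append((1.0, 2.0))
#     pairs.append((3.0, 4.0))
#     pairs[1]          # -> (3.0, 4.0)
#     pairs.tostring()  # raw bytes of the two component arrays, concatenated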
class Bucket(object):
def __init__(self, parent, key, range_key, values=None):
self.parent = parent
self._dirty = False
self._existing = False
self._range_min = 0
self._range_max = 0
self.set_range_key(range_key)
# Create Data Structures
self._timestamps = array.array("I")
if self.item_type == ItemType.raw_float:
self._values = array.array("f")
elif self.item_type == ItemType.raw_int:
self._values = array.array("I")
elif self.item_type == ItemType.tuple_float_2:
self._values = TupleArray("f", 2)
elif self.item_type == ItemType.tuple_float_3:
self._values = TupleArray("f", 3)
elif self.item_type == ItemType.tuple_float_4:
self._values = TupleArray("f", 4)
else:
raise NotImplementedError("invalid item type")
if values is not None:
self.insert(values)
@property
def item_type(self):
return self.parent.item_type
@property
def bucket_type(self):
return self.parent.bucket_type
@property
def key(self):
return self.parent.key
@property
def existing(self):
return self._existing
@property
def dirty(self):
return self._dirty
def reset_dirty(self):
self._dirty = False
@property
def range_key(self):
return self._range_min
def set_range_key(self, range_key):
if self.bucket_type == BucketType.hourly:
l = ts_hourly_left(range_key)
r = ts_hourly_right(range_key)
elif self.bucket_type == BucketType.daily:
l = ts_daily_left(range_key)
r = ts_daily_right(range_key)
elif self.bucket_type == BucketType.weekly:
l = ts_weekly_left(range_key)
r = ts_weekly_right(range_key)
elif self.bucket_type == BucketType.monthly:
l = ts_monthly_left(range_key)
r = ts_monthly_right(range_key)
else:
raise NotImplementedError("invalid bucket type")
if l != range_key:
raise ValueError("invalid range key: %s" % range_key)
self._range_min = l
self._range_max = r
@property
def range_min(self):
return self._range_min
@property
def range_max(self):
return self._range_max
def __len__(self):
return len(self._timestamps)
def __bool__(self): # Python 3
if len(self) < 1:
return False
if len(self._timestamps) != len(self._values):
return False
# Check if sorted
it = iter(self._timestamps)
it.__next__()
return all(b >= a for a, b in zip(self._timestamps, it))
def __nonzero__(self): # Python 2
if len(self) < 1:
return False
if len(self._timestamps) != len(self._values):
return False
# Check if sorted
it = iter(self._timestamps)
it.next()
return all(b >= a for a, b in zip(self._timestamps, it))
def to_hash(self):
s = "{}.{}.{}.{}.{}.{}.{}.{}".format(self.key, self.item_type,
self.bucket_type, len(self),
self.ts_min, self.ts_max,
self.existing, self.dirty)
return hashlib.sha1(s).hexdigest()
def __eq__(self, other):
if not isinstance(other, Bucket):
return False
# Is Hashing a Performance Problem ?
# h1 = self.to_hash()
# h2 = other.to_hash()
# return h1 == h2
# This would compare the objects without hash
if self.key != other.key:
return False
if self._dirty != other._dirty:
return False
if self.item_type != other.item_type:
return False
if self.bucket_type != other.bucket_type:
return False
if len(self._timestamps) != len(other._timestamps):
return False
if len(self._timestamps) > 0:
if self._timestamps[0] != other._timestamps[0]:
return False
if self._timestamps[-1] != other._timestamps[-1]:
return False
return True
def __ne__(self, other):
return not self == other # NOT return not self.__eq__(other)
def __repr__(self):
l = len(self._timestamps)
if l > 0:
m = self._timestamps[0]
else:
m = -1
return "<{} series({}), min_ts: {}, items: {}, buckets: {}>".format(
self.key, l, m, self.item_type, self.bucket_type)
@property
def ts_max(self):
if len(self._timestamps) > 0:
return self._timestamps[-1]
return -1
@property
def ts_min(self):
if len(self._timestamps) > 0:
return self._timestamps[0]
return -1
def _at(self, i):
return (self._timestamps[i], self._values[i])
def __getitem__(self, key):
return self._at(key)
def to_string(self):
header = (struct.pack("H", int(self.item_type.value)) +
struct.pack("H", int(self.bucket_type.value)))
length = struct.pack("I", len(self))
return (header + length + self._timestamps.tostring() +
self._values.tostring())
@classmethod
def from_string(cls, key, string):
item_type = ItemType(int(struct.unpack("H", string[0:2])[0]))
bucket_type = BucketType(int(struct.unpack("H", string[2:4])[0]))
item_length = int(struct.unpack("I", string[4:8])[0])
split = 8 + 4 * item_length
ts, v = string[8:split], string[split:]
i = Bucket(key, item_type=item_type, bucket_type=bucket_type)
i._timestamps.fromstring(ts)
i._values.fromstring(v)
assert(i)
return i
def insert_point(self, timestamp, value, overwrite=False):
timestamp = int(timestamp)
idx = bisect.bisect_left(self._timestamps, timestamp)
# Append
if idx == len(self._timestamps):
self._timestamps.append(timestamp)
self._values.append(value)
self._dirty = True
return 1
# Already Existing
if self._timestamps[idx] == timestamp:
# Replace
logging.debug("duplicate insert")
if overwrite:
self._values[idx] = value
self._dirty = True
return 1
return 0
# Insert
self._timestamps.insert(idx, timestamp)
self._values.insert(idx, value)
self._dirty = True
return 1
def insert(self, series):
counter = 0
for timestamp, value in series:
counter += self.insert_point(timestamp, value)
return counter
class BucketCollection(OrderedDict):
def __init__(self, parent, *args, **kwargs):
self.parent = parent
super(BucketCollection, self).__init__(*args, **kwargs)
def __missing__(self, key):
k = self.parent.key
bucket = Bucket(self.parent, k, key)
self[key] = bucket
return self[key]
class TimeSeries(object):
DEFAULT_ITEMTYPE = ItemType.raw_float
DEFAULT_BUCKETTYPE = BucketType.daily
def __init__(self, key, values=None):
# Determine Types
# Maybe get this from key
self.item_type = self.DEFAULT_ITEMTYPE
self.bucket_type = self.DEFAULT_BUCKETTYPE
self.key = str(key).lower()
self.buckets = BucketCollection(self)
if values is not None:
self.insert(values)
def get_range_left(self, timestamp):
if self.bucket_type == BucketType.hourly:
return ts_hourly_left(timestamp)
elif self.bucket_type == BucketType.daily:
return ts_daily_left(timestamp)
elif self.bucket_type == BucketType.weekly:
return ts_weekly_left(timestamp)
elif self.bucket_type == BucketType.monthly:
return ts_monthly_left(timestamp)
else:
raise NotImplementedError("invalid bucket type")
def get_range_right(self, timestamp):
if self.bucket_type == BucketType.hourly:
return ts_hourly_right(timestamp)
elif self.bucket_type == BucketType.daily:
return ts_daily_right(timestamp)
elif self.bucket_type == BucketType.weekly:
return ts_weekly_right(timestamp)
elif self.bucket_type == BucketType.monthly:
return ts_monthly_right(timestamp)
else:
raise NotImplementedError("invalid bucket type")
def insert(self, series):
last_range_min = -1
last_range_max = -1
for timestamp, value in series:
if last_range_min <= timestamp <= last_range_max:
# just insert
self.buckets[last_range_min].insert_point(timestamp, value)
else:
l = self.get_range_left(timestamp)
r = self.get_range_right(timestamp)
if l < last_range_min or r < last_range_max:
raise ValueError("unsorted range key")
last_range_min = l
last_range_max = r
self.buckets[last_range_min].insert_point(timestamp, value)
@property
def timestamps(self):
        bucket_timestamps = [x._timestamps for x in self.buckets.values()]
        return chain(*bucket_timestamps)
@property
def values(self):
        bucket_values = [x._values for x in self.buckets.values()]
        return chain(*bucket_values)
def __len__(self):
        return sum([len(x) for x in self.buckets.values()])
def _at(self, i):
offset = 0
idx = 0
        buckets = list(self.buckets.values())
current_bucket = buckets[idx]
while i >= len(current_bucket) + offset:
offset += len(current_bucket)
idx += 1
current_bucket = buckets[idx]
return current_bucket[i-offset]
def __getitem__(self, key):
return self._at(key)
class ResultSet(TimeSeries):
def __init__(self, key, items):
super(ResultSet, self).__init__(key)
self.bucket_type = BucketType.resultset
for i in items:
if i.key != key:
raise ValueError("Item has wrong key")
self._timestamps += i._timestamps
self._values += i._values
def _trim(self, ts_min, ts_max):
low = bisect.bisect_left(self._timestamps, ts_min)
high = bisect.bisect_right(self._timestamps, ts_max)
self._timestamps = self._timestamps[low:high]
self._values = self._values[low:high]
def all(self):
"""Return an iterater to get all ts value pairs.
"""
return zip(self._timestamps, self._values)
def daily(self):
"""Generator to access daily data.
This will return an inner generator.
"""
i = 0
while i < len(self._timestamps):
j = 0
lower_bound = ts_daily_left(self._timestamps[i])
upper_bound = ts_daily_right(self._timestamps[i])
while (i + j < len(self._timestamps) and
lower_bound <= self._timestamps[i + j] <= upper_bound):
j += 1
yield ((self._timestamps[x], self._values[x])
for x in range(i, i + j))
i += j
def hourly(self):
"""Generator to access hourly data.
This will return an inner generator.
"""
i = 0
while i < len(self._timestamps):
j = 0
lower_bound = ts_hourly_left(self._timestamps[i])
upper_bound = ts_hourly_right(self._timestamps[i])
while (i + j < len(self._timestamps) and
lower_bound <= self._timestamps[i + j] <= upper_bound):
j += 1
yield ((self._timestamps[x], self._values[x])
for x in range(i, i + j))
i += j
def aggregation(self, group="hourly", function="mean"):
"""Aggregation Generator.
"""
if group == "hourly":
it = self.hourly
left = ts_hourly_left
elif group == "daily":
it = self.daily
left = ts_daily_left
else:
raise ValueError("Invalid aggregation group")
if function == "sum":
func = sum
elif function == "count":
func = len
elif function == "min":
func = min
elif function == "max":
func = max
elif function == "amp":
def amp(x):
return max(x) - min(x)
func = amp
elif function == "mean":
def mean(x):
return sum(x) / len(x)
func = mean
else:
raise ValueError("Invalid aggregation group")
for g in it():
t = list(g)
ts = left(t[0][0])
value = func([x[1] for x in t])
yield (ts, value)
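# Illustrative sketch (added for clarity, not part of the original module):
# inserting a few points into a TimeSeries and reading them back. The key and
# the timestamps (unix epochs on the same day) are hypothetical; values are
# floats because DEFAULT_ITEMTYPE is raw_float.
if __name__ == "__main__":
    ts = TimeSeries("sensor.temp")
    ts.insert([(1420113600, 20.5), (1420117200, 21.0), (1420120800, 21.5)])
    print(len(ts))  # 3 points, all landing in the same daily bucket
    print(ts[0])    # (1420113600, 20.5)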
|
nilq/baby-python
|
python
|
'''
Code Challenge: Solve the Eulerian Cycle Problem.
Input: The adjacency list of an Eulerian directed graph.
Output: An Eulerian cycle in this graph.
'''
import random
import copy
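# Example of the adjacency-list format this script expects (hypothetical
# test1.txt contents; nodes with several outgoing edges list their targets
# comma-separated, matching the parsing below):
#   0 -> 3
#   1 -> 0
#   2 -> 1,6
#   3 -> 2
#   4 -> 2
#   5 -> 4
#   6 -> 5,8
#   7 -> 9
#   8 -> 7
#   9 -> 6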
with open('test1.txt','r') as f:
#with open('dataset_203_2.txt','r') as f:
adjacency_list = dict()
eulerian_edge_len = 0
muti_node = []
for i in f:
i = i.split('->')
left = int(i[0].strip())
right = i[1].strip()
if ',' in right:
muti_node.append(left)
right = right.split(',')
right = [int(x) for x in right]
eulerian_edge_len = eulerian_edge_len + len(right)
else:
eulerian_edge_len = eulerian_edge_len + len(right)
right = int(right)
adjacency_list[left] = right
def cycle_form(adjacency_list, start_point):
adjacency_list_temp = copy.deepcopy(adjacency_list)
cycle_nodes = [start_point]
start_node = start_point
for i in range(eulerian_edge_len):
next_node = adjacency_list_temp[start_point]
if type(next_node) == int:
cycle_nodes.append(next_node)
start_point = next_node
else:
next_node = random.choice(next_node)
adjacency_list_temp[start_point].remove(next_node)
cycle_nodes.append(next_node)
start_point = next_node
if start_point in muti_node:
if len(adjacency_list_temp[start_point]) == 0:
break
if cycle_nodes[-1] == cycle_nodes[0]:
if type(adjacency_list_temp[cycle_nodes[0]]) == int:
break
if len(cycle_nodes) < (eulerian_edge_len + 1):
remain_muti_node = []
for i in muti_node:
if i in cycle_nodes:
if len(adjacency_list_temp[i]) > 0:
remain_muti_node.append(i)
new_start = random.choice(remain_muti_node)
else:
new_start = None
return [cycle_nodes, new_start]
def eulerian_cycle(adjacency_list):
start_point = random.choice(list(adjacency_list.keys()))
cycle_result = cycle_form(adjacency_list, start_point)
cycle = cycle_result[0]
while len(cycle) < (eulerian_edge_len + 1):
new_start = cycle_result[1]
cycle_new = cycle_form(adjacency_list, new_start)
cycle = cycle_new[0]
return cycle
print(eulerian_cycle(adjacency_list))
|
nilq/baby-python
|
python
|
"""Comic Rereading Discord Bot"""
from .rereadbot import *
async def setup(bot):
"""Setup the DoA Cogs"""
    await bot.add_cog(DoaRereadCog(bot, envfile="./.env"))
|
nilq/baby-python
|
python
|
from datetime import datetime, timezone
import requests
from schemas import Contest
from spider.utils import update_platform
def main():
headers = {"x-requested-with": "XMLHttpRequest"}
resp = requests.get("https://csacademy.com/contests/", headers=headers)
json_data = resp.json()
data = []
tz = timezone.utc
for item in json_data["state"]["Contest"]:
if item.get("baseContestId"):
continue
contest_id = item["id"]
name = item["longName"]
link = "https://csacademy.com/contest/" + item["name"]
if isinstance(item.get("startTime", None), float):
start_time = datetime.fromtimestamp(item["startTime"], tz=tz)
else:
continue
if isinstance(item.get("endTime", None), float):
end_time = datetime.fromtimestamp(item["endTime"], tz=tz)
else:
continue
data.append(
Contest(
contest_id=contest_id,
name=name,
link=link,
start_time=start_time,
end_time=end_time,
)
)
update_platform("CSAcademy", data)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#
# This source code is licensed under the Apache 2 license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import cPickle as pickle
import numpy as np
import h5py
import random
import pandas as pd
from nltk.tokenize import TweetTokenizer
word_tokenize = TweetTokenizer().tokenize
import re
# IMPORTANT: Make sure the parameters below match the specification of the generated
# summaries (i.e. the params['summaries_filename'] variable) in terms of the state
# and the dataset (i.e. params['dataset_location']) that will be loaded.
params = {
'state': 'test',
# 'state': 'validate',
'dataset_location': '../Datasets/ar/with_property_placeholders/',
# 'summaries_filename': './checkpoints/eo/with_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
# 'summaries_filename': './checkpoints/eo/without_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Validation.h5'
'summaries_filename': './checkpoints/ar/with_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
# 'summaries_filename': './checkpoints/ar/without_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
}
labels_file_location = '../Datasets/ar/Labels/labels_dict.p'
# We will only be displaying the most probable summary.
beamidx = 0
# The location that the output .csv will be stored.
summaries_dump_location = params['summaries_filename'].replace('h5', 'p')
# IMPORTANT: Leave the batch size unchanged
# It's the one with which we trained the models, and it should be the same
# as the one of the loaded pre-trained model that was used to generate the summaries
# (i.e. with beam-sample.lua). Change only if you train your own models using a
# different batch size.
batch_size = int(re.findall(r'(?<=batch_size_)(.*)(?=.beam_size)', params['summaries_filename'])[0])
beam_size = int(re.findall(r'(?<=beam_size_)(.*)(?=.summaries)', params['summaries_filename'])[0])
print('Parameters')
for key in params:
print('%s: %s' % (key, params[key]))
# Loading relevant dataset files.
summaries = h5py.File(params['summaries_filename'], 'r')
with open(params['dataset_location'] + 'summaries_dictionary.json', 'r') as f:
summaries_dictionary = json.load(f, 'utf-8')
id2word = summaries_dictionary['id2word']
id2word = {int(key): id2word[key] for key in id2word}
word2id = summaries_dictionary['word2id']
with open(params['dataset_location'] + 'triples_dictionary.json', 'r') as f:
triples_dictionary = json.load(f, 'utf-8')
max_num_triples = triples_dictionary['max_num_triples']
id2item = triples_dictionary['id2item']
id2item = {int(key): id2item[key] for key in id2item}
item2id = triples_dictionary['item2id']
# Loading supporting inverse dictionaries for surface forms and instance types.
with open(params['dataset_location'] + 'inv_surf_forms_dictionary.json', 'r') as f:
inv_surf_forms_tokens = json.load(f, encoding='utf-8')
with open(params['dataset_location'] + 'surf_forms_counts.p', 'rb') as f:
surf_forms_counts = pickle.load(f)
with open(params['dataset_location'] + 'inv_instance_types_with_predicates.json', 'r') as f:
inv_instancetypes_with_pred_dict = json.load(f, encoding='utf-8')
with open(params['dataset_location'] + 'splitDataset_with_targets.p', 'rb') as f:
splitDataset = pickle.load(f)
# Loading supporting labels_en dataset.
with open(labels_file_location, 'rb') as f:
labels = pickle.load(f)
print('All relevant dataset files from: %s have been successfully loaded.' % params['dataset_location'])
# Example of the structure of the supporting dictionaries:
# surf_form_counts[u'http://www.wikidata.org/entity/Q46611']: {u'Apollo-Programo': 10, u'Projekto Apollo': 6, u'projekto Apollo': 2}
# inv_surf_forms_tokens[u'#surFormToken71849']: [u'http://www.wikidata.org/entity/Q832222', u'Caprivi-streko']
# inv_instancetypes_with_pred_dict[u'#instanceTypeWithPredicate11']: u'http://www.wikidata.org/prop/direct/P138'
most_frequent_surf_form = {}
for entity in surf_forms_counts:
most_frequent_surf_form[entity] = sorted(surf_forms_counts[entity], key=lambda k: surf_forms_counts[entity][k], reverse=True)[0]
def tokenizeNumbers(inp_string):
tokens = word_tokenize(inp_string)
for j in range(0, len(tokens)):
try:
tempNumber = float(tokens[j].replace(',', ''))
if tempNumber // 1000 >= 1 and tempNumber // 1000 < 3:
tokens[j] = '<year> '
else:
tokens[j] = '0 '
except ValueError:
pass
# return detokenize(tokens, return_str=True) # detokenize has an issue with the non-latin characters.
return ' '.join(tokens)
def match_predicate_to_entity(token, triples, expressed_triples):
matched_entities = []
for tr in range(0, len(triples)):
if tr not in expressed_triples:
tempPredicate = triples[tr].split()[1]
if tempPredicate == token:
tempEntity = triples[tr].split()[-1]
if tempEntity == "<item>":
                    tempEntity = triples[tr].split()[0]
if tempEntity not in matched_entities:
matched_entities.append(tempEntity.decode('utf-8'))
if len(matched_entities) == 0:
token = '<resource>'
else:
random_selection = random.choice(matched_entities)
while random_selection not in labels and len(matched_entities) > 1:
matched_entities.remove(random_selection)
random_selection = random.choice(matched_entities)
if random_selection in labels:
if 'Datasets/ar/' in labels_file_location:
token = labels[random_selection].decode('unicode-escape')
else:
token = labels[random_selection]
expressed_triples.append(random_selection)
else:
token = '<resource>'
return token
def token_to_word(token, main_entity, triples, expressed_triples):
global summaries_type
if 'without_property_placeholders' in params['summaries_filename']:
assert ('#instanceTypeWithPredicate' not in token)
main_entity = main_entity
if "#surFormToken" in token:
word = inv_surf_forms_tokens[token[1:]][1] if "##surFormToken" in token else inv_surf_forms_tokens[token][1]
elif "#instanceTypeWithPredicate" in token:
word = match_predicate_to_entity(inv_instancetypes_with_pred_dict[token], triples, expressed_triples)
elif "#instanceType" in token:
word = inv_instancetypes_dict[token]
elif token == "<item>":
# The returned variable word is of type: unicode.
word = tokenizeNumbers(most_frequent_surf_form[main_entity])
else:
word = token
return word
output = {'Main-Item': [],
'index': [],
'number_original_triples': [],
'original_triples': [],
'number_input_triples': [],
'final_triples_with_types_reduced': [],
'final_triples_with_types': [],
'Target': [],
'Generated-Summary': []}
for batchidx in range(0, len(summaries['triples'])):
print('Post-processing summaries from %d. Batch...' % (batchidx + 1))
for instance in range(0, batch_size):
# Pay attention to the Python division at the np.round() function -- can seriously mess things up!
# More info at: https://stackoverflow.com/questions/28617841/rounding-to-nearest-int-with-numpy-rint-not-consistent-for-5
# We are using the built-in version of round which seems to be doing the trick for now.
splitDatasetIndex = int(round(instance * len(splitDataset[params['state']]['item']) / float(batch_size)) + batchidx)
mainItem = splitDataset[params['state']]['item'][splitDatasetIndex].decode('utf-8')
final_triples_with_types = []
for tr in range(0, len(splitDataset[params['state']]['final_triples_with_types'][splitDatasetIndex])):
tempTriple = splitDataset[params['state']]['final_triples_with_types'][splitDatasetIndex][tr]
if type(tempTriple) is not unicode:
tempTriple = tempTriple.decode('utf-8')
final_triples_with_types.append(tempTriple.replace('<item>', mainItem))
final_triples_with_types_reduced = []
for tr in range(0, len(splitDataset[params['state']]['final_triples_with_types_reduced'][splitDatasetIndex])):
# eq_used_for_training_triple: the triple as it was used by the neural network
# during training, validation and testing.
eq_used_for_training_triple = ' '.join([id2item[summaries['triples'][batchidx][tr][instance][j]] for j in range(0, 3)])
assert(splitDataset[params['state']]['final_triples_with_types_reduced'][splitDatasetIndex][tr] == eq_used_for_training_triple)
            if type(eq_used_for_training_triple) is not unicode:
eq_used_for_training_triple = eq_used_for_training_triple.decode('utf-8')
final_triples_with_types_reduced.append(eq_used_for_training_triple.replace('<item>', mainItem))
original_triples = []
for tr in range(0, len(splitDataset[params['state']]['triples'][splitDatasetIndex])):
tempTriple = splitDataset[params['state']]['triples'][splitDatasetIndex][tr]
if type(tempTriple) is not unicode:
tempTriple = tempTriple.decode('utf-8')
original_triples.append(tempTriple.replace('<item>', mainItem))
assert(len(final_triples_with_types) >= len(final_triples_with_types_reduced))
assert(len(final_triples_with_types) == len(original_triples))
expressed_triples = []
# We read from the tail of the argsort to find the elements
# with the highest probability.
selected_summary_index = np.argsort(summaries['probabilities'][:, batchidx * batch_size + instance])[::-1][beamidx]
summary = ''
i = 0
while summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i] != word2id['<end>']:
summary += ' ' + token_to_word(id2word[summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i]],
mainItem,
splitDataset[params['state']]['triples'][splitDatasetIndex],
expressed_triples)
if i == len(summaries['summaries'][selected_summary_index][batchidx * batch_size + instance]) - 1:
break
else:
i += 1
summary += ' ' + token_to_word(id2word[summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i]],
mainItem,
splitDataset[params['state']]['triples'][splitDatasetIndex],
expressed_triples)
# Appending everything to the dictionary of lists.
if id2item[0] not in summary[1:]:
output['index'].append((batchidx, instance))
output['number_original_triples'].append(len(original_triples))
output['original_triples'].append(original_triples)
output['number_input_triples'].append(len(final_triples_with_types_reduced))
output['final_triples_with_types_reduced'].append(final_triples_with_types_reduced)
output['final_triples_with_types'].append(final_triples_with_types)
output['Main-Item'].append(mainItem)
output['Target'].append(splitDataset[params['state']]['actual_target'][splitDatasetIndex])
output['Generated-Summary'].append(summary[1:])
# Saving all the generated summaries along with their input triples in a pickle file.
with open(summaries_dump_location, 'wb') as f:
pickle.dump(output, f)
print('The generated summaries have been successfully saved at: %s' % summaries_dump_location)
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.11 on 2019-11-18 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("course_catalog", "0052_userlistitem_contenttypes")]
operations = [
migrations.CreateModel(
name="Playlist",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=256)),
("short_description", models.TextField(blank=True, null=True)),
(
"_deprecated_offered_by",
models.CharField(
blank=True, db_column="offered_by", max_length=128, null=True
),
),
(
"image_description",
models.CharField(blank=True, max_length=1024, null=True),
),
("platform", models.CharField(max_length=40)),
("playlist_id", models.CharField(max_length=80)),
("image_src", models.URLField(blank=True, max_length=400, null=True)),
("url", models.URLField(max_length=2048, null=True)),
("published", models.BooleanField(default=True)),
("has_user_list", models.BooleanField(default=True)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PlaylistVideo",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("position", models.PositiveIntegerField()),
(
"playlist",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlist_videos",
to="course_catalog.Playlist",
),
),
(
"video",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlist_videos",
to="course_catalog.Video",
),
),
],
),
migrations.CreateModel(
name="VideoChannel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=256)),
("short_description", models.TextField(blank=True, null=True)),
(
"_deprecated_offered_by",
models.CharField(
blank=True, db_column="offered_by", max_length=128, null=True
),
),
("platform", models.CharField(max_length=40)),
("channel_id", models.CharField(max_length=80)),
("full_description", models.TextField(blank=True, null=True)),
("published", models.BooleanField(default=True)),
(
"offered_by",
models.ManyToManyField(
blank=True, to="course_catalog.LearningResourceOfferor"
),
),
(
"topics",
models.ManyToManyField(blank=True, to="course_catalog.CourseTopic"),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="playlist",
name="channel",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlists",
to="course_catalog.VideoChannel",
),
),
migrations.AddField(
model_name="playlist",
name="offered_by",
field=models.ManyToManyField(
blank=True, to="course_catalog.LearningResourceOfferor"
),
),
migrations.AddField(
model_name="playlist",
name="topics",
field=models.ManyToManyField(blank=True, to="course_catalog.CourseTopic"),
),
migrations.AddField(
model_name="playlist",
name="videos",
field=models.ManyToManyField(
through="course_catalog.PlaylistVideo", to="course_catalog.Video"
),
),
migrations.AlterUniqueTogether(
name="playlistvideo", unique_together={("playlist", "video")}
),
]
|
nilq/baby-python
|
python
|
"""Helpers to integrate the process on controlling profiles."""
from dataclasses import dataclass
from typing import List, Set, Optional
from bson import ObjectId
from flags import ProfilePermission, PermissionLevel
from mongodb.factory import ProfileManager, ChannelManager
from mongodb.helper import IdentitySearcher
from models import ChannelProfileModel
@dataclass
class ProfileControlEntry:
"""Single entry representing the profile control checking result."""
root_oid: ObjectId
name: str
controllable: bool
@dataclass
class ChannelProfileEntry:
"""Single entry representing a channel profile."""
profile: ChannelProfileModel
owner_names: List[str]
def __post_init__(self):
self.owner_names = sorted(self.owner_names)
class ProfileHelper:
"""Helper to process the profile data."""
@staticmethod
def get_user_profile_controls(
channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) \
-> List[ProfileControlEntry]:
"""
        Check if the requester can perform certain actions on members who have the given profile.
The **certain actions** mentioned above currently are:
- Control the profile attaching status
        Actions cannot be performed on users who have a higher permission level.
        Actions also cannot be performed on the default profile.
.. note::
This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``.
:param channel_model: channel data of the profile
:param profile_oid: OID of the profile
:param requester_oid: OID of the user who requested this check
:param permissions: permissions that the requester has
:return: list of `ProfileControlEntry` containing the check result
"""
ret = []
names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)
perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)
remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions
remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions
is_default = channel_model.config.default_profile_oid == profile_oid
user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())
for uid, name in sorted(names.items(), key=lambda item: item[1]):
if not name:
name = str(uid)
controllable = False
if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):
controllable = remove_self if uid == requester_oid else remove_member
ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))
return ret
@staticmethod
def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str] = None) -> List[ChannelProfileEntry]:
"""
Get a list of the channel profiles in ``channel_oid``.
``partial_name`` can be a part of the profile name.
:param channel_oid: channel to get the profiles
:param partial_name: keyword to get the profiles
:return: list of channel profiles
"""
ret = []
# Get channel profiles. Terminate if no available profiles
profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))
if not profs:
return ret
# Get channel data. Terminate if no channel data found
channel_model = ChannelManager.get_channel_oid(channel_oid)
if not channel_model:
return ret
# Get user names, and the prof-channel dict
user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])
user_oids = []
for _, onplat_oids in user_oids_dict.items():
user_oids.extend(onplat_oids)
user_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)
for prof in profs:
uids = user_oids_dict.get(prof.id, [])
ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))
return ret
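# Hypothetical usage sketch (illustration only -- channel_model, channel_oid, profile_oid and
# requester_oid are placeholders, not values defined in this module):
#
#   entries = ProfileHelper.get_channel_profiles(channel_oid, partial_name="Staff")
#   controls = ProfileHelper.get_user_profile_controls(
#       channel_model, profile_oid, requester_oid,
#       {ProfilePermission.PRF_CONTROL_SELF, ProfilePermission.PRF_CONTROL_MEMBER})
#   detachable_oids = [entry.root_oid for entry in controls if entry.controllable]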
|
nilq/baby-python
|
python
|
import uos
import network
import socket
import select
import time
from machine import UART, Pin
ap_mode = False
recvPollers = []
sockets = []
clients = []
def socketSend(message):
    for sock in sockets:  # avoid shadowing the socket module
        try:
            sock.sendall(message)
        except:
            sock.close()
def generateDataPkg(text):
data = bytearray(b'\x3A\x00\x01')
data.extend(text.encode('utf8'))
for i in range(4-(len(text)%4)):
data.append(0)
data.append((~(sum(data)-58)+1)&0xFF)
return data
def generateDescPkg(dataPkg):
desc = bytearray(b'\x3a\x4e\x44\x64\x00\x01\x00\x01\x00\x00\x00\x00\x05\xff\x00')
desc[9], desc[11] = (len(dataPkg)-2,)*2
desc[14] = (~(sum(desc)-58)+1)&0xFF
return desc
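# How the two helpers above appear to build a transfer (inferred only from the code here, as a
# sketch of the packet layout): a data packet is 0x3A 0x00 0x01, the UTF-8 text, zero padding up
# to a 4-byte boundary, then a checksum byte equal to the two's complement of the byte sum
# excluding the leading 0x3A. For example, generateDataPkg("rtr") yields 3A 00 01 72 74 72 00 A7.
# The descriptor packet mirrors the data packet's length (len - 2) in bytes 9 and 11 and ends
# with the same style of checksum in byte 14.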
def casioSend(descPkg, dataPkg):
uart.write(b'\x15')
uart.read(1)
uart.write(descPkg)
uart.read(1)
uart.write(dataPkg)
uart.read(1)
def handler(pin):
dataPkg = generateDataPkg("rtr")
descPkg = generateDescPkg(dataPkg)
casioSend(descPkg, dataPkg)
sta_if = network.WLAN(network.STA_IF)
ap_if = network.WLAN(network.AP_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
ap_if.active(False)
sta_if.connect('gurkenterror', 'saas1234')
while not sta_if.isconnected():
if sta_if.status() == 3:
print('network not available, starting ap')
sta_if.active(False)
ap_if.active(True)
ap_if.config(essid="gurkenterror", password="saas1234")
ap_mode = True
break
if ap_mode:
print('network config:', ap_if.ifconfig())
else:
print('network config:', sta_if.ifconfig())
if not ap_mode:
s = socket.socket()
print("connecting")
s.connect(('192.168.4.1', 65432))
print("connected")
clients = eval(s.recv(500))
print(clients)
sockets.append(s)
recvPoller = select.poll()
recvPoller.register(s, select.POLLIN)
recvPollers.append(recvPoller)
for client in clients:
s = socket.socket()
s.connect((client, 65432))
sockets.append(s)
recvPoller = select.poll()
recvPoller.register(s, select.POLLIN)
recvPollers.append(recvPoller)
listener = socket.socket()
listener.bind(("", 65432))
listener.listen(10)
print("listener started")
connPoller = select.poll()
connPoller.register(listener, select.POLLIN)
uos.dupterm(None, 1) # disable REPL on UART(0)
uart = UART(0, 38400)
uart.init(38400, bits=8, parity=None, stop=1, timeout=1000)
button = Pin(0, Pin.IN, Pin.PULL_UP)
button.irq(trigger=Pin.IRQ_FALLING, handler=handler)
# Main loop
while True:
# Handle new connections
connEvents = connPoller.poll(100)
for descriptor, Event in connEvents:
print("Got an incoming connection request")
conn, addr = listener.accept()
print(conn, addr)
conn.sendall(str(clients))
sockets.append(conn)
clients.append(addr[0])
recvPoller = select.poll()
recvPoller.register(conn, select.POLLIN)
recvPollers.append(recvPoller)
    # Handle new messages for every socket
for recvPoller in recvPollers:
recvEvents = recvPoller.poll(100)
for descriptor, Event in recvEvents:
data = descriptor.recv(500)
print("Received: ", data)
descPkg = generateDescPkg(data)
casioSend(descPkg, data)
# Handle UART com
if uart.any() and uart.read(1) == b'\x15':
uart.write(b'\x13')
desc = uart.read(15)
uart.write(b'\x06')
msg = uart.read(desc[9]+2)
uart.write(b'\x06')
print("".join("%02x " % i for i in msg))
socketSend(msg)
try:
print("Received: ", msg[3:-2].decode("utf8"))
except:
print("not unicode")
|
nilq/baby-python
|
python
|
from importlib import import_module
def load_extensions(app):
for extension in app.config["EXTENSIONS"]:
module_name, factory = extension.split(":")
ext = import_module(module_name)
getattr(ext, factory)(app)
def load_blueprints(app):
for extension in app.config["BLUEPRINTS"]:
module_name, factory = extension.split(":")
ext = import_module(module_name)
getattr(ext, factory)(app)
def load_middlewares(app):
for middleware in reversed(app.config["MIDDLEWARES"]):
module_name, klass = middleware.split(":")
ext = import_module(module_name)
app.wsgi_app = getattr(ext, klass)(app.wsgi_app)
def init_app(app, settings_override=None):
app.config.from_object("settings")
if settings_override:
app.config.update(settings_override)
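# Illustrative settings sketch (the module paths below are made up -- the real values live in the
# project's settings module): each entry is a "<module path>:<callable name>" string, matching the
# split(":") used by the loaders above.
#
#   EXTENSIONS = ["ext.database:init_app", "ext.auth:init_app"]
#   BLUEPRINTS = ["blueprints.webui:init_app"]
#   MIDDLEWARES = ["middlewares.proxy:ProxyFix"]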
|
nilq/baby-python
|
python
|
#let's work on dictionaries
'''stuff = {'name':'Vivek', 'age':18, 'height':6*2}
print(stuff['name'])
print(stuff['age'])
print(stuff)
'''
'''
state = {
'Oregon' : 'OR',
'Florida' : 'FL',
'California': 'CA',
'New York' : 'NY',
'Michigan' : 'MI'
}
cities = {
'CA': 'California',
'NY' : 'New York',
'MI' : 'Michigan'
}
cities['OR'] = 'Oregon'
cities['FL'] = 'Florida'
print('-'*10)
print("NY state has : ",cities['NY'])
print('-'*10)'''
a = {
'a' : 'Monday',
'b' : 'Tuesday',
'c' : 'Wednesday',
'd' : 'Thursday',
'e' : 'Friday',
'f' : 'Saturday',
'g' : 'Sunday'
}
print(a)
for key,k in a.items():
print(key, k)
print("-"*10)
print(a.get('a',"Hi there"))
print(a.get('h', "Hello World"))
|
nilq/baby-python
|
python
|
from dataclasses import dataclass, field
from typing import Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.3/collision.xsd"
@dataclass
class Collision:
"""The collision properties of a link.
Note that this can be different from the visual properties of a
link, for example, simpler collision models are often used to reduce
computation time.
Parameters
----------
laser_retro: intensity value returned by laser sensor.
max_contacts: Maximum number of contacts allowed between two
entities. This value overrides the max_contacts element defined
in physics.
pose: The reference frame of the collision element, relative to the
reference frame of the link.
geometry: The shape of the visual or collision object.
surface: The surface parameters
name: Unique name for the collision element within the scope of the
parent link.
"""
class Meta:
name = "collision"
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=10,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface: Optional["Collision.Surface"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Surface:
"""
The surface parameters.
"""
bounce: Optional["Collision.Surface.Bounce"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
friction: Optional["Collision.Surface.Friction"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
contact: Optional["Collision.Surface.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Bounce:
"""
Parameters
----------
restitution_coefficient: Bounciness coefficient of
restitution, from [0...1], where 0=no bounciness.
threshold: Bounce velocity threshold, below which effective
coefficient of restitution is 0.
"""
restitution_coefficient: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: float = field(
default=100000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Friction:
"""
Parameters
----------
ode: ODE friction parameters
"""
ode: Optional["Collision.Surface.Friction.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE friction parameters.
Parameters
----------
mu: Coefficient of friction in the range of [0..1].
mu2: Second coefficient of friction in the range of
[0..1]
fdir1: 3-tuple specifying direction of mu1 in the
collision local reference frame.
slip1: Force dependent slip direction 1 in collision
local frame, between the range of [0..1].
slip2: Force dependent slip direction 2 in collision
local frame, between the range of [0..1].
"""
mu: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mu2: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
slip1: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
slip2: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
Parameters
----------
ode: ODE contact parameters
"""
ode: Optional["Collision.Surface.Contact.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
max_vel: maximum contact correction velocity truncation
term.
min_depth: minimum allowable depth before contact
correction impulse is applied
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_vel: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_depth: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
|
nilq/baby-python
|
python
|
import trio
from socket import (
inet_aton,
)
import pytest
import pytest_trio
from async_service import background_trio_service
from p2p.discv5.channel_services import (
DatagramReceiver,
DatagramSender,
Endpoint,
IncomingDatagram,
OutgoingDatagram,
OutgoingPacket,
PacketDecoder,
PacketEncoder,
)
from p2p.tools.factories import (
AuthTagPacketFactory,
EndpointFactory,
)
@pytest_trio.trio_fixture
async def socket_pair():
sending_socket = trio.socket.socket(
family=trio.socket.AF_INET,
type=trio.socket.SOCK_DGRAM,
)
receiving_socket = trio.socket.socket(
family=trio.socket.AF_INET,
type=trio.socket.SOCK_DGRAM,
)
# specifying 0 as port number results in using random available port
await sending_socket.bind(("127.0.0.1", 0))
await receiving_socket.bind(("127.0.0.1", 0))
return sending_socket, receiving_socket
@pytest.mark.trio
async def test_datagram_receiver(socket_pair):
sending_socket, receiving_socket = socket_pair
receiver_address = receiving_socket.getsockname()
sender_address = sending_socket.getsockname()
send_channel, receive_channel = trio.open_memory_channel(1)
async with background_trio_service(DatagramReceiver(receiving_socket, send_channel)):
data = b"some packet"
await sending_socket.sendto(data, receiver_address)
with trio.fail_after(0.5):
received_datagram = await receive_channel.receive()
assert received_datagram.datagram == data
assert received_datagram.sender_endpoint.ip_address == inet_aton(sender_address[0])
assert received_datagram.sender_endpoint.port == sender_address[1]
@pytest.mark.trio
async def test_datagram_sender(socket_pair):
sending_socket, receiving_socket = socket_pair
receiver_endpoint = receiving_socket.getsockname()
sender_endpoint = sending_socket.getsockname()
send_channel, receive_channel = trio.open_memory_channel(1)
async with background_trio_service(DatagramSender(receive_channel, sending_socket)):
outgoing_datagram = OutgoingDatagram(
b"some packet",
Endpoint(inet_aton(receiver_endpoint[0]), receiver_endpoint[1]),
)
await send_channel.send(outgoing_datagram)
with trio.fail_after(0.5):
data, sender = await receiving_socket.recvfrom(1024)
assert data == outgoing_datagram.datagram
assert sender == sender_endpoint
@pytest.mark.trio
async def test_packet_decoder():
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
service = PacketDecoder(datagram_receive_channel, packet_send_channel)
async with background_trio_service(service):
packet = AuthTagPacketFactory()
sender_endpoint = EndpointFactory()
await datagram_send_channel.send(IncomingDatagram(
datagram=packet.to_wire_bytes(),
sender_endpoint=sender_endpoint,
))
with trio.fail_after(0.5):
incoming_packet = await packet_receive_channel.receive()
assert incoming_packet.packet == packet
assert incoming_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
assert incoming_packet.sender_endpoint.port == sender_endpoint.port
@pytest.mark.trio
async def test_packet_decoder_error():
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
service = PacketDecoder(datagram_receive_channel, packet_send_channel)
async with background_trio_service(service):
# send invalid packet
await datagram_send_channel.send(IncomingDatagram(
datagram=b"not a valid packet",
sender_endpoint=EndpointFactory(),
))
# send valid packet
packet = AuthTagPacketFactory()
sender_endpoint = EndpointFactory()
await datagram_send_channel.send(IncomingDatagram(
datagram=packet.to_wire_bytes(),
sender_endpoint=sender_endpoint,
))
# ignore the invalid one, only receive the valid one
with trio.fail_after(0.5):
incoming_packet = await packet_receive_channel.receive()
assert incoming_packet.packet == packet
assert incoming_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
assert incoming_packet.sender_endpoint.port == sender_endpoint.port
@pytest.mark.trio
async def test_packet_encoder():
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
service = PacketEncoder(packet_receive_channel, datagram_send_channel)
async with background_trio_service(service):
receiver_endpoint = EndpointFactory()
outgoing_packet = OutgoingPacket(
packet=AuthTagPacketFactory(),
receiver_endpoint=receiver_endpoint,
)
await packet_send_channel.send(outgoing_packet)
with trio.fail_after(0.5):
outgoing_datagram = await datagram_receive_channel.receive()
assert outgoing_datagram.datagram == outgoing_packet.packet.to_wire_bytes()
assert outgoing_datagram.receiver_endpoint.ip_address == receiver_endpoint.ip_address
assert outgoing_datagram.receiver_endpoint.port == receiver_endpoint.port
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from dataclasses import dataclass
from dataclasses_io import dataclass_io
from pathlib import Path
_TEST_PATH = Path(__file__).parent
@dataclass_io
@dataclass
class _MyDataclass:
id: int
name: str
memo: str
if __name__ == "__main__":
dataclass1 = _MyDataclass(id=42, name="John Doe", memo="Hello, world!")
# {'id': 42, 'name': 'John Doe', 'memo': 'Hello, world!'}
print("dataclass1", dataclass1.config)
dataclass1.save(_TEST_PATH / "test.json")
dataclass2 = _MyDataclass.load(_TEST_PATH / "test.json")
print("dataclass2", dataclass2.config) # same as line 19
# dataclass1 and dataclass2 have the same properties, but refer to
# different memories. save() and load() operate well as intended.
print(f"dataclass1 == dataclass2: {dataclass1 == dataclass2}")
print(f"dataclass1 is dataclass2: {dataclass1 is dataclass2}")
|
nilq/baby-python
|
python
|
# Given an array of n positive integers and a positive integer s, find the minimal length of a
# contiguous subarray whose sum is >= s. If there is no such subarray, return 0 instead.
#
# Example:
#
# Input: s = 7, nums = [2,3,1,2,4,3]
# Output: 2
# Explanation: the subarray [4,3] is the shortest contiguous subarray satisfying the condition.
#
# Follow up:
#
# If you have figured out the O(n) solution, try coding a solution with O(n log n) time complexity.
#
# Source: LeetCode (LeetCode-CN)
# Link: https://leetcode-cn.com/problems/minimum-size-subarray-sum
# The copyright belongs to LeetCode. For commercial reprints please contact them for official
# authorization; for non-commercial reprints please credit the source.
from typing import List
class Solution:
def minSubArrayLen(self, s: int, nums: List[int]) -> int:
if not nums:
return 0
left = 0
right = 0
ans = float('inf')
sum_of_nums = nums[0]
while left <= right < len(nums):
if sum_of_nums < s:
right += 1
if right < len(nums):
sum_of_nums += nums[right]
else:
ans = min(ans, right - left + 1)
sum_of_nums -= nums[left]
left += 1
return 0 if ans == float('inf') else ans
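# A sketch (not part of the original solution) of the O(n log n) follow-up mentioned in the
# comments above: since all numbers are positive, the prefix sums are strictly increasing, so
# for each start index we can binary-search the first prefix sum that reaches the target.
# The helper name min_sub_array_len_nlogn is ours, chosen only for illustration.
import bisect

def min_sub_array_len_nlogn(s: int, nums: List[int]) -> int:
    prefix = [0]
    for num in nums:
        prefix.append(prefix[-1] + num)
    ans = float('inf')
    for i in range(len(nums)):
        # smallest j such that prefix[j] - prefix[i] >= s
        j = bisect.bisect_left(prefix, prefix[i] + s)
        if j <= len(nums):
            ans = min(ans, j - i)
    return 0 if ans == float('inf') else ans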
if __name__ == '__main__':
s = Solution()
assert s.minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) == 2
assert s.minSubArrayLen(4, [1, 4, 4]) == 1
assert s.minSubArrayLen(11, [1, 2, 3, 4, 5]) == 3
|
nilq/baby-python
|
python
|
"""
Clean and validate a DataFrame column containing country names.
"""
from functools import lru_cache
from operator import itemgetter
from os import path
from typing import Any, Union
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
import regex as re
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, create_report_new, to_dask
COUNTRY_DATA_FILE = path.join(path.split(path.abspath(__file__))[0], "country_data.tsv")
DATA = pd.read_csv(COUNTRY_DATA_FILE, sep="\t", encoding="utf-8", dtype=str)
REGEXES = [re.compile(entry, re.IGNORECASE) for entry in DATA.regex]
def clean_country(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
input_format: str = "auto",
output_format: str = "name",
fuzzy_dist: int = 0,
strict: bool = False,
inplace: bool = False,
errors: str = "coerce",
report: bool = True,
progress: bool = True,
) -> pd.DataFrame:
"""
Clean and standardize country names.
Read more in the :ref:`User Guide <country_userguide>`.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
The name of the column containing country names.
input_format
The ISO 3166 input format of the country.
- 'auto': infer the input format
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'auto')
output_format
The desired ISO 3166 format of the country:
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'name')
fuzzy_dist
The maximum edit distance (number of single character insertions, deletions
or substitutions required to change one word into the other) between a country value
and input that will count as a match. Only applies to 'auto', 'name' and 'official'
input formats.
(default: 0)
strict
If True, matching for input formats 'name' and 'official' are done by looking
for a direct match. If False, matching is done by searching the input for a
regex match.
(default: False)
inplace
If True, delete the column containing the data that was cleaned. Otherwise,
keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
report
If True, output the summary report. Otherwise, no report is outputted.
(default: True)
progress
If True, display a progress bar.
(default: True)
Examples
--------
>>> df = pd.DataFrame({'country': [' Canada ', 'US']})
>>> clean_country(df, 'country')
Country Cleaning Report:
2 values cleaned (100.0%)
Result contains 2 (100.0%) values in the correct format and 0 null values (0.0%)
country country_clean
0 Canada Canada
1 US United States
"""
# pylint: disable=too-many-arguments
input_formats = {"auto", "name", "official", "alpha-2", "alpha-3", "numeric"}
output_formats = {"name", "official", "alpha-2", "alpha-3", "numeric"}
if input_format not in input_formats:
raise ValueError(
f'input_format {input_format} is invalid, it needs to be one of "auto", '
'"name", "official", "alpha-2", "alpha-3" or "numeric'
)
if output_format not in output_formats:
raise ValueError(
f'output_format {output_format} is invalid, it needs to be "name", '
'"official", "alpha-2", "alpha-3" or "numeric'
)
if strict and fuzzy_dist > 0:
raise ValueError(
"can't do fuzzy matching while strict mode is enabled, "
"set strict=False for fuzzy matching or fuzzy_dist=0 for strict matching"
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [
_format_country(x, input_format, output_format, fuzzy_dist, strict, errors) for x in srs
],
meta=object,
)
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
_code_=df["clean_code_tup"].map(itemgetter(1)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
# counts of codes indicating how values were changed
stats = df["_code_"].value_counts(sort=False)
df = df.drop(columns=["clean_code_tup", "_code_"])
if inplace:
df = df.drop(columns=column)
with ProgressBar(minimum=1, disable=not progress):
df, stats = dask.compute(df, stats)
# output a report describing the result of clean_country
if report:
create_report_new("Country", stats, errors)
return df
def validate_country(
x: Union[str, int, pd.Series], input_format: str = "auto", strict: bool = True
) -> Union[bool, pd.Series]:
"""
Validate country names.
Read more in the :ref:`User Guide <country_userguide>`.
Parameters
----------
x
pandas Series of countries or str/int country value.
input_format
The ISO 3166 input format of the country.
- 'auto': infer the input format
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'auto')
strict
If True, matching for input formats 'name' and 'official' are done by
looking for a direct match, if False, matching is done by searching
the input for a regex match.
        (default: True)
Examples
--------
>>> validate_country('United States')
True
>>> df = pd.DataFrame({'country': ['Canada', 'NaN']})
>>> validate_country(df['country'])
0 True
1 False
Name: country, dtype: bool
"""
if isinstance(x, pd.Series):
x = x.astype(str).str.lower().str.strip()
return x.apply(_check_country, args=(input_format, strict, False))
x = str(x).lower().strip()
return _check_country(x, input_format, strict, False)
def _format_country(
val: Any,
input_format: str,
output_format: str,
fuzzy_dist: int,
strict: bool,
errors: str,
) -> Any:
"""
Function to transform a country instance into the desired format
The last component of the returned tuple contains a code indicating how the
input value was changed:
0 := the value is null
1 := the value could not be parsed
2 := the value is cleaned and the cleaned value is DIFFERENT than the input value
3 := the value is cleaned and is THE SAME as the input value (no transformation)
"""
# pylint: disable=too-many-arguments
# _check_country parses input value "val", and returns the index of the country
# in the DATA dataframe. The returned value "status" can be either "null"
# (which means val is a null value), "unknown" (in which case val
# could not be parsed) or "success" (a successful parse of the value).
country = str(val).lower().strip()
result_index, status = _check_country(country, input_format, strict, True)
if fuzzy_dist > 0 and status == "unknown" and input_format in ("auto", "name", "official"):
result_index, status = _check_fuzzy_dist(country, fuzzy_dist)
if status == "null":
return np.nan, 0
if status == "unknown":
if errors == "raise":
raise ValueError(f"unable to parse value {val}")
return val if errors == "ignore" else np.nan, 1
result = DATA.loc[result_index, output_format]
if pd.isna(result):
# country doesn't have the required output format
if errors == "raise":
raise ValueError(f"unable to parse value {val}")
return val if errors == "ignore" else np.nan, 1
return result, 2 if val != result else 3
@lru_cache(maxsize=2 ** 20)
def _check_country(country: str, input_format: str, strict: bool, clean: bool) -> Any:
"""
Finds the index of the given country in the DATA dataframe.
Parameters
----------
country
string containing the country value being cleaned
input_format
the ISO 3166 input format of the country
strict
        If True, for input types "name" and "official" the function looks for a direct match
in the DATA dataframe. If False, the country input is searched for a regex match.
clean
If True, a tuple (index, status) is returned.
If False, the function returns True/False to be used by the validate country function.
"""
if country in NULL_VALUES:
return (None, "null") if clean else False
if input_format == "auto":
input_format = _get_format_from_name(country)
if strict and input_format == "regex":
for form in ("name", "official"):
ind = DATA[
DATA[form].str.contains(f"^{re.escape(country)}$", flags=re.IGNORECASE, na=False)
].index
if np.size(ind) > 0:
return (ind[0], "success") if clean else True
elif not strict and input_format in ("regex", "name", "official"):
for index, country_regex in enumerate(REGEXES):
if country_regex.search(country):
return (index, "success") if clean else True
else:
ind = DATA[
DATA[input_format].str.contains(
f"^{re.escape(country)}$", flags=re.IGNORECASE, na=False
)
].index
if np.size(ind) > 0:
return (ind[0], "success") if clean else True
return (None, "unknown") if clean else False
@lru_cache(maxsize=2 ** 20)
def _check_fuzzy_dist(country: str, fuzzy_dist: int) -> Any:
"""
A match is found if a country has an edit distance <= fuzzy_dist
with a string that contains a match with one of the country regexes.
Find the index of a match with a minimum edit distance.
"""
results = []
for i, country_regex in enumerate(DATA.regex):
# {e<=fuzzy_dist} means the total number of errors
# (insertions, deletions and substitutions) must be <= fuzzy_dist,
# re.BESTMATCH looks for a match with minimum number of errors
fuzzy_regex = f"({country_regex}){{e<={fuzzy_dist}}}"
match = re.search(fuzzy_regex, country, flags=re.BESTMATCH | re.IGNORECASE)
if match:
# add total number of errors and the index to results
results.append((sum(match.fuzzy_counts), i))
if not results:
return None, "unknown"
return min(results)[1], "success"
def _get_format_from_name(name: str) -> str:
"""
Function to infer the input format. Used when the input format is auto.
"""
try:
int(name)
return "numeric"
except ValueError:
return "alpha-2" if len(name) == 2 else "alpha-3" if len(name) == 3 else "regex"
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-15 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import jobs.models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0008_auto_20161115_0222'),
]
operations = [
migrations.AlterField(
model_name='additionalinformation',
name='resume',
field=models.FileField(blank=True, upload_to=jobs.models.get_file_path),
),
]
|
nilq/baby-python
|
python
|
from normality import normalize
def text_parts(text):
text = normalize(text, latinize=True)
if text is None:
return set()
return set(text.split(' '))
def index_text(proxy):
texts = set()
for name in proxy.names:
texts.update(text_parts(name))
return ' '.join(texts)
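# Illustration only (the exact output depends on the installed normality version):
#   text_parts("Łódź Voivodeship")  ->  {"lodz", "voivodeship"}
# index_text(proxy) then joins the de-duplicated parts of every name into one searchable string.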
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F  # F.softmax is used in predict()
from nn_blocks import *
from torch import optim
import time
class DApredictModel(nn.Module):
def __init__(self, utt_vocab, da_vocab, tod_bert, config):
super(DApredictModel, self).__init__()
if config['DApred']['use_da']:
self.da_encoder = DAEncoder(da_input_size=len(da_vocab.word2id), da_embed_size=config['DApred']['DA_EMBED'],
da_hidden=config['DApred']['DA_HIDDEN'])
self.da_context = DAContextEncoder(da_hidden=config['DApred']['DA_HIDDEN'])
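        # The extra 768 below is presumably the hidden size of the TOD-BERT encoding that
        # _encode() concatenates when config['use_tod'] is set (an assumption; not stated here).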
dec_hidden_size = config['DApred']['DA_HIDDEN']+config['DApred']['UTT_CONTEXT']*2+1+768 if config['DApred']['use_da'] else config['DApred']['UTT_CONTEXT']*2+1
self.da_decoder = DADecoder(da_input_size=len(da_vocab.word2id), da_embed_size=config['DApred']['DA_EMBED'],
da_hidden=dec_hidden_size)
self.utt_encoder = UtteranceEncoder(utt_input_size=len(utt_vocab.word2id), embed_size=config['DApred']['UTT_EMBED'],
utterance_hidden=config['DApred']['UTT_HIDDEN'], padding_idx=utt_vocab.word2id['<PAD>'])
self.utt_context = UtteranceContextEncoder(utterance_hidden_size=config['DApred']['UTT_CONTEXT']*2+1)
self.attention = Attention(self.utt_encoder.hidden_size*2)
self.criterion = nn.CrossEntropyLoss(ignore_index=0)
self.config = config
self.tod_bert = tod_bert
def forward(self, X_da, Y_da, X_utt, TC, turn, step_size):
"""
X_da: input sequence of DA, Tensor(window_size, batch_size, 1)
Y_da: gold DA, Tensor(batch_size, 1)
X_utt: input sentences, Tensor(window_size, batch_size, seq_len, 1)
turn: whether the next speaker equal to current speaker, Tensor(window_size, batch_size, 1)
"""
dec_hidden = self._encode(X_da=X_da, X_utt=X_utt, TC=TC, step_size=step_size, turn=turn)
decoder_output = self.da_decoder(dec_hidden) # (batch_size, 1, DA_VOCAB)
decoder_output = decoder_output.squeeze(1) # (batch_size, DA_VOCAB)
Y_da = Y_da.squeeze()
if self.config['use_weights']:
if self.config['use_freq']:
device = torch.device('cpu')
class_weights = [0, 0.499, 0.7621, 0.8918, 0.9002, 0.9799, 0.9881, 0.9879, 0.9904]
weights = torch.FloatTensor(class_weights).to(device)
w_criterion = nn.CrossEntropyLoss(weight=weights, ignore_index=0)
loss = w_criterion(decoder_output, Y_da)
#loss = self.criterion(decoder_output, Y_da)
if self.training:
loss.backward()
return loss.item(), decoder_output.data.cpu().numpy()
def predict(self, X_da, X_utt, TC, turn, step_size):
with torch.no_grad():
dec_hidden = self._encode(X_da=X_da, X_utt=X_utt, TC=TC, step_size=step_size, turn=turn)
decoder_output = self.da_decoder(dec_hidden) # (batch_size, 1, DA_VOCAB)
decoder_output = decoder_output.squeeze(1) # (batch_size, DA_VOCAB)
decoder_output = F.softmax(decoder_output, dim=-1)
return decoder_output.data.cpu().numpy()
def _encode(self, X_da, X_utt, TC, turn, step_size):
if self.config['DApred']['use_da']:
da_context_hidden = self.da_context.initHidden(step_size)
# da_contexts = []
for x_da in X_da:
da_encoder_hidden = self.da_encoder(x_da) # (batch_size, 1, DA_HIDDEN)
da_context_output, da_context_hidden = self.da_context(da_encoder_hidden, da_context_hidden) # (batch_size, 1, DA_HIDDEN)
# da_contexts.append(da_context_output)
# da_context_output = torch.stack(da_contexts).permute(0, 1)
if self.config['DApred']['use_utt'] and not self.config['DApred']['use_uttcontext']:
utt_encoder_hidden = self.utt_encoder.initHidden(step_size)
utt_encoder_output, utt_encoder_hidden = self.utt_encoder(X_utt[-1], utt_encoder_hidden) # (batch_size, 1, UTT_HIDDEN)
if self.config['DApred']['use_da']:
dec_hidden = torch.cat((da_context_output, utt_encoder_output), dim=-1)
else:
dec_hidden = utt_encoder_output
elif self.config['DApred']['use_uttcontext']:
# utt_contexts = []
utt_context_hidden = self.utt_context.initHidden(step_size)
for i in range(len(X_utt)):
utt_encoder_hidden = self.utt_encoder.initHidden(step_size)
utt_encoder_output, utt_encoder_hidden = self.utt_encoder(X_utt[i], utt_encoder_hidden) # (batch_size, 1, UTT_HIDDEN)
# utt_encoder_output = utt_encoder_output.sum(dim=1).unsqueeze(1)
attns = self.attention(utt_encoder_output)
utt_encoder_output = (utt_encoder_output * attns).sum(dim=1).unsqueeze(1)
utt_encoder_output = torch.cat((utt_encoder_output, turn[i].float().unsqueeze(-1)), dim=-1)
utt_context_output, utt_context_hidden = self.utt_context(utt_encoder_output, utt_context_hidden) # (batch_size, 1, UTT_HIDDEN)
# utt_contexts.append(utt_context_output)
# utt_context_output = torch.stack(utt_contexts).permute(0, 1)
if self.config['DApred']['use_da']:
dec_hidden = torch.cat((da_context_output, utt_context_output), dim=-1)
if self.config['use_tod']:
tod_context_encoding = self.tod_bert(TC, return_dict=True)
tod_features = tod_context_encoding['last_hidden_state']
#print('Tod features', tod_features.shape)
tod_context_output = tod_features[:,0,:].unsqueeze(1)
dec_hidden = torch.cat((dec_hidden, tod_context_output), dim=-1)
dec_hidden = self.utt_encoder.dropout(dec_hidden)
#dec_hidden = torch.cat((da_context_output, utt_context_output), dim=-1) # (batch_size, 1, DEC_HIDDEN)
if not self.config['DApred']['use_dacontext']:
dec_hidden = torch.cat((da_encoder_hidden, utt_context_output), dim=-1)
else:
dec_hidden = utt_context_output
else:
dec_hidden = da_context_output
return dec_hidden
|
nilq/baby-python
|
python
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.now(),
'email': ['chris@fregly.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 4, 24),
}
dag = DAG('undeploy_prediction_codegen', default_args=default_args)
# TODO: dockerFileTag and dockerFilePath should be passed in from webhook
switch_to_aws = BashOperator(
task_id='switch_to_aws',
bash_command='sudo kubectl config use-context awsdemo',
dag=dag)
undeploy_container_aws = BashOperator(
task_id='undeploy_container_to_aws',
bash_command='sudo kubectl delete prediction-codegen',
dag=dag)
switch_to_gcp = BashOperator(
task_id='switch_to_gcp',
bash_command='sudo kubectl config use-context gcpdemo',
dag=dag)
undeploy_container_gcp = BashOperator(
task_id='undeploy_container_gcp',
bash_command='sudo kubectl delete prediction-codegen',
dag=dag)
# Setup Airflow DAG
undeploy_container_aws.set_upstream(switch_to_aws)
switch_to_gcp.set_upstream(undeploy_container_aws)
undeploy_container_gcp.set_upstream(switch_to_gcp)
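# Resulting linear task order defined by the set_upstream() calls above:
#   switch_to_aws -> undeploy_container_to_aws -> switch_to_gcp -> undeploy_container_gcp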
|
nilq/baby-python
|
python
|
from typing import Any, Tuple, Union
from lf3py.lang.annotation import FunctionAnnotation
from lf3py.routing.errors import UnresolvedArgumentsError
from lf3py.routing.types import Middleware
from lf3py.serialization.deserializer import Deserializer
from lf3py.serialization.errors import DeserializeError
from lf3py.task.data import Command
def resolve_args(middleware: Middleware, command: Command, dsn_spec: str) -> Union[Tuple[Any, dict], dict]:
try:
func_anno = FunctionAnnotation(middleware)
dsn_params = command.dsn.capture(dsn_spec)
dsn_kwargs = {
key: int(dsn_params[key]) if arg_anno.origin is int else dsn_params[key]
for key, arg_anno in func_anno.args.items()
if key in dsn_params
}
body_kwargs = {
key: command.data(arg_anno.origin)
for key, arg_anno in func_anno.args.items()
if key not in dsn_kwargs and not arg_anno.is_generics and issubclass(arg_anno.origin, Deserializer)
}
inject_kwargs = {**dsn_kwargs, **body_kwargs}
if func_anno.is_method:
return func_anno.receiver, inject_kwargs
else:
return inject_kwargs
except (DeserializeError, KeyError, ValueError) as e:
raise UnresolvedArgumentsError(e) from e
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding=utf8
from __future__ import unicode_literals
from datetime import timedelta
import collections
import functools
import os
import re
import string
from io import StringIO
import pytest
from hypothesis import given, settings, HealthCheck, assume
import hypothesis.strategies as st
import srt
REGISTER_SETTINGS = lambda name, **kwargs: settings.register_profile(
name, suppress_health_check=[HealthCheck.too_slow], deadline=None, **kwargs
)
REGISTER_SETTINGS("base")
REGISTER_SETTINGS("release", max_examples=1000)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "base"))
HOURS_IN_DAY = 24
TIMEDELTA_MAX_DAYS = 999999999
CONTENTLESS_SUB = functools.partial(
srt.Subtitle, index=1, start=timedelta(seconds=1), end=timedelta(seconds=2)
)
def is_strictly_legal_content(content):
"""
Filter out things that would violate strict mode. Illegal content
includes:
- A content section that starts or ends with a newline
- A content section that contains blank lines
"""
if content.strip("\r\n") != content:
return False
elif not content.strip():
return False
elif "\n\n" in content:
return False
else:
return True
def subs_eq(got, expected, any_order=False):
"""
Compare Subtitle objects using vars() so that differences are easy to
identify.
"""
got_vars = [frozenset(vars(sub).items()) for sub in got]
expected_vars = [frozenset(vars(sub).items()) for sub in expected]
if any_order:
assert collections.Counter(got_vars) == collections.Counter(expected_vars)
else:
assert got_vars == expected_vars
def timedeltas(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
"""
A Hypothesis strategy to generate timedeltas.
Right now {min,max}_value are shoved into multiple fields in timedelta(),
which is not very customisable, but it's good enough for our current test
purposes. If you need more precise control, you may need to add more
parameters to this function to be able to customise more freely.
"""
time_unit_strategy = st.integers(min_value=min_value, max_value=max_value)
timestamp_strategy = st.builds(
timedelta,
hours=time_unit_strategy,
minutes=time_unit_strategy,
seconds=time_unit_strategy,
)
return timestamp_strategy
def equivalent_timestamps(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
def string_timestamp(hours, minutes, seconds, msecs, paddings):
hours, minutes, seconds, msecs = map(
lambda v_and_p: "0" * v_and_p[1] + str(v_and_p[0]),
zip((hours, minutes, seconds, msecs), paddings),
)
return "{}:{}:{},{}".format(hours, minutes, seconds, msecs)
def ts_field_value():
return st.integers(min_value=min_value, max_value=max_value)
def zero_padding():
return st.integers(min_value=0, max_value=2)
@st.composite
def maybe_off_by_one_fields(draw):
field = draw(ts_field_value())
field_maybe_plus_one = draw(st.integers(min_value=field, max_value=field + 1))
return field_maybe_plus_one, field
def get_equiv_timestamps(h, m, s, ms2, ts1paddings, ts2paddings):
h2, h1 = h
m2, m1 = m
s2, s1 = s
ms1 = (
(h2 - h1) * 60 * 60 * 1000 + (m2 - m1) * 60 * 1000 + (s2 - s1) * 1000 + ms2
)
return (
string_timestamp(h2, m2, s2, ms2, ts2paddings),
string_timestamp(h1, m1, s1, ms1, ts1paddings),
)
return st.builds(
get_equiv_timestamps,
maybe_off_by_one_fields(),
maybe_off_by_one_fields(),
maybe_off_by_one_fields(),
ts_field_value(),
st.tuples(*[zero_padding() for _ in range(4)]),
st.tuples(*[zero_padding() for _ in range(4)]),
)
def subtitles(strict=True):
"""A Hypothesis strategy to generate Subtitle objects."""
# max_value settings are just to avoid overflowing TIMEDELTA_MAX_DAYS by
# using arbitrary low enough numbers.
#
# We also skip subs with start time >= end time, so we split them into two
# groups to avoid overlap.
start_timestamp_strategy = timedeltas(min_value=0, max_value=500000)
end_timestamp_strategy = timedeltas(min_value=500001, max_value=999999)
# \r is not legal inside Subtitle.content, it should have already been
# normalised to \n.
content_strategy = st.text(min_size=1).filter(lambda x: "\r" not in x)
proprietary_strategy = st.text().filter(
lambda x: all(eol not in x for eol in "\r\n")
)
if strict:
content_strategy = content_strategy.filter(is_strictly_legal_content)
subtitle_strategy = st.builds(
srt.Subtitle,
index=st.integers(min_value=0),
start=start_timestamp_strategy,
end=end_timestamp_strategy,
proprietary=proprietary_strategy,
content=content_strategy,
)
return subtitle_strategy
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file(input_subs):
srt_file = StringIO(srt.compose(input_subs, reindex=False))
reparsed_subs = srt.parse(srt_file)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file_bom(input_subs):
srt_file = StringIO("\ufeff" + srt.compose(input_subs, reindex=False))
reparsed_subs = srt.parse(srt_file)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict(input_subs):
composed = srt.compose(input_subs, reindex=False)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_ending_blank_line(input_subs):
"""
Many sub editors don't add a blank line to the end, and many editors accept
it. We should just accept this too in input.
"""
composed = srt.compose(input_subs, reindex=False)
composed_without_ending_blank = composed[:-1]
reparsed_subs = srt.parse(composed_without_ending_blank)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_eol_at_all(input_subs):
composed = srt.compose(input_subs, reindex=False)
composed_without_ending_blank = composed.rstrip("\r\n")
reparsed_subs = srt.parse(composed_without_ending_blank)
subs_eq(reparsed_subs, input_subs)
@given(st.text().filter(is_strictly_legal_content))
def test_compose_and_parse_strict_mode(content):
# sub.content should not have OS-specific line separators, only \n
assume("\r" not in content)
content = "\n" + content + "\n\n" + content + "\n"
sub = CONTENTLESS_SUB(content=content)
parsed_strict = list(srt.parse(sub.to_srt()))[0]
parsed_unstrict = list(srt.parse(sub.to_srt(strict=False)))[0]
# Strict mode should remove blank lines in content, leading, and trailing
# newlines.
assert not parsed_strict.content.startswith("\n")
assert not parsed_strict.content.endswith("\n")
assert "\n\n" not in parsed_strict.content
# When strict mode is false, no processing should be applied to the
# content (other than \r\n becoming \n).
assert parsed_unstrict.content == sub.content.replace("\r\n", "\n")
@given(st.integers(min_value=1, max_value=TIMEDELTA_MAX_DAYS))
def test_timedelta_to_srt_timestamp_can_go_over_24_hours(days):
srt_timestamp = srt.timedelta_to_srt_timestamp(timedelta(days=days))
srt_timestamp_hours = int(srt_timestamp.split(":")[0])
assert srt_timestamp_hours == days * HOURS_IN_DAY
@given(subtitles())
def test_subtitle_equality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
assert sub_1 == sub_2
@given(subtitles())
def test_subtitle_inequality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
sub_2.index += 1
assert sub_1 != sub_2
@given(subtitles())
def test_subtitle_from_scratch_equality(subtitle):
srt_block = subtitle.to_srt()
# Get two totally new sets of objects so as not to affect the hash
# comparison
sub_1 = list(srt.parse(srt_block))[0]
sub_2 = list(srt.parse(srt_block))[0]
subs_eq([sub_1], [sub_2])
# In case subs_eq and eq disagree for some reason
assert sub_1 == sub_2
assert hash(sub_1) == hash(sub_2)
@given(st.lists(subtitles()))
def test_parsing_spaced_arrow(subs):
spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "- >")
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_spaced_ender_arrow(subs):
# Seen in BSG subtitles
spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "-- >")
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_ws_arrow(subs):
spaced_block = srt.compose(subs, reindex=False, strict=False).replace(
" --> ", "-->"
)
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.text(string.whitespace), st.lists(subtitles()))
def test_parsing_leading_whitespace(ws, subs):
prews_block = ws + srt.compose(subs, reindex=False, strict=False)
reparsed_subtitles = srt.parse(prews_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_negative_index(subs):
for sub in subs:
sub.index *= -1
prews_block = srt.compose(subs, reindex=False, strict=False)
reparsed_subtitles = srt.parse(prews_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_content_with_blank_lines(subs):
for subtitle in subs:
# We stuff a blank line in the middle so as to trigger the "special"
# content parsing for erroneous SRT files that have blank lines.
subtitle.content = subtitle.content + "\n\n" + subtitle.content
reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_content(subs):
for subtitle in subs:
subtitle.content = ""
reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()), st.lists(subtitles()), st.text(alphabet="\n\r\t "))
def test_subs_missing_content_removed(content_subs, contentless_subs, contentless_text):
for sub in contentless_subs:
sub.content = contentless_text
subs = contentless_subs + content_subs
composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
# We should have composed the same subs as there are in content_subs, as
# all contentless_subs should have been stripped.
subs_eq(composed_subs, content_subs, any_order=True)
# The subtitles should be reindexed starting at start_index, excluding
# contentless subs
default_start_index = 1
assert [sub.index for sub in composed_subs] == list(
range(default_start_index, default_start_index + len(composed_subs))
)
@given(
st.lists(subtitles()),
st.lists(subtitles()),
timedeltas(min_value=-999, max_value=-1),
)
def test_subs_starts_before_zero_removed(positive_subs, negative_subs, negative_td):
for sub in negative_subs:
sub.start = negative_td
sub.end = negative_td # Just to avoid tripping any start >= end errors
subs = positive_subs + negative_subs
composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
# There should be no negative subs
subs_eq(composed_subs, positive_subs, any_order=True)
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex(input_subs, start_index):
for sub in input_subs:
# Pin all subs to same end time so that start time is compared only,
# must be guaranteed to be < sub.start, see how
# start_timestamp_strategy is done
sub.end = timedelta(500001)
reindexed_subs = list(
srt.sort_and_reindex(input_subs, start_index=start_index, in_place=True)
)
# The subtitles should be reindexed starting at start_index
assert [sub.index for sub in reindexed_subs] == list(
range(start_index, start_index + len(input_subs))
)
# The subtitles should be sorted by start time
expected_sorting = sorted(input_subs, key=lambda sub: sub.start)
assert reindexed_subs == expected_sorting
@given(st.lists(subtitles()))
def test_sort_and_reindex_no_skip(input_subs):
# end time > start time should not trigger a skip if skip=False
for sub in input_subs:
old_start = sub.start
sub.start = sub.end
sub.end = old_start
reindexed_subs = list(srt.sort_and_reindex(input_subs, skip=False))
# Nothing should have been skipped
assert len(reindexed_subs) == len(input_subs)
@given(st.lists(subtitles(), min_size=1))
def test_sort_and_reindex_same_start_time_uses_end(input_subs):
for sub in input_subs:
# Pin all subs to same start time so that end time is compared only
sub.start = timedelta(1)
reindexed_subs = list(srt.sort_and_reindex(input_subs, in_place=True))
# The subtitles should be sorted by end time when start time is the same
expected_sorting = sorted(input_subs, key=lambda sub: sub.end)
assert reindexed_subs == expected_sorting
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex_not_in_place_matches(input_subs, start_index):
# Make copies for both sort_and_reindex calls so that they can't affect
# each other
not_in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
nip_ids = [id(sub) for sub in not_in_place_subs]
ip_ids = [id(sub) for sub in in_place_subs]
not_in_place_output = list(
srt.sort_and_reindex(not_in_place_subs, start_index=start_index)
)
in_place_output = list(
srt.sort_and_reindex(in_place_subs, start_index=start_index, in_place=True)
)
# The results in each case should be the same
subs_eq(not_in_place_output, in_place_output)
# Not in place sort_and_reindex should have created new subs
assert not any(id(sub) in nip_ids for sub in not_in_place_output)
# In place sort_and_reindex should be reusing the same subs
assert all(id(sub) in ip_ids for sub in in_place_output)
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_noncontiguous(subs, fake_idx, garbage, fake_timedelta):
composed = srt.compose(subs)
# Put some garbage between subs that should trigger our failed parsing
# detection. Since we do some magic to try and detect blank lines that
# don't really delimit subtitles, it has to look at least a little like an
# SRT block.
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
composed = composed.replace(
"\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
)
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_noncontiguous_ignore_errors(subs, fake_idx, garbage, fake_timedelta):
composed = srt.compose(subs)
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
composed = composed.replace(
"\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
)
# Should not raise, we have ignore_errors
list(srt.parse(composed, ignore_errors=True))
def _parseable_as_int(text):
try:
int(text)
except ValueError:
return False
return True
def _parseable_as_float(text):
try:
float(text)
except ValueError:
return False
return True
@given(st.lists(subtitles()), st.text(min_size=1))
def test_parser_noncontiguous_leading(subs, garbage):
# Issue #50 permits leading whitespace, see test_parsing_leading_whitespace
assume(not garbage.isspace())
# Issue #56 permits negative indexes, see test_parsing_negative_index. It
# also shouldn't just be a number, because then we'd confuse it with our
# index...
assume(garbage.strip()[0] != ".")
assume(garbage.strip()[0] != "-")
assume(not _parseable_as_int(garbage.strip()))
assume(not _parseable_as_float(garbage.strip()))
# Put some garbage at the beginning that should trigger our noncontiguity
# checks
composed = garbage + srt.compose(subs)
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_didnt_match_to_end_raises(subs, fake_idx, garbage, fake_timedelta):
srt_blocks = [sub.to_srt() for sub in subs]
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
garbage = "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
srt_blocks.append(garbage)
composed = "".join(srt_blocks)
with pytest.raises(srt.SRTParseError) as thrown_exc:
list(srt.parse(composed))
# Since we will consume as many \n as needed until we meet the lookahead
# assertion, leading newlines in `garbage` will be stripped.
garbage_stripped = garbage.lstrip("\n")
assert garbage_stripped == thrown_exc.value.unmatched_content
assert len(composed) - len(garbage_stripped) == thrown_exc.value.expected_start
assert len(composed) == thrown_exc.value.actual_start
@given(st.lists(subtitles()))
def test_parser_can_parse_with_dot_msec_delimiter(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
dot_srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
# We should only replace the first two commas (the start and end msec
# delimiters), since a comma might also appear in the proprietary
# metadata, which would cause this test to fail.
dot_timestamp = srt_lines[1].replace(",", ".", 2)
srt_lines[1] = dot_timestamp
dot_srt_blocks.append("\n".join(srt_lines))
composed_with_dots = "".join(dot_srt_blocks)
reparsed_subs = srt.parse(composed_with_dots)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_fullwidth_delimiter(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
dot_srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
dot_timestamp = srt_lines[1].replace(",", "，", 1).replace(":", "：", 1)
srt_lines[1] = dot_timestamp
dot_srt_blocks.append("\n".join(srt_lines))
composed_with_fullwidth = "".join(dot_srt_blocks)
reparsed_subs = srt.parse(composed_with_fullwidth)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_no_msec(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
# We should only strip the first two millisecond fields (the start and end
# timestamps), since similar text might also appear in the proprietary
# metadata, which would cause this test to fail.
srt_lines[1] = re.sub(",[0-9]+", "", srt_lines[1], 2)
srt_blocks.append("\n".join(srt_lines))
composed = "".join(srt_blocks)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, subs)
@given(subtitles())
def test_repr_doesnt_crash(sub):
# Not much we can do here, but we should make sure __repr__ doesn't crash
# or anything and it does at least vaguely look like what we want
assert "Subtitle" in repr(sub)
assert str(sub.index) in repr(sub)
@given(subtitles(), subtitles())
def test_parser_accepts_final_no_newline_no_content(sub1, sub2):
# Limit size so we know how much to remove
sub2.content = ""
subs = [sub1, sub2]
# Remove the trailing newlines so that there are none. We cannot use
# rstrip, since other trailing characters might get matched into the
# proprietary field.
stripped_srt_blocks = srt.compose(subs, reindex=False)[:-2]
reparsed_subs = srt.parse(stripped_srt_blocks)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_accepts_newline_no_content(subs):
for sub in subs:
# Limit size so we know how many lines to remove
sub.content = ""
# Remove the last \n so that there is only one
stripped_srt_blocks = "".join(sub.to_srt()[:-1] for sub in subs)
reparsed_subs = srt.parse(stripped_srt_blocks)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict_crlf(input_subs):
composed_raw = srt.compose(input_subs, reindex=False)
composed = composed_raw.replace("\n", "\r\n")
reparsed_subs = list(srt.parse(composed))
for sub in reparsed_subs:
sub.content = sub.content.replace("\r\n", "\n")
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.one_of(st.just("\n"), st.just("\r\n")))
def test_compose_and_parse_strict_custom_eol(input_subs, eol):
composed = srt.compose(input_subs, reindex=False, eol=eol)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, input_subs)
@given(equivalent_timestamps())
def test_equal_timestamps_despite_different_fields_parsed_as_equal(timestamps):
ts1, ts2 = timestamps
assert srt.srt_timestamp_to_timedelta(ts1) == srt.srt_timestamp_to_timedelta(ts2)
@given(timedeltas())
def test_bad_timestamp_format_raises(ts):
ts = srt.timedelta_to_srt_timestamp(ts)
ts = ts.replace(":", "t", 1)
with pytest.raises(srt.TimestampParseError):
srt.srt_timestamp_to_timedelta(ts)
@given(st.lists(subtitles()), st.lists(st.sampled_from(string.whitespace)))
def test_can_parse_index_trailing_ws(input_subs, whitespace):
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = lines[0] + "".join(whitespace)
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_parse_index_with_dot(input_subs):
# Seen in Battlestar Galactica subs
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = lines[0] + "." + lines[0]
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.lists(st.just("0")))
def test_can_parse_index_leading_zeroes(input_subs, zeroes):
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = "".join(zeroes) + lines[0]
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
|
nilq/baby-python
|
python
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
import argparse
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
import numpy
import scipy.stats
import torch
import torch.optim as optim
import jammy_flows
from jammy_flows import helper_fns
import pylab
from matplotlib import rc
import random
def seed_everything(seed_no):
random.seed(seed_no)
numpy.random.seed(seed_no)
torch.manual_seed(seed_no)
## Generate data that follows letter shapes using some TTF template
###################################################################
def sample_character(char, path='OpenSans-Bold.ttf', fontsize=60, width_per_cell=0.5, num_samples=1000, center_coords=(0,0), manifold_type="e"):
"""
Based on https://stackoverflow.com/a/27753869/190597 (jsheperd)
"""
font = ImageFont.truetype(path, fontsize)
w, h = font.getsize(char)
h *= 2
image = Image.new('L', (w, h), 1)
draw = ImageDraw.Draw(image)
draw.text((0, 0), char, font=font)
arr = np.asarray(image)
arr = np.where(arr, 0, 1)
arr = arr[(arr != 0).any(axis=1)]
one_mask=arr.T==1
num_x_cells=one_mask.shape[0]
num_y_cells=one_mask.shape[1]
## discretized random sampling that follows letter shape
xvals, yvals=np.meshgrid(np.arange(one_mask.shape[0]), np.arange(one_mask.shape[1]))
xvals=xvals.T.astype('float64')
yvals=yvals.T.astype('float64')
xvals-=num_x_cells//2
yvals-=num_y_cells//2
# add some extra noise
xvals+=np.random.normal(size=xvals.shape)
yvals+=np.random.normal(size=yvals.shape)
xvals*=width_per_cell
yvals*=width_per_cell*(-1.0) ## have to flip y
one_coords=np.hstack([xvals[one_mask][:,None], yvals[one_mask][:,None]])
sample_indices=np.random.choice(len(one_coords), num_samples)
samples=one_coords[sample_indices]
samples[:,0]+=center_coords[0]
samples[:,1]+=center_coords[1]
## scale azimuth to make it similar to zenith
if(manifold_type=="s"):
azi_diff=(samples[:,1]-numpy.pi)
samples[:,1]=numpy.pi+azi_diff*2
return samples
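## Hedged usage sketch (added for illustration, not called by the training
## script below): shows how sample_character can be used on its own. It
## assumes an OpenSans-Bold.ttf file is available next to this script; the
## letter, sample count and centering are arbitrary choices.
def _demo_sample_character():
    demo_points = sample_character("J", num_samples=500, width_per_cell=0.5,
                                   center_coords=(0.0, 0.0), manifold_type="e")
    # Each row is an (x, y) coordinate that falls inside the glyph outline.
    print(demo_points.shape)  # -> (500, 2)
    return demo_points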
## this function generates train and test data
def sample_data(pdf_def, sentence, num_samples=10000):
words=sentence.split(" ")
num_words=len(words)
last_len=len(words[0])
for w in words:
if(len(w)!=last_len):
raise Exception("All words in sentence must be of same length")
## every char takes 2 dimensions
manifold_str=""
len_per_word=0
pdf_dim=0
for pdf in pdf_def.split("+"):
if(int(pdf[1:])%2!=0):
raise Exception("Characters take 2 dimensions, so string is visualized with 2*len(str) dims. Every PDF must have a dimension divisible by 2 for simplicity.")
len_per_word=int(pdf[1:])//2
pdf_dim+=int(pdf[1:])
if("e" in pdf):
manifold_str+=len_per_word*"e"
elif("s" in pdf):
manifold_str+=len_per_word*"s"
word_indices=np.random.choice(num_words, num_samples)
_, class_occurences = np.unique(word_indices, return_counts=True)
labels=torch.randn( (num_samples, pdf_dim)).type(torch.float64)
## loop words
for w_index, w in enumerate(words):
this_w_sample=[]
## loop char per word
for c_index, c in enumerate(w):
center=(0,0)
stretch=0.5
## if sphere, center character at equator
if(manifold_str[c_index]=="s"):
center=(np.pi/2.0, np.pi)
stretch=0.05
res=sample_character(c, num_samples=class_occurences[w_index], width_per_cell=stretch, center_coords=center, manifold_type=manifold_str[c_index])
if(manifold_str[c_index]=="s"):
assert( ((res[:,0]<0) | (res[:,0]>np.pi)).sum()==0)
assert( ((res[:,1]<0) | (res[:,1]>2*np.pi)).sum()==0)
this_w_sample.append(torch.from_numpy(res))
tot_sample=torch.cat(this_w_sample, dim=1)
labels[word_indices==w_index]=tot_sample
onehot_input = torch.nn.functional.one_hot(torch.from_numpy(word_indices), num_words).type(torch.float64)
return onehot_input, labels
#######################################################################
## plot the model during training
def plot_test(test_data, test_labels, model, words, fname="figs/test.png"):
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
num_words=len(torch.unique(test_data, dim=0))
fig=pylab.figure(figsize=((num_words+1)*4, 4))
gridspec=fig.add_gridspec(1, num_words+1)
word_ids=torch.nn.functional.one_hot(torch.arange(num_words), num_words).type(torch.float64)
## 2 * log_pdf differences
pdf_res, base_pdf_res, _=model(test_labels)#, conditional_input=test_data)
dim=test_labels.shape[1]
glob_dim_index=0
bounds=[]
bmin=9999
bmax=-9999
mask=[]
for pdf_str in model.pdf_defs_list:
this_dim=int(pdf_str[1:])
this_type=pdf_str[0]
if(this_type=="e"):
for ind in range(this_dim):
this_min=test_labels.detach().numpy()[:,glob_dim_index].min()
this_max=test_labels.detach().numpy()[:,glob_dim_index].max()
if(this_min<bmin):
bmin=this_min
if(this_max>bmax):
bmax=this_max
glob_dim_index+=1
else:
glob_dim_index+=2
continue
sphere_plot_type="standard"
for pdf_str in model.pdf_defs_list:
this_dim=int(pdf_str[1:])
this_type=pdf_str[0]
if(this_type=="s"):
if(sphere_plot_type=="standard"):
bounds.append([0,np.pi])
bounds.append([0,2*np.pi])
else:
bounds.append([-2,2])
bounds.append([-2,2])
glob_dim_index+=2
else:
for ind in range(this_dim):
bounds.append([bmin,bmax])
logpz_max= scipy.stats.multivariate_normal.logpdf( dim*[0], mean=dim*[0])
twice_pdf_diff=2*(logpz_max - base_pdf_res)
coverage_probs=np.linspace(0.01,0.99,100)
true_twice_llhs=scipy.stats.chi2.ppf(coverage_probs, df=dim)
## plot PDF for individual "word input data"
colors=pylab.cm.tab10.colors
cov_ax=fig.add_subplot(gridspec[0,num_words])
for word_index, wid in enumerate(word_ids):
helper_fns.visualize_pdf(model, fig, gridspec=gridspec[0,word_index], conditional_input=None, total_pdf_eval_pts=2000, nsamples=10000, contour_probs=[], hide_labels=True,bounds=bounds,s2_norm=sphere_plot_type)
## plot coverage
this_coverage=twice_pdf_diff[(wid[word_index]==test_data[:,word_index])]
act_cov=[]
for ind,true_cov in enumerate(coverage_probs):
act_cov.append(sum(this_coverage<true_twice_llhs[ind])/float(len(this_coverage)))
cov_ax.plot(coverage_probs, act_cov, label=r"$p(x|'%s')$" % words[word_index], color=colors[word_index])
cov_ax.plot([0.0,1.0],[0.0,1.0], color="k", lw=2.0, ls="--")
cov_ax.set_xlim(0,1)
cov_ax.set_ylim(0,1)
cov_ax.grid(True)
cov_ax.legend(loc="upper right")
cov_ax.set_title("Coverage")
fig.suptitle("pdf structure: %s" % "+".join(model.pdf_defs_list))
fig.tight_layout()
fig.savefig(fname)
pylab.close(fig)
#test_evals, standard_normal_base_evals, _=model(test_labels, conditional_input=test_data)
############################
if __name__ == "__main__":
parser = argparse.ArgumentParser('train_example')
parser.add_argument("-sentence", type=str, default="JAMMY FLOWS")
parser.add_argument("-pdf_def", type=str, default="e4+s2+e4")
parser.add_argument("-layer_def", type=str, default="gggg+n+gggg")
parser.add_argument("-train_size", type=int, default=200000)
parser.add_argument("-batch_size", type=int, default=20)
parser.add_argument("-test_size", type=int, default=1000)
parser.add_argument("-lr", type=float, default=0.001)
args=parser.parse_args()
seed_everything(1)
assert(args.train_size % args.batch_size==0)
## train data used for training
train_data, train_labels=sample_data(args.pdf_def, args.sentence, num_samples=args.train_size)
## test used to calculate coverage
test_data, test_labels=sample_data(args.pdf_def, args.sentence, num_samples=args.test_size)
extra_flow_defs=dict()
extra_flow_defs["n"]=dict()
extra_flow_defs["n"]["kwargs"]=dict()
extra_flow_defs["n"]["kwargs"]["zenith_type_layers"]="g"
extra_flow_defs["n"]["kwargs"]["use_extra_householder"]=0
word_pdf=jammy_flows.pdf(args.pdf_def, args.layer_def, conditional_input_dim=None, hidden_mlp_dims_sub_pdfs="128",flow_defs_detail=extra_flow_defs, use_custom_low_rank_mlps=False,
custom_mlp_highway_mode=4)
word_pdf.count_parameters(verbose=True)
## initialize params with the test sample (only yields an advantage for Gaussianization flows)
word_pdf.init_params(data=test_labels)
## start training loop
num_batches=args.train_size//args.batch_size
num_epochs=300
plot_every_n=200
glob_counter=0
cur_lr=args.lr
for ep_id in range(num_epochs):
optimizer = optim.Adam(word_pdf.parameters(), lr=cur_lr)
for batch_id in range(num_batches):
## get new batch
batch_data, batch_labels=train_data[batch_id*args.batch_size:batch_id*args.batch_size+args.batch_size], train_labels[batch_id*args.batch_size:batch_id*args.batch_size+args.batch_size]
## reset accumulated grad
optimizer.zero_grad()
## evaluate PDF
log_pdf, _,_=word_pdf(batch_labels)#, conditional_input=batch_data)
## neg log-loss
loss=-log_pdf.mean()
print("loss ", loss)
## backprop
loss.backward()
## take a gradient step
optimizer.step()
## plot test data
if(glob_counter%plot_every_n==0):
with torch.no_grad():
print("VALIDATION EVAL")
val_log_pdf, _, _=word_pdf(test_labels)#, conditional_input=test_data)
val_loss=-val_log_pdf.mean()
print("ep: %d / batch_id: %d / val-loss %.3f" % (ep_id, batch_id, val_loss))
print("before plotting")
print("----------------------------->")
plot_test(test_data, test_labels, word_pdf, args.sentence.split(" "), fname="./figs/%.6d.png" % glob_counter)
glob_counter+=1
cur_lr*=0.9
|
nilq/baby-python
|
python
|
from m5.params import *
from m5.SimObject import SimObject
from Controller import RubyController
class PMMU(RubyController):
type = 'PMMU'
cxx_class = 'PMMU'
cxx_header = "mem/spm/pmmu.hh"
# version = Param.Int("");
page_size_bytes = Param.Int(512,"Size of a SPM page in bytes")
ruby_system = Param.RubySystem(NULL, "")
responseFromSPM = Param.MessageBuffer("")
responseToSPM = Param.MessageBuffer("")
requestFromSPM = Param.MessageBuffer("")
requestToSPM = Param.MessageBuffer("")
responseToNetwork = Param.MessageBuffer("")
requestToNetwork = Param.MessageBuffer("")
governor = Param.BaseGovernor("")
gov_type = Param.String("Local", "Governor type")
spm_s_side = SlavePort("Slave port where SPM pushes requests/responses")
spm_m_side = MasterPort("Master port to send requests/responses to SPM")
# system = Param.System(Parent.any, "System we belong to")
# system = Param.System("System we belong to")
# spm_memory = Param.SPM("")
# cache_memory = Param.BaseCache("")
|
nilq/baby-python
|
python
|
import requests
import mimetypes
import hashlib
class Tebi:
def __init__(self, bucket, **kwargs):
self.bucket = "https://" + bucket
self.auth = kwargs.get('auth', None)
if (self.auth):
self.auth = "TB-PLAIN " + self.auth
def GetObject(self, key):
headers = {}
if (self.auth):
headers["Authorization"] = self.auth
response = requests.get(self.bucket+"/"+key, headers=headers)
return response
def PutObject(self, key, obj, **kwargs):
file = kwargs.get('file', None)
mime = kwargs.get('ContentType', None)
auth = kwargs.get('auth', self.auth)
CacheControl = kwargs.get('CacheControl', None)
data = obj
if (mime != None and mime == "auto" and file != None):
mime = mimetypes.guess_type(file)[0]
headers = {}
if (mime != None):
headers["Content-Type"] = mime
if (CacheControl != None):
headers["Cache-Control"] = CacheControl
if (self.auth):
headers["Authorization"] = auth
if (file and not data):
# Read the file into memory so it can be both hashed and uploaded
data = open(file, "rb").read()
headers["Content-MD5"] = hashlib.md5(data).hexdigest()
response = requests.put(self.bucket + "/" + key, data=data, headers=headers)
return response
def ListObjects(self, key, **kwargs):
auth = kwargs.get('auth', self.auth)
headers = {
"Authorization": auth
}
response = requests.get(self.bucket+"/?"+key, headers=headers)
return response
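## Hedged usage sketch: one way the Tebi client above might be exercised.
## The bucket host, object key, credential string and list query below are
## placeholders only, not real values.
def _demo_tebi_usage():
    client = Tebi("example-bucket.tebi.io", auth="ACCOUNT_KEY:ACCOUNT_SECRET")
    # Upload a small byte payload, then read it back and list the bucket.
    put_resp = client.PutObject("hello.txt", b"hello world")
    get_resp = client.GetObject("hello.txt")
    list_resp = client.ListObjects("list-type=2")
    return put_resp.status_code, get_resp.status_code, list_resp.status_code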
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function, unicode_literals
import cwltool.main
import pkg_resources
import signal
import sys
import logging
from cwl_tes.tes import TESWorkflow
from cwl_tes.__init__ import __version__
log = logging.getLogger("tes-backend")
log.setLevel(logging.INFO)
console = logging.StreamHandler()
# formatter = logging.Formatter("[%(asctime)s]\t[%(levelname)s]\t%(message)s")
# console.setFormatter(formatter)
log.addHandler(console)
def versionstring():
pkg = pkg_resources.require("cwltool")
if pkg:
cwltool_ver = pkg[0].version
else:
cwltool_ver = "unknown"
return "%s %s with cwltool %s" % (sys.argv[0], __version__, cwltool_ver)
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = cwltool.main.arg_parser()
parser = add_args(parser)
parsed_args = parser.parse_args(args)
if not len(args) >= 1:
print(versionstring())
print("CWL document required, no input file was provided")
parser.print_usage()
return 1
if parsed_args.version:
print(versionstring())
return 0
if parsed_args.tes is None:
print(versionstring())
parser.print_usage()
print("cwl-tes: error: argument --tes is required")
return 1
if parsed_args.quiet:
log.setLevel(logging.WARN)
if parsed_args.debug:
log.setLevel(logging.DEBUG)
blacklist_false = ["no_container", "disable_pull", "disable_net",
"custom_net", "no_match_user"]
for f in blacklist_false:
if vars(parsed_args).get(f):
log.warning("arg: '%s' has no effect in cwl-tes" % (f))
blacklist_true = ["enable_pull"]
for f in blacklist_true:
if not vars(parsed_args).get(f):
log.warning("arg: '%s' has no effect in cwl-tes" % (f))
# custom
if not parsed_args.rm_container:
log.warning("arg: 'leave_container' has no effect in cwl-tes")
tes_workflow = TESWorkflow(parsed_args.tes, vars(parsed_args))
# setup signal handler
def signal_handler(*args):
log.info(
"recieved control-c signal"
)
log.info(
"terminating thread(s)..."
)
log.warning(
"remote TES processes %s may keep running" %
([t.id for t in tes_workflow.threads])
)
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
return cwltool.main.main(
args=parsed_args,
executor=tes_workflow.executor,
makeTool=tes_workflow.make_tool,
versionfunc=versionstring,
logger_handler=console
)
def add_args(parser):
parser.add_argument(
"--tes",
type=str,
help="GA4GH TES Service URL"
)
return parser
if __name__ == "__main__":
sys.exit(main())
|
nilq/baby-python
|
python
|
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.utils.timezone import now
from rest_framework import generics
from bluebottle.bluebottle_drf2.pagination import BluebottlePagination
from bluebottle.clients import properties
from .models import Page
from .serializers import PageSerializer
class PageList(generics.ListAPIView):
queryset = Page.objects.all()
serializer_class = PageSerializer
pagination_class = BluebottlePagination
def get_queryset(self):
qs = super(PageList, self).get_queryset()
# Set language if supplied
language = self.kwargs.get('language', None)
if language:
qs = qs.filter(language=language)
qs = qs.filter(status=Page.PageStatus.published)
qs = qs.filter(publication_date__lte=now())
qs = qs.filter(Q(publication_end_date__gte=now()) |
Q(publication_end_date__isnull=True))
return qs
class PageDetail(generics.RetrieveAPIView):
queryset = Page.objects.all()
serializer_class = PageSerializer
def get_queryset(self):
qs = super(PageDetail, self).get_queryset()
qs = qs.filter(status=Page.PageStatus.published)
qs = qs.filter(publication_date__lte=now())
qs = qs.filter(Q(publication_end_date__gte=now()) |
Q(publication_end_date__isnull=True))
return qs
def get_object(self, queryset=None):
queryset = self.get_queryset()
try:
return queryset.get(
language=self.kwargs['language'],
slug=self.kwargs['slug']
)
except ObjectDoesNotExist:
try:
return queryset.get(
language=properties.LANGUAGE_CODE,
slug=self.kwargs['slug']
)
except ObjectDoesNotExist:
raise Http404
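## Hedged sketch (illustrative only, not part of this module): one way these
## views could be wired into URL patterns, showing the 'language' and 'slug'
## kwargs they expect. The real project routing may differ.
def _example_urlpatterns():
    from django.urls import re_path  # available from Django 2.0 onwards
    return [
        re_path(r'^(?P<language>[a-z]{2})/pages/$', PageList.as_view(), name='page-list'),
        re_path(r'^(?P<language>[a-z]{2})/pages/(?P<slug>[-\w]+)$', PageDetail.as_view(), name='page-detail'),
    ]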
|
nilq/baby-python
|
python
|