import pandas as pd

f = open('../data/booklist.txt', 'r', encoding='UTF-8')
while True:
    line = f.readline()
    if not line:
        break
    print(line)
f.close()

user_id, book_id, score = input("Please enter the user id, book id, and rating.\n").split(',')
print(user_id)
print(book_id)
print(score)
print(line)  # note: line is an empty string here, since the read loop exited at end of file
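# Hedged sketch (not part of the original snippet): pandas is imported above but
# never used. Assuming each line of booklist.txt holds a single book title with
# no commas, the same file could also be loaded into a DataFrame:
books = pd.read_csv('../data/booklist.txt', header=None, names=['title'], encoding='UTF-8')
print(books.head())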
|
#!/usr/bin/env python
# Copyright 2014-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import shutil
import os
from os.path import join, exists
from .recorder import Recorder, VerifyError
from PIL import Image
class TestRecorder(unittest.TestCase):
def setUp(self):
self.outputdir = tempfile.mkdtemp()
self.inputdir = tempfile.mkdtemp()
self.tmpimages = []
self.recorder = Recorder(self.inputdir, self.outputdir)
def create_temp_image(self, name, dimens, color):
im = Image.new("RGBA", dimens, color)
filename = os.path.join(self.inputdir, name)
im.save(filename, "PNG")
im.close()
return filename
    def make_metadata(self, contents):
        with open(os.path.join(self.inputdir, "metadata.xml"), "w") as f:
            f.write(contents)
def tearDown(self):
for f in self.tmpimages:
f.close()
shutil.rmtree(self.outputdir)
shutil.rmtree(self.inputdir)
def test_create_temp_image(self):
im = self.create_temp_image("foobar", (100, 10), "blue")
self.assertTrue(os.path.exists(im))
def test_recorder_creates_dir(self):
shutil.rmtree(self.outputdir)
self.make_metadata("""<screenshots></screenshots>""")
self.recorder.record()
self.assertTrue(os.path.exists(self.outputdir))
def test_single_input(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>1</tile_width>
<tile_height>1</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
self.assertTrue(exists(join(self.outputdir, "foobar.png")))
def test_two_files(self):
self.create_temp_image("foo.png", (10, 10), "blue")
self.create_temp_image("bar.png", (10, 10), "red")
self.make_metadata("""<screenshots>
<screenshot>
<name>foo</name>
<tile_width>1</tile_width>
<tile_height>1</tile_height>
</screenshot>
<screenshot>
<name>bar</name>
<tile_width>1</tile_width>
<tile_height>1</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
self.assertTrue(exists(join(self.outputdir, "foo.png")))
self.assertTrue(exists(join(self.outputdir, "bar.png")))
def test_one_col_tiles(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.create_temp_image("foobar_0_1.png", (10, 10), "red")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>1</tile_width>
<tile_height>2</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
with Image.open(join(self.outputdir, "foobar.png")) as im:
(w, h) = im.size
self.assertEqual(10, w)
self.assertEqual(20, h)
self.assertEqual((0, 0, 255, 255), im.getpixel((1, 1)))
self.assertEqual((255, 0, 0, 255), im.getpixel((1, 11)))
def test_one_row_tiles(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.create_temp_image("foobar_1_0.png", (10, 10), "red")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>2</tile_width>
<tile_height>1</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
with Image.open(join(self.outputdir, "foobar.png")) as im:
(w, h) = im.size
self.assertEqual(20, w)
self.assertEqual(10, h)
self.assertEqual((0, 0, 255, 255), im.getpixel((1, 1)))
self.assertEqual((255, 0, 0, 255), im.getpixel((11, 1)))
def test_fractional_tiles(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.create_temp_image("foobar_1_0.png", (9, 10), "red")
self.create_temp_image("foobar_0_1.png", (10, 8), "red")
self.create_temp_image("foobar_1_1.png", (9, 8), "blue")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>2</tile_width>
<tile_height>2</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
with Image.open(join(self.outputdir, "foobar.png")) as im:
(w, h) = im.size
self.assertEqual(19, w)
self.assertEqual(18, h)
self.assertEqual((0, 0, 255, 255), im.getpixel((1, 1)))
self.assertEqual((255, 0, 0, 255), im.getpixel((11, 1)))
self.assertEqual((0, 0, 255, 255), im.getpixel((11, 11)))
self.assertEqual((255, 0, 0, 255), im.getpixel((1, 11)))
def test_verify_success(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>1</tile_width>
<tile_height>1</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
self.recorder.verify()
def test_verify_failure(self):
self.create_temp_image("foobar.png", (10, 10), "blue")
self.make_metadata("""<screenshots>
<screenshot>
<name>foobar</name>
<tile_width>1</tile_width>
<tile_height>1</tile_height>
</screenshot>
</screenshots>""")
self.recorder.record()
os.unlink(join(self.inputdir, "foobar.png"))
self.create_temp_image("foobar.png", (10, 10), "red")
try:
self.recorder.verify()
self.fail("expected exception")
except VerifyError:
pass # expected
if __name__ == '__main__':
unittest.main()
|
import json
import time
import unittest
from mock import Mock, patch
from patroni.dcs.kubernetes import Kubernetes, KubernetesError, k8s_client, RetryFailedError
from threading import Thread
from . import SleepException
def mock_list_namespaced_config_map(*args, **kwargs):
metadata = {'resource_version': '1', 'labels': {'f': 'b'}, 'name': 'test-config',
'annotations': {'initialize': '123', 'config': '{}'}}
items = [k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata))]
metadata.update({'name': 'test-leader', 'annotations': {'optime': '1234', 'leader': 'p-0', 'ttl': '30s'}})
items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata)))
metadata.update({'name': 'test-failover', 'annotations': {'leader': 'p-0'}})
items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata)))
metadata.update({'name': 'test-sync', 'annotations': {'leader': 'p-0'}})
items.append(k8s_client.V1ConfigMap(metadata=k8s_client.V1ObjectMeta(**metadata)))
metadata = k8s_client.V1ObjectMeta(resource_version='1')
return k8s_client.V1ConfigMapList(metadata=metadata, items=items, kind='ConfigMapList')
def mock_list_namespaced_endpoints(*args, **kwargs):
target_ref = k8s_client.V1ObjectReference(kind='Pod', resource_version='10', name='p-0',
namespace='default', uid='964dfeae-e79b-4476-8a5a-1920b5c2a69d')
address0 = k8s_client.V1EndpointAddress(ip='10.0.0.0', target_ref=target_ref)
address1 = k8s_client.V1EndpointAddress(ip='10.0.0.1')
port = k8s_client.V1EndpointPort(port=5432, name='postgresql', protocol='TCP')
subset = k8s_client.V1EndpointSubset(addresses=[address1, address0], ports=[port])
metadata = k8s_client.V1ObjectMeta(resource_version='1', labels={'f': 'b'}, name='test',
annotations={'optime': '1234', 'leader': 'p-0', 'ttl': '30s'})
endpoint = k8s_client.V1Endpoints(subsets=[subset], metadata=metadata)
metadata = k8s_client.V1ObjectMeta(resource_version='1')
return k8s_client.V1EndpointsList(metadata=metadata, items=[endpoint], kind='V1EndpointsList')
def mock_list_namespaced_pod(*args, **kwargs):
metadata = k8s_client.V1ObjectMeta(resource_version='1', name='p-0', annotations={'status': '{}'},
uid='964dfeae-e79b-4476-8a5a-1920b5c2a69d')
status = k8s_client.V1PodStatus(pod_ip='10.0.0.0')
spec = k8s_client.V1PodSpec(hostname='p-0', node_name='kind-control-plane', containers=[])
items = [k8s_client.V1Pod(metadata=metadata, status=status, spec=spec)]
return k8s_client.V1PodList(items=items, kind='PodList')
def mock_namespaced_kind(*args, **kwargs):
mock = Mock()
mock.metadata.resource_version = '2'
return mock
class BaseTestKubernetes(unittest.TestCase):
@patch('socket.TCP_KEEPIDLE', 4, create=True)
@patch('socket.TCP_KEEPINTVL', 5, create=True)
@patch('socket.TCP_KEEPCNT', 6, create=True)
@patch('kubernetes.config.load_kube_config', Mock())
@patch('kubernetes.client.api_client.ThreadPool', Mock(), create=True)
@patch.object(Thread, 'start', Mock())
@patch.object(k8s_client.CoreV1Api, 'list_namespaced_pod', mock_list_namespaced_pod)
@patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map)
def setUp(self, config=None):
config = config or {}
config.update(ttl=30, scope='test', name='p-0', loop_wait=10, retry_timeout=10, labels={'f': 'b'})
self.k = Kubernetes(config)
self.assertRaises(AttributeError, self.k._pods._build_cache)
self.k._pods._is_ready = True
self.assertRaises(AttributeError, self.k._kinds._build_cache)
self.k._kinds._is_ready = True
self.k.get_cluster()
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', mock_namespaced_kind)
class TestKubernetesConfigMaps(BaseTestKubernetes):
@patch('time.time', Mock(side_effect=[1, 10.9, 100]))
def test__wait_caches(self):
self.k._pods._is_ready = False
with self.k._condition:
self.assertRaises(RetryFailedError, self.k._wait_caches)
@patch('time.time', Mock(return_value=time.time() + 100))
def test_get_cluster(self):
self.k.get_cluster()
with patch.object(Kubernetes, '_wait_caches', Mock(side_effect=Exception)):
self.assertRaises(KubernetesError, self.k.get_cluster)
def test_take_leader(self):
self.k.take_leader()
self.k._leader_observed_record['leader'] = 'test'
self.k.patch_or_create = Mock(return_value=False)
self.k.take_leader()
def test_manual_failover(self):
with patch.object(k8s_client.CoreV1Api, 'patch_namespaced_config_map', Mock(side_effect=RetryFailedError(''))):
self.k.manual_failover('foo', 'bar')
def test_set_config_value(self):
self.k.set_config_value('{}')
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod')
def test_touch_member(self, mock_patch_namespaced_pod):
mock_patch_namespaced_pod.return_value.metadata.resource_version = '10'
self.k.touch_member({'role': 'replica'})
self.k._name = 'p-1'
self.k.touch_member({'state': 'running', 'role': 'replica'})
self.k.touch_member({'state': 'stopped', 'role': 'master'})
def test_initialize(self):
self.k.initialize()
def test_delete_leader(self):
self.k.delete_leader(1)
def test_cancel_initialization(self):
self.k.cancel_initialization()
@patch.object(k8s_client.CoreV1Api, 'delete_collection_namespaced_config_map',
Mock(side_effect=k8s_client.rest.ApiException(403, '')))
def test_delete_cluster(self):
self.k.delete_cluster()
def test_watch(self):
self.k.set_ttl(10)
self.k.watch(None, 0)
self.k.watch(None, 0)
def test_set_history_value(self):
self.k.set_history_value('{}')
class TestKubernetesEndpoints(BaseTestKubernetes):
@patch.object(k8s_client.CoreV1Api, 'list_namespaced_endpoints', mock_list_namespaced_endpoints)
def setUp(self, config=None):
super(TestKubernetesEndpoints, self).setUp({'use_endpoints': True, 'pod_ip': '10.0.0.0'})
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints')
def test_update_leader(self, mock_patch_namespaced_endpoints):
self.assertIsNotNone(self.k.update_leader('123'))
args = mock_patch_namespaced_endpoints.call_args[0]
self.assertEqual(args[2].subsets[0].addresses[0].target_ref.resource_version, '10')
self.k._kinds._object_cache['test'].subsets[:] = []
self.assertIsNotNone(self.k.update_leader('123'))
self.k._kinds._object_cache['test'].metadata.annotations['leader'] = 'p-1'
self.assertFalse(self.k.update_leader('123'))
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints', mock_namespaced_kind)
def test_update_leader_with_restricted_access(self):
self.assertIsNotNone(self.k.update_leader('123', True))
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_endpoints')
def test__update_leader_with_retry(self, mock_patch):
mock_patch.side_effect = k8s_client.rest.ApiException(502, '')
self.assertFalse(self.k.update_leader('123'))
mock_patch.side_effect = RetryFailedError('')
self.assertFalse(self.k.update_leader('123'))
mock_patch.side_effect = k8s_client.rest.ApiException(409, '')
with patch('time.time', Mock(side_effect=[0, 100, 200])):
self.assertFalse(self.k.update_leader('123'))
with patch('time.sleep', Mock()):
self.assertFalse(self.k.update_leader('123'))
mock_patch.side_effect = [k8s_client.rest.ApiException(409, ''), mock_namespaced_kind()]
self.k._kinds._object_cache['test'].metadata.resource_version = '2'
self.assertIsNotNone(self.k._update_leader_with_retry({}, '1', []))
@patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints',
Mock(side_effect=[k8s_client.rest.ApiException(500, ''), k8s_client.rest.ApiException(502, '')]))
def test_delete_sync_state(self):
self.assertFalse(self.k.delete_sync_state())
@patch.object(k8s_client.CoreV1Api, 'patch_namespaced_pod', mock_namespaced_kind)
@patch.object(k8s_client.CoreV1Api, 'create_namespaced_endpoints', mock_namespaced_kind)
@patch.object(k8s_client.CoreV1Api, 'create_namespaced_service',
Mock(side_effect=[True, False, k8s_client.rest.ApiException(500, '')]))
def test__create_config_service(self):
self.assertIsNotNone(self.k.patch_or_create_config({'foo': 'bar'}))
self.assertIsNotNone(self.k.patch_or_create_config({'foo': 'bar'}))
self.k.touch_member({'state': 'running', 'role': 'replica'})
class TestCacheBuilder(BaseTestKubernetes):
@patch.object(k8s_client.CoreV1Api, 'list_namespaced_config_map', mock_list_namespaced_config_map)
@patch('patroni.dcs.kubernetes.ObjectCache._watch')
def test__build_cache(self, mock_response):
mock_response.return_value.read_chunked.return_value = [json.dumps(
{'type': 'MODIFIED', 'object': {'metadata': {
'name': self.k.config_path, 'resourceVersion': '2', 'annotations': {self.k._CONFIG: 'foo'}}}}
).encode('utf-8'), ('\n' + json.dumps(
{'type': 'DELETED', 'object': {'metadata': {
'name': self.k.config_path, 'resourceVersion': '3'}}}
) + '\n' + json.dumps(
{'type': 'MDIFIED', 'object': {'metadata': {'name': self.k.config_path}}}
) + '\n' + json.dumps({'object': {'code': 410}}) + '\n').encode('utf-8')]
self.k._kinds._build_cache()
@patch('patroni.dcs.kubernetes.logger.error', Mock(side_effect=SleepException))
@patch('patroni.dcs.kubernetes.ObjectCache._build_cache', Mock(side_effect=Exception))
def test_run(self):
self.assertRaises(SleepException, self.k._pods.run)
@patch('time.sleep', Mock())
def test__list(self):
self.k._pods._func = Mock(side_effect=Exception)
self.assertRaises(Exception, self.k._pods._list)
|
#!/usr/bin/env python
# coding=utf-8
"""Performs all coverage"""
from __future__ import print_function
from optparse import OptionParser
from pkg_resources import load_entry_point
import subprocess
import sys
from coverage import coverage
from diff_coverage import diff_coverage
import settings
CREATE_XML_REPORT = True
CREATE_HTML_REPORT = True
def execute_nosetests():
"""Execute nosetests"""
try:
load_entry_point('nose', 'console_scripts', 'nosetests')()
except SystemExit:
pass
def measure_test_coverage():
"""Measure the test coverage from executing nosetests"""
coverage_obj = coverage(settings.COVERAGE_PATH)
coverage_obj.start()
execute_nosetests()
coverage_obj.stop()
return coverage_obj
def main():
opt = OptionParser(usage='usage: %prog [options...]')
opt.add_option('--no-xml', dest='no_xml', default=not CREATE_XML_REPORT,
action='store_true', help='Don\'t generate XML coverage report')
opt.add_option('--no-html', dest='no_html', default=not CREATE_HTML_REPORT,
action='store_true', help='Don\'t generate HTML coverage report')
(options, args) = opt.parse_args()
if args:
print('Does not take arguments')
print()
opt.print_help()
sys.exit(1)
coverage_obj = measure_test_coverage()
print()
print('Saving coverage report...')
coverage_obj.save()
if not options.no_xml:
print('Saving Cobertura (XML) report...')
coverage_obj.xml_report(outfile=settings.XML_REPORT_FILE)
if not options.no_html:
print('Saving HTML report...')
coverage_obj.html_report(directory=settings.HTML_REPORT_DIR)
print('Creating diff patch...')
subprocess.call('git diff %s > /tmp/diffpatch' % settings.COMPARE_WITH_BRANCH,
shell=True)
print('Creating diff coverage report...')
diff_coverage('/tmp/diffpatch')
if __name__ == '__main__':
main()
|
import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
import django
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.text import get_text_list
from django.utils.jslex import prepare_js_for_gettext
from django.utils import six
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
def handle_extensions(extensions=('html',), ignored=('py',)):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times. Note that the .py extension is ignored
here because of the way non-*.py files are handled in make_messages() (they
are copied to file.ext.py files to trick xgettext to parse them as Python
files).
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
set(['.html', '.js'])
>>> handle_extensions(['.html, txt,.tpl'])
set(['.html', '.tpl', '.txt'])
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set([x for x in ext_list if x.strip('.') not in ignored])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
output, errors = p.communicate()
return output, errors, p.returncode
def walk(root, topdown=True, onerror=None, followlinks=False,
ignore_patterns=None, verbosity=0, stdout=sys.stdout):
"""
A version of os.walk that can follow symlinks for Python < 2.6
"""
if ignore_patterns is None:
ignore_patterns = []
dir_suffix = '%s*' % os.sep
norm_patterns = map(lambda p: p.endswith(dir_suffix)
and p[:-len(dir_suffix)] or p, ignore_patterns)
for dirpath, dirnames, filenames in os.walk(root, topdown, onerror):
remove_dirs = []
for dirname in dirnames:
if is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns):
remove_dirs.append(dirname)
for dirname in remove_dirs:
dirnames.remove(dirname)
if verbosity > 1:
stdout.write('ignoring directory %s\n' % dirname)
yield (dirpath, dirnames, filenames)
if followlinks:
for d in dirnames:
p = os.path.join(dirpath, d)
if os.path.islink(p):
for link_dirpath, link_dirnames, link_filenames in walk(p):
yield (link_dirpath, link_dirnames, link_filenames)
def is_ignored(path, ignore_patterns):
"""
Helper function to check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def find_files(root, ignore_patterns, verbosity, stdout=sys.stdout, symlinks=False):
"""
Helper function to get all files in the given root.
"""
all_files = []
for (dirpath, dirnames, filenames) in walk(root, followlinks=symlinks,
ignore_patterns=ignore_patterns, verbosity=verbosity, stdout=stdout):
for filename in filenames:
norm_filepath = os.path.normpath(os.path.join(dirpath, filename))
if is_ignored(norm_filepath, ignore_patterns):
if verbosity > 1:
stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
all_files.extend([(dirpath, filename)])
all_files.sort()
return all_files
def copy_plural_forms(msgs, locale, domain, verbosity, stdout=sys.stdout):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with open(django_po, 'rU') as fp:
m = plural_forms_re.search(fp.read())
if m:
if verbosity > 1:
stdout.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
def write_pot_file(potfile, msgs, file, work_file, is_templatized):
"""
Write the :param potfile: POT file with the :param msgs: contents,
    making sure its format is valid first.
"""
if is_templatized:
old = '#: ' + work_file[2:]
new = '#: ' + file[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with open(potfile, 'ab') as fp:
try:
fp.write(msgs)
except TypeError:
fp.write(msgs.encode('utf-8'))
def process_file(file, dirpath, potfile, domain, verbosity,
extensions, wrap, location, stdout=sys.stdout):
"""
Extract translatable literals from :param file: for :param domain:
creating or updating the :param potfile: POT file.
Uses the xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if verbosity > 1:
stdout.write('processing file %s in %s\n' % (file, dirpath))
_, file_ext = os.path.splitext(file)
if domain == 'djangojs' and file_ext in extensions:
is_templatized = True
orig_file = os.path.join(dirpath, file)
with open(orig_file) as fp:
src_data = fp.read()
src_data = prepare_js_for_gettext(src_data)
thefile = '%s.c' % file
work_file = os.path.join(dirpath, thefile)
with open(work_file, "w") as fp:
fp.write(src_data)
cmd = (
'xgettext -d %s -L C %s %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
'--from-code UTF-8 --add-comments=Translators -o - "%s"' %
(domain, wrap, location, work_file))
elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
thefile = file
orig_file = os.path.join(dirpath, file)
is_templatized = file_ext in extensions
if is_templatized:
with open(orig_file, "rU") as fp:
src_data = fp.read()
thefile = '%s.py' % file
content = templatize(src_data, orig_file[2:])
with open(os.path.join(dirpath, thefile), "w") as fp:
fp.write(content)
work_file = os.path.join(dirpath, thefile)
cmd = (
'xgettext -d %s -L Python %s %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=ugettext_noop --keyword=ugettext_lazy '
'--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
'--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
'--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
'--add-comments=Translators -o - "%s"' %
(domain, wrap, location, work_file))
else:
return
msgs, errors, status = _popen(cmd)
if errors:
if status != STATUS_OK:
if is_templatized:
os.unlink(work_file)
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
elif verbosity > 0:
# Print warnings
stdout.write(errors)
if msgs:
write_pot_file(potfile, msgs, orig_file, work_file, is_templatized)
if is_templatized:
os.unlink(work_file)
def write_po_file(pofile, potfile, domain, locale, verbosity, stdout,
copy_pforms, wrap, location, no_obsolete):
"""
    Creates or updates the :param pofile: PO file for :param domain: and :param
    locale:. Uses contents of the existing :param potfile:.
    Uses msguniq, msgmerge, and msgattrib GNU gettext utilities.
"""
msgs, errors, status = _popen('msguniq %s %s --to-code=utf-8 "%s"' %
(wrap, location, potfile))
if errors:
if status != STATUS_OK:
os.unlink(potfile)
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
if os.path.exists(pofile):
with open(potfile, 'w') as fp:
fp.write(msgs)
msgs, errors, status = _popen('msgmerge %s %s -q "%s" "%s"' %
(wrap, location, pofile, potfile))
if errors:
if status != STATUS_OK:
os.unlink(potfile)
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
elif copy_pforms:
msgs = copy_plural_forms(msgs, locale, domain, verbosity, stdout)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
with open(pofile, 'wb') as fp:
try:
fp.write(msgs)
except TypeError:
fp.write(msgs.encode('utf-8'))
os.unlink(potfile)
if no_obsolete:
msgs, errors, status = _popen(
'msgattrib %s %s -o "%s" --no-obsolete "%s"' %
(wrap, location, pofile, pofile))
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
def make_messages(locale=None, domain='django', verbosity=1, all=False,
extensions=None, symlinks=False, ignore_patterns=None, no_wrap=False,
no_location=False, no_obsolete=False, stdout=sys.stdout):
"""
Uses the ``locale/`` directory from the Django Git tree or an
application/project to process all files with translatable literals for
the :param domain: domain and :param locale: locale.
"""
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N = True)
if ignore_patterns is None:
ignore_patterns = []
invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
invoked_for_django = True
# Ignoring all contrib apps
ignore_patterns += ['contrib/*']
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django Git "
"tree or your project or app tree. If you did indeed run it "
"from the Git checkout or your project or application, "
"maybe you are just missing the conf/locale (in the django "
"tree) or locale (for project and application) directory? It "
"is not created automatically, you have to create it by hand "
"if you want to enable i18n for your project or application.")
if domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
if (locale is None and not all) or domain is None:
message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
raise CommandError(message)
# We require gettext version 0.15 or newer.
output, errors, status = _popen('xgettext --version')
if status != STATUS_OK:
raise CommandError("Error running xgettext. Note that Django "
"internationalization requires GNU gettext 0.15 or newer.")
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU "
"gettext 0.15 or newer. You are using version %s, please "
"upgrade your gettext toolset." % match.group())
locales = []
if locale is not None:
locales.append(locale)
elif all:
locale_dirs = six.lfilter(os.path.isdir, glob.glob('%s/*' % localedir))
locales = [os.path.basename(l) for l in locale_dirs]
wrap = '--no-wrap' if no_wrap else ''
location = '--no-location' if no_location else ''
for locale in locales:
if verbosity > 0:
stdout.write("processing language %s\n" % locale)
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
for dirpath, file in find_files(".", ignore_patterns, verbosity,
stdout, symlinks=symlinks):
process_file(file, dirpath, potfile, domain, verbosity, extensions,
wrap, location, stdout)
if os.path.exists(potfile):
write_po_file(pofile, potfile, domain, locale, verbosity, stdout,
not invoked_for_django, wrap, location, no_obsolete)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files for the given locale (e.g. pt_BR).'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt", or "js" if the domain is "djangojs"). Separate multiple extensions with commas, or use -e multiple times.',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
make_option('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines"),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings"),
)
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
can_import_settings = False
def handle_noargs(self, *args, **options):
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
no_wrap = options.get('no_wrap')
no_location = options.get('no_location')
no_obsolete = options.get('no_obsolete')
if domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt']
extensions = handle_extensions(exts)
if verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(extensions), 'and'))
make_messages(locale, domain, verbosity, process_all, extensions,
symlinks, ignore_patterns, no_wrap, no_location, no_obsolete, self.stdout)
|
n = int(input('Enter a number: '))
cont = 0
for i in range(1, n + 1):
    if n % i == 0:
        print('\033[1;34m', end=' ')  # divisors are highlighted in blue
        cont += 1
    else:
        print('\033[m', end=' ')
    print(i, end=' ')
if cont == 2:  # exactly two divisors (1 and n) means n is prime
    print('\n\033[mPrime')
else:
    print('\n\033[mNot prime')
|
#!/usr/bin/env python2.7
# Copyright (c) 2013 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import csv
import json
import logging
import multiprocessing
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import zipfile
from inferlib import config, issues, utils
ANALYSIS_SUMMARY_OUTPUT = 'analysis_summary.txt'
DEFAULT_BUCK_OUT = os.path.join(utils.decode(os.getcwd()), 'buck-out')
DEFAULT_BUCK_OUT_GEN = os.path.join(DEFAULT_BUCK_OUT, 'gen')
INFER_JSON_REPORT = os.path.join(config.BUCK_INFER_OUT,
config.JSON_REPORT_FILENAME)
INFER_STATS = os.path.join(config.BUCK_INFER_OUT, config.STATS_FILENAME)
INFER_SCRIPT = """\
#!/usr/bin/env {python_executable}
import subprocess
import sys
cmd = {infer_command} + ['--', 'javac'] + sys.argv[1:]
subprocess.check_call(cmd)
"""
def prepare_build(args):
"""Creates script that redirects javac calls to infer and a local buck
configuration that tells buck to use that script.
"""
infer_options = [
'--buck',
'--analyzer', args.analyzer,
]
if args.java_jar_compiler is not None:
infer_options += [
'--java-jar-compiler',
args.java_jar_compiler,
]
if args.debug:
infer_options.append('--debug')
if args.no_filtering:
infer_options.append('--no-filtering')
if args.debug_exceptions:
infer_options += ['--debug-exceptions', '--no-filtering']
# Create a temporary directory as a cache for jar files.
infer_cache_dir = os.path.join(args.infer_out, 'cache')
if not os.path.isdir(infer_cache_dir):
os.mkdir(infer_cache_dir)
infer_options += ['--infer_cache', infer_cache_dir]
temp_files = [infer_cache_dir]
try:
infer_command = [utils.get_cmd_in_bin_dir('infer')] + infer_options
except subprocess.CalledProcessError as e:
logging.error('Could not find infer')
raise e
# make sure INFER_ANALYSIS is set when buck is called
logging.info('Setup Infer analysis mode for Buck: export INFER_ANALYSIS=1')
os.environ['INFER_ANALYSIS'] = '1'
# Export the Infer command as environment variables
os.environ['INFER_JAVA_BUCK_OPTIONS'] = json.dumps(infer_command)
os.environ['INFER_RULE_KEY'] = utils.infer_key(args.analyzer)
# Create a script to be called by buck
infer_script = None
with tempfile.NamedTemporaryFile(delete=False,
prefix='infer_',
suffix='.py',
dir='.') as infer_script:
logging.info('Creating %s' % infer_script.name)
infer_script.file.write(
utils.encode(INFER_SCRIPT.format(
python_executable=sys.executable,
infer_command=infer_command)))
st = os.stat(infer_script.name)
os.chmod(infer_script.name, st.st_mode | stat.S_IEXEC)
temp_files += [infer_script.name]
return temp_files, infer_script.name
def get_normalized_targets(targets):
""" Use buck to convert a list of input targets/aliases
into a set of the (transitive) target deps for all inputs"""
# this expands the targets passed on the command line, then filters away
# targets that are not Java/Android. you need to change this if you
# care about something other than Java/Android
TARGET_TYPES = "kind('android_library|java_library', deps('%s'))"
BUCK_GET_JAVA_TARGETS = ['buck', 'query', TARGET_TYPES]
buck_cmd = BUCK_GET_JAVA_TARGETS + targets
try:
targets = filter(
lambda line: len(line) > 0,
subprocess.check_output(buck_cmd).decode().strip().split('\n'))
return targets
except subprocess.CalledProcessError as e:
logging.error('Error while expanding targets with {0}'.format(
buck_cmd))
raise e
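# Illustrative note (editorial assumption; //app:app is a hypothetical target, not
# from the original source): the command built above runs roughly
#     buck query "kind('android_library|java_library', deps('//app:app'))"
# which prints one fully-qualified Java/Android target per line; the filter()
# above then drops empty lines and returns the expanded target list.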
def init_stats(args, start_time):
"""Returns dictionary with target independent statistics.
"""
return {
'float': {},
'int': {
'cores': multiprocessing.cpu_count(),
'time': int(time.time()),
'start_time': int(round(start_time)),
},
'normal': {
'debug': str(args.debug),
'analyzer': args.analyzer,
'machine': platform.machine(),
'node': platform.node(),
'project': utils.decode(os.path.basename(os.getcwd())),
'revision': utils.vcs_revision(),
'branch': utils.vcs_branch(),
'system': platform.system(),
'infer_version': utils.infer_version(),
'infer_branch': utils.infer_branch(),
}
}
def store_performances_csv(infer_out, stats):
"""Stores the statistics about perfromances into a CSV file to be exported
to a database"""
perf_filename = os.path.join(infer_out, config.CSV_PERF_FILENAME)
with open(perf_filename, 'w') as csv_file_out:
csv_writer = csv.writer(csv_file_out)
keys = ['infer_version', 'project', 'revision', 'files', 'lines',
'cores', 'system', 'machine', 'node', 'total_time',
'capture_time', 'analysis_time', 'reporting_time', 'time']
int_stats = list(stats['int'].items())
normal_stats = list(stats['normal'].items())
flat_stats = dict(int_stats + normal_stats)
values = []
for key in keys:
if key in flat_stats:
values.append(flat_stats[key])
csv_writer.writerow(keys)
csv_writer.writerow(values)
csv_file_out.flush()
def get_harness_code():
all_harness_code = '\nGenerated harness code:\n'
for filename in os.listdir(DEFAULT_BUCK_OUT_GEN):
if 'InferGeneratedHarness' in filename:
all_harness_code += '\n' + filename + ':\n'
with open(os.path.join(DEFAULT_BUCK_OUT_GEN,
filename), 'r') as file_in:
all_harness_code += file_in.read()
return all_harness_code + '\n'
def get_basic_stats(stats):
files_analyzed = '{0} files ({1} lines) analyzed in {2}s\n\n'.format(
stats['int'].get('files', 0),
stats['int'].get('lines', 0),
stats['int']['total_time'],
)
phase_times = 'Capture time: {0}s\nAnalysis time: {1}s\n\n'.format(
stats['int'].get('capture_time', 0),
stats['int'].get('analysis_time', 0),
)
to_skip = {
'files',
'procedures',
'lines',
'cores',
'time',
'start_time',
'capture_time',
'analysis_time',
'reporting_time',
'total_time',
'makefile_generation_time'
}
bugs_found = 'Errors found:\n\n'
for key, value in sorted(stats['int'].items()):
if key not in to_skip:
bugs_found += ' {0:>8} {1}\n'.format(value, key)
basic_stats_message = files_analyzed + phase_times + bugs_found + '\n'
return basic_stats_message
def get_buck_stats():
trace_filename = os.path.join(
DEFAULT_BUCK_OUT,
'log',
'traces',
'build.trace'
)
ARGS = 'args'
SUCCESS_STATUS = 'success_type'
buck_stats = {}
try:
trace = utils.load_json_from_path(trace_filename)
for t in trace:
if SUCCESS_STATUS in t[ARGS]:
status = t[ARGS][SUCCESS_STATUS]
count = buck_stats.get(status, 0)
buck_stats[status] = count + 1
buck_stats_message = 'Buck build statistics:\n\n'
for key, value in sorted(buck_stats.items()):
buck_stats_message += ' {0:>8} {1}\n'.format(value, key)
return buck_stats_message
except IOError as e:
logging.error('Caught %s: %s' % (e.__class__.__name__, str(e)))
logging.error(traceback.format_exc())
return ''
class NotFoundInJar(Exception):
pass
def load_stats(opened_jar):
try:
return json.loads(opened_jar.read(INFER_STATS).decode())
except KeyError:
raise NotFoundInJar
def load_json_report(opened_jar):
try:
return json.loads(opened_jar.read(INFER_JSON_REPORT).decode())
except KeyError:
raise NotFoundInJar
def get_output_jars(targets):
if len(targets) == 0:
return []
else:
audit_output = subprocess.check_output(
['buck', 'audit', 'classpath'] + targets)
classpath_jars = audit_output.strip().split('\n')
return filter(os.path.isfile, classpath_jars)
def collect_results(args, start_time, targets):
"""Walks through buck-gen, collects results for the different buck targets
and stores them in in args.infer_out/results.csv.
"""
buck_stats = get_buck_stats()
logging.info(buck_stats)
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
f.write(buck_stats)
all_json_rows = set()
stats = init_stats(args, start_time)
accumulation_whitelist = list(map(re.compile, [
'^cores$',
'^time$',
'^start_time$',
'.*_pc',
]))
expected_analyzer = stats['normal']['analyzer']
expected_version = stats['normal']['infer_version']
for path in get_output_jars(targets):
try:
with zipfile.ZipFile(path) as jar:
# Accumulate integers and float values
target_stats = load_stats(jar)
found_analyzer = target_stats['normal']['analyzer']
found_version = target_stats['normal']['infer_version']
if found_analyzer != expected_analyzer \
or found_version != expected_version:
continue
else:
for type_k in ['int', 'float']:
items = target_stats.get(type_k, {}).items()
for key, value in items:
if not any(map(lambda r: r.match(key),
accumulation_whitelist)):
old_value = stats[type_k].get(key, 0)
stats[type_k][key] = old_value + value
json_rows = load_json_report(jar)
for row in json_rows:
all_json_rows.add(json.dumps(row))
# Override normals
stats['normal'].update(target_stats.get('normal', {}))
except NotFoundInJar:
pass
except zipfile.BadZipfile:
logging.warn('Bad zip file %s', path)
json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
# Convert all float values to integer values
for key, value in stats.get('float', {}).items():
stats['int'][key] = int(round(value))
# Delete the float entries before exporting the results
del(stats['float'])
with open(json_report, 'w') as report:
json_string = '['
json_string += ','.join(all_json_rows)
json_string += ']'
report.write(json_string)
report.flush()
print('\n')
json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
issues.print_and_save_errors(args.infer_out, args.project_root,
json_report, bugs_out, args.pmd_xml)
stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))
store_performances_csv(args.infer_out, stats)
stats_filename = os.path.join(args.infer_out, config.STATS_FILENAME)
utils.dump_json_to_path(stats, stats_filename)
basic_stats = get_basic_stats(stats)
if args.print_harness:
harness_code = get_harness_code()
basic_stats += harness_code
logging.info(basic_stats)
with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
f.write(basic_stats)
def cleanup(temp_files):
"""Removes the temporary infer files.
"""
for file in temp_files:
try:
logging.info('Removing %s' % file)
if os.path.isdir(file):
shutil.rmtree(file)
else:
os.unlink(file)
except IOError:
logging.error('Could not remove %s' % file)
parser = argparse.ArgumentParser()
parser.add_argument('--build-report', metavar='PATH', type=utils.decode)
parser.add_argument('--deep', action='store_true')
parser.add_argument('--keep-going', action='store_true')
parser.add_argument('--load-limit', '-L')
parser.add_argument('--no-cache', action='store_true')
parser.add_argument('--profile', action='store_true')
parser.add_argument('--shallow', action='store_true')
parser.add_argument('--num-threads', '-j', metavar='N')
parser.add_argument('--verbose', '-v', metavar='N', type=int)
parser.add_argument('targets', nargs='*', metavar='target',
help='Build targets to analyze')
class UnsupportedBuckCommand(Exception):
pass
def parse_buck_command(args):
build_keyword = 'build'
if build_keyword in args and len(args[args.index(build_keyword):]) > 1:
next_index = args.index(build_keyword) + 1
buck_args = args[next_index:]
parsed_args = parser.parse_args(buck_args)
base_cmd_without_targets = [p for p in buck_args
if p not in parsed_args.targets]
base_cmd = ['buck', build_keyword] + base_cmd_without_targets
return base_cmd, parsed_args
else:
        raise UnsupportedBuckCommand(args)
class Wrapper:
def __init__(self, infer_args, buck_cmd):
self.timer = utils.Timer(logging.info)
# The reactive mode is not yet supported
if infer_args.reactive:
sys.stderr.write(
'Reactive is not supported for Java Buck project. Exiting.\n')
sys.exit(1)
self.infer_args = infer_args
self.timer.start('Computing library targets')
base_cmd, buck_args = parse_buck_command(buck_cmd)
self.buck_args = buck_args
self.normalized_targets = get_normalized_targets(
buck_args.targets)
self.buck_cmd = base_cmd + self.normalized_targets
self.timer.stop('%d targets computed', len(self.normalized_targets))
def _collect_results(self, start_time):
self.timer.start('Collecting results ...')
collect_results(self.infer_args, start_time, self.normalized_targets)
self.timer.stop('Done')
def run(self):
temp_files = []
start_time = time.time()
try:
logging.info('Starting the analysis')
if not os.path.isdir(self.infer_args.infer_out):
os.mkdir(self.infer_args.infer_out)
self.timer.start('Preparing build ...')
temp_files2, infer_script = prepare_build(self.infer_args)
temp_files += temp_files2
self.timer.stop('Build prepared')
if len(self.normalized_targets) == 0:
logging.info('Nothing to analyze')
else:
self.timer.start('Running Buck ...')
javac_config = ['--config', 'tools.javac=' + infer_script]
buck_cmd = self.buck_cmd + javac_config
subprocess.check_call(buck_cmd)
self.timer.stop('Buck finished')
self._collect_results(start_time)
return os.EX_OK
except KeyboardInterrupt as e:
self.timer.stop('Exiting')
sys.exit(0)
except subprocess.CalledProcessError as e:
if self.buck_args.keep_going:
print('Buck failed, but continuing the analysis '
'because --keep-going was passed')
self._collect_results(start_time)
return os.EX_OK
raise e
finally:
cleanup(temp_files)
|
# -*- coding: utf-8 -*-
"""
jishaku.paginators
~~~~~~~~~~~~~~~~~~
Paginator-related tools and interfaces for Jishaku.
:copyright: (c) 2019 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import asyncio
import collections
import re
import discord
from discord.ext import commands
from jishaku.hljs import get_language
__all__ = ('EmojiSettings', 'PaginatorInterface', 'PaginatorEmbedInterface',
'WrappedPaginator', 'FilePaginator')
# emoji settings, this sets what emoji are used for PaginatorInterface
EmojiSettings = collections.namedtuple('EmojiSettings', 'start back forward end close')
EMOJI_DEFAULT = EmojiSettings(
start="\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}",
back="\N{BLACK LEFT-POINTING TRIANGLE}",
forward="\N{BLACK RIGHT-POINTING TRIANGLE}",
end="\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}",
close="\N{BLACK SQUARE FOR STOP}"
)
class PaginatorInterface: # pylint: disable=too-many-instance-attributes
"""
A message and reaction based interface for paginators.
"""
def __init__(self, bot: commands.Bot, paginator: commands.Paginator, **kwargs):
if not isinstance(paginator, commands.Paginator):
raise TypeError('paginator must be a commands.Paginator instance')
self._display_page = 0
self.bot = bot
self.message = None
self.paginator = paginator
self.owner = kwargs.pop('owner', None)
self.emojis = kwargs.pop('emoji', EMOJI_DEFAULT)
self.timeout = kwargs.pop('timeout', 7200)
self.delete_message = kwargs.pop('delete_message', False)
self.sent_page_reactions = False
self.task: asyncio.Task = None
self.update_lock: asyncio.Lock = asyncio.Semaphore(value=kwargs.pop('update_max', 2))
if self.page_size > self.max_page_size:
raise ValueError(
f'Paginator passed has too large of a page size for this interface. '
f'({self.page_size} > {self.max_page_size})'
)
@property
def pages(self):
"""
Returns the paginator's pages without prematurely closing the active page.
"""
# protected access has to be permitted here to not close the paginator's pages
# pylint: disable=protected-access
paginator_pages = list(self.paginator._pages)
if len(self.paginator._current_page) > 1:
paginator_pages.append('\n'.join(self.paginator._current_page) + '\n' + (self.paginator.suffix or ''))
# pylint: enable=protected-access
return paginator_pages
@property
def page_count(self):
"""
Returns the page count of the internal paginator.
"""
return len(self.pages)
@property
def display_page(self):
"""
Returns the current page the paginator interface is on.
"""
self._display_page = max(0, min(self.page_count - 1, self._display_page))
return self._display_page
@display_page.setter
def display_page(self, value):
"""
Sets the current page the paginator is on. Automatically pushes values inbounds.
"""
self._display_page = max(0, min(self.page_count - 1, value))
max_page_size = 2000
@property
def page_size(self) -> int:
"""
A property that returns how large a page is, calculated from the paginator properties.
If this exceeds `max_page_size`, an exception is raised upon instantiation.
"""
page_count = self.page_count
return self.paginator.max_size + len(f'\nPage {page_count}/{page_count}')
@property
def send_kwargs(self) -> dict:
"""
A property that returns the kwargs forwarded to send/edit when updating the page.
As this must be compatible with both `discord.TextChannel.send` and `discord.Message.edit`,
it should be a dict containing 'content', 'embed' or both.
"""
display_page = self.display_page
page_num = f'\nPage {display_page + 1}/{self.page_count}'
content = self.pages[display_page] + page_num
return {'content': content}
async def add_line(self, *args, **kwargs):
"""
A proxy function that allows this PaginatorInterface to remain locked to the last page
if it is already on it.
"""
display_page = self.display_page
page_count = self.page_count
self.paginator.add_line(*args, **kwargs)
new_page_count = self.page_count
if display_page + 1 == page_count:
# To keep position fixed on the end, update position to new last page and update message.
self._display_page = new_page_count
self.bot.loop.create_task(self.update())
async def send_to(self, destination: discord.abc.Messageable):
"""
Sends a message to the given destination with this interface.
This automatically creates the response task for you.
"""
self.message = await destination.send(**self.send_kwargs)
# add the close reaction
await self.message.add_reaction(self.emojis.close)
if self.task:
self.task.cancel()
self.task = self.bot.loop.create_task(self.wait_loop())
# if there is more than one page, and the reactions haven't been sent yet, send navigation emotes
if not self.sent_page_reactions and self.page_count > 1:
await self.send_all_reactions()
return self
async def send_all_reactions(self):
"""
Sends all reactions for this paginator, if any are missing.
This method is generally for internal use only.
"""
for emoji in filter(None, self.emojis):
try:
await self.message.add_reaction(emoji)
except discord.NotFound:
# the paginator has probably already been closed
break
self.sent_page_reactions = True
@property
def closed(self):
"""
Is this interface closed?
"""
if not self.task:
return False
return self.task.done()
async def wait_loop(self):
"""
Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`.
"""
start, back, forward, end, close = self.emojis
def check(payload: discord.RawReactionActionEvent):
"""
Checks if this reaction is related to the paginator interface.
"""
owner_check = not self.owner or payload.user_id == self.owner.id
emoji = payload.emoji
if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
emoji = emoji.name
tests = (
owner_check,
payload.message_id == self.message.id,
emoji,
emoji in self.emojis,
payload.user_id != self.bot.user.id
)
return all(tests)
try:
while not self.bot.is_closed():
payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout)
emoji = payload.emoji
if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
emoji = emoji.name
if emoji == close:
await self.message.delete()
return
if emoji == start:
self._display_page = 0
elif emoji == end:
self._display_page = self.page_count - 1
elif emoji == back:
self._display_page -= 1
elif emoji == forward:
self._display_page += 1
self.bot.loop.create_task(self.update())
try:
await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id))
except discord.Forbidden:
pass
except (asyncio.CancelledError, asyncio.TimeoutError):
if self.delete_message:
return await self.message.delete()
for emoji in filter(None, self.emojis):
try:
await self.message.remove_reaction(emoji, self.message.guild.me)
except (discord.Forbidden, discord.NotFound):
pass
async def update(self):
"""
Updates this interface's messages with the latest data.
"""
if self.update_lock.locked():
return
async with self.update_lock:
if self.update_lock.locked():
# if this engagement has caused the semaphore to exhaust,
# we are overloaded and need to calm down.
await asyncio.sleep(1)
if not self.message:
# too fast, stagger so this update gets through
await asyncio.sleep(0.5)
if not self.sent_page_reactions and self.page_count > 1:
self.bot.loop.create_task(self.send_all_reactions())
self.sent_page_reactions = True # don't spawn any more tasks
await self.message.edit(**self.send_kwargs)
class PaginatorEmbedInterface(PaginatorInterface):
"""
A subclass of :class:`PaginatorInterface` that encloses content in an Embed.
"""
def __init__(self, *args, **kwargs):
self._embed = kwargs.pop('embed', None) or discord.Embed()
super().__init__(*args, **kwargs)
@property
def send_kwargs(self) -> dict:
display_page = self.display_page
self._embed.description = self.pages[display_page]
self._embed.set_footer(text=f'Page {display_page + 1}/{self.page_count}')
return {'embed': self._embed}
max_page_size = 2048
@property
def page_size(self) -> int:
return self.paginator.max_size
class WrappedPaginator(commands.Paginator):
"""
A paginator that allows automatic wrapping of lines should they not fit.
This is useful when paginating unpredictable output,
as it allows for line splitting on big chunks of data.
Delimiters are prioritized in the order of their tuple.
Parameters
-----------
wrap_on: tuple
A tuple of wrapping delimiters.
include_wrapped: bool
Whether to include the delimiter at the start of the new wrapped line.
"""
def __init__(self, *args, wrap_on=('\n', ' '), include_wrapped=True, **kwargs):
super().__init__(*args, **kwargs)
self.wrap_on = wrap_on
self.include_wrapped = include_wrapped
def add_line(self, line='', *, empty=False):
true_max_size = self.max_size - len(self.prefix) - 2
while len(line) > true_max_size:
search_string = line[0:true_max_size - 1]
wrapped = False
for delimiter in self.wrap_on:
position = search_string.rfind(delimiter)
if position > 0:
super().add_line(line[0:position], empty=empty)
wrapped = True
if self.include_wrapped:
line = line[position:]
else:
line = line[position + len(delimiter):]
break
if not wrapped:
break # this will probably always cause an exception
super().add_line(line, empty=empty)
class FilePaginator(commands.Paginator):
"""
A paginator of syntax-highlighted codeblocks, read from a file-like.
Parameters
-----------
fp
A file-like (implements ``fp.read``) to read the data for this paginator from.
line_span: Optional[Tuple[int, int]]
A linespan to read from the file. If None, reads the whole file.
language_hints: Tuple[str]
A tuple of strings that may hint to the language of this file.
This could include filenames, MIME types, or shebangs.
A shebang present in the actual file will always be prioritized over this.
"""
__encoding_regex = re.compile(br'coding[=:]\s*([-\w.]+)')
def __init__(self, fp, line_span=None, language_hints=(), **kwargs):
language = ''
for hint in language_hints:
language = get_language(hint)
if language:
break
if not language:
try:
language = get_language(fp.name)
except AttributeError:
pass
raw_content = fp.read()
try:
lines = raw_content.decode('utf-8').split('\n')
except UnicodeDecodeError as exc:
# This file isn't UTF-8.
# By Python and text-editor convention,
# there may be a hint as to what the actual encoding is
# near the start of the file.
encoding_match = self.__encoding_regex.search(raw_content[:128])
if encoding_match:
encoding = encoding_match.group(1)
else:
raise exc
try:
lines = raw_content.decode(encoding.decode('utf-8')).split('\n')
except UnicodeDecodeError as exc2:
raise exc2 from exc
del raw_content
# If the first line is a shebang,
if lines[0].startswith('#!'):
# prioritize its declaration over the extension.
language = get_language(lines[0]) or language
super().__init__(prefix=f'```{language}', suffix='```', **kwargs)
if line_span:
line_span = sorted(line_span)
if min(line_span) < 1 or max(line_span) > len(lines):
raise ValueError("Linespan goes out of bounds.")
lines = lines[line_span[0] - 1:line_span[1]]
for line in lines:
self.add_line(line)
class WrappedFilePaginator(FilePaginator, WrappedPaginator):
"""
Combination of FilePaginator and WrappedPaginator.
In other words, a FilePaginator that supports line wrapping.
"""
|
import argparse
import numpy as np
import random
from PIL import Image
action_list = [[0, 1], [0, -1], [1, 0], [-1, 0]]
def random_walk(canvas, ini_x, ini_y, length):
x = ini_x
y = ini_y
img_size = canvas.shape[-1]
x_list = []
y_list = []
for i in range(length):
r = random.randint(0, len(action_list) - 1)
x = np.clip(x + action_list[r][0], a_min=0, a_max=img_size - 1)
y = np.clip(y + action_list[r][1], a_min=0, a_max=img_size - 1)
x_list.append(x)
y_list.append(y)
canvas[np.array(x_list), np.array(y_list)] = 0
return canvas
if __name__ == '__main__':
import os
parser = argparse.ArgumentParser()
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--N', type=int, default=10000)
parser.add_argument('--save_dir', type=str, default='masks')
args = parser.parse_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
for i in range(args.N):
canvas = np.ones((args.image_size, args.image_size)).astype("i")
ini_x = random.randint(0, args.image_size - 1)
ini_y = random.randint(0, args.image_size - 1)
mask = random_walk(canvas, ini_x, ini_y, args.image_size**2)
print("save:", i, np.sum(mask))
img = Image.fromarray(mask * 255).convert('1')
img.save('{:s}/{:06d}.jpg'.format(args.save_dir, i))
|
from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
itervalues)
from collections import OrderedDict
import datetime
from functools import reduce
import re
import textwrap
import numpy as np
import pandas as pd
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
self._merge_latex = False
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format : str
            Format string for float data columns
align : str
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format : str
            Format string applied when array values are floats
align : str
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : str
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
        the note will be wrapped to table width. Notes are not indented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, str):
self.title = title
else:
if results is not None:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
else:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
        float_format : str
Float formatting for summary of parameters (optional)
title : str
Title of the summary table (optional)
xname : list[str] of length equal to the number of parameters
Names of the independent variables (optional)
yname : str
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
        if title is not None:
            if len(title) < widest:
                title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '}'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
to_replace = ('\\\\hline\\n\\\\hline\\n\\\\'
'end{tabular}\\n\\\\begin{tabular}{.*}\\n')
if self._merge_latex:
# create single tabular object for summary_col
tab = re.sub(to_replace, r'\\midrule\n', tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
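# --- Hedged usage sketch (illustrative, not part of statsmodels) ---
# Builds a Summary by hand from a dict and a DataFrame; the labels and numbers
# below are made up purely to show the calling convention.
def _example_summary():
    smry = Summary()
    smry.add_dict({'Model:': 'OLS', 'No. Observations:': '100'})
    smry.add_df(pd.DataFrame({'Coef.': [1.25, -0.50]}, index=['x1', 'x2']))
    smry.add_title('Results: Example')
    return smry.as_text()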
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = max(tables[i].shape[1] - 1, 1)
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff # TODO: be more specific
_model_types = {'OLS': 'Ordinary least squares',
'GLS': 'Generalized least squares',
'GLSAR': 'Generalized least squares with AR(p)',
'WLS': 'Weighted least squares',
'RLM': 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM': 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
rsquared_type = '' if results.k_constant else ' (uncentered)'
info['R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared_adj # noqa:E501
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
except (AttributeError, KeyError, NotImplementedError):
# NOTE: some models do not have loglike defined (RLM),
# so raise NotImplementedError
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
    results : results instance
some required information is directly taken from the result
instance
yname : {str, None}
optional name for the endogenous variable, default is "y"
xname : {list[str], None}
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : str
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
try:
data.index = results.model.data.param_names
except AttributeError:
data.index = results.model.exog_names
else:
data.index = xname
return data
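# --- Hedged example (illustrative, not part of statsmodels) ---
# summary_params also accepts a pre-extracted tuple in place of a results
# instance; the arrays below are made-up numbers.
def _example_summary_params_from_tuple():
    params = np.array([1.0, 2.0])
    bse = np.array([0.1, 0.2])
    tvalues = params / bse
    pvalues = np.array([0.0, 0.0])
    conf_int = np.column_stack([params - 2 * bse, params + 2 * bse])
    return summary_params((None, params, bse, tvalues, pvalues, conf_int),
                          xname=['x1', 'x2'])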
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
'''Stack coefficients and standard errors in single column
'''
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.iloc[:, 1] = '(' + res.iloc[:, 1] + ')'
# Significance stars
if stars:
idx = res.iloc[:, 3] < .1
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .05
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .01
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
# Stack Coefs and Std.Errors
res = res.iloc[:, :2]
rsquared = rsquared_adj = np.nan
if hasattr(result, 'rsquared'):
rsquared = result.rsquared
if hasattr(result, 'rsquared_adj'):
rsquared_adj = result.rsquared_adj
r_result = pd.DataFrame({'Basic': [rsquared], 'Adj.': [rsquared_adj]},
index=['R-squared'])
if not np.all(np.isnan(np.asarray(r_result))):
for col in r_result:
r_result[col] = r_result[col].apply(lambda x: float_format % x)
try:
res = pd.DataFrame(res).append(r_result, sort=True)
except TypeError:
# TODO: Remove when min pandas >= 0.23
res = pd.DataFrame(res).append(r_result)
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except AttributeError:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=(), stars=False,
info_dict=None, regressor_order=(), drop_omitted=False):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : str, optional
float format for coefficients and standard errors
Default : '%.4f'
model_names : list[str], optional
Must have same length as the number of results. If the names are not
unique, a roman number will be appended to all model names
stars : bool
print significance stars
info_dict : dict
dict of functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list[str], optional
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
drop_omitted : bool, optional
        If True, only regressors named in regressor_order are included. If
        False, regressors not specified in regressor_order are appended to
        the end of the list.
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
def merg(x, y):
return x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
def f(idx):
return sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(pd.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
if drop_omitted:
summ = summ.loc[regressor_order]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
# add infos about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
def merg(x, y):
return x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry._merge_latex = True
smry.add_df(summ, header=True, align='l')
smry.add_text('Standard errors in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, ***p<.01')
return smry
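# --- Hedged usage sketch (illustrative, not part of statsmodels) ---
# Compares two OLS fits side by side; the data are random and only meant to
# show the calling convention of summary_col.
def _example_summary_col():
    import statsmodels.api as sm
    x = np.random.randn(50, 2)
    y = x @ np.array([1.0, -2.0]) + np.random.randn(50)
    exog = sm.add_constant(pd.DataFrame(x, columns=['x1', 'x2']))
    fit_full = sm.OLS(y, exog).fit()
    fit_reduced = sm.OLS(y, exog[['const', 'x1']]).fit()
    return summary_col([fit_full, fit_reduced], stars=True,
                       model_names=['full', 'reduced'])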
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except (ValueError, TypeError):
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.iloc[:, 0] = [str(x) + int(pad_index) * ' '
for x in dat.iloc[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = settings[0]['float_format'] if settings else '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
|
# -*- coding: utf-8 -*-
#
"""
MIT License
Copyright (c) 2018 Christof Küstner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Switch to Python3 unicode literals and print() function
from __future__ import unicode_literals, print_function
import matplotlib.pyplot as plt
import matplotlib.ticker as mpl_ticker
import numpy as np
def apply_din461(ax, x_unit_name, y_unit_name,
x_left_to_right=True, y_bottom_to_top=True):
"""
    Applies DIN 461 conventions to the axis units and labels.
    @param ax: Axis to be modified
@type ax: matplotlib.axes
@param x_unit_name: Name of the unit in x direction
@type x_unit_name: unicode
@param y_unit_name: Name of the unit in y direction
@type y_unit_name: unicode
@param x_left_to_right: If True, arrow from left to right
@type x_left_to_right: bool
@param y_bottom_to_top: If True, arrow bottom to top
@type y_bottom_to_top: bool
"""
# updates can only be applied if plot has been plotted/updated
ax.figure.canvas.draw()
# add arrow to x axis label
label_text = ax.xaxis.get_label().get_text()
    if x_left_to_right:
        label_text += r" $\longrightarrow$"
    else:
        label_text = r"$\longleftarrow$ " + label_text
ax.set_xlabel(label_text)
# add arrow to y axis label
label_text = ax.yaxis.get_label().get_text()
    if y_bottom_to_top:
        label_text += r" $\longrightarrow$"
    else:
        label_text = r"$\longleftarrow$ " + label_text
ax.set_ylabel(label_text)
# change the x unit name
def x_tick_formatter(x, pos):
visible_labels = [t for t in ax.get_xticklabels() if t.get_visible()]
x_number_of_ticks = len(visible_labels)
if pos == x_number_of_ticks - 2:
return x_unit_name
else:
            return str(x)
ax.xaxis.set_major_locator(mpl_ticker.MaxNLocator(prune="upper"))
ax.xaxis.set_major_formatter(mpl_ticker.FuncFormatter(x_tick_formatter))
# change the y unit name
def y_tick_formatter(x, pos):
visible_labels = [t for t in ax.get_yticklabels() if t.get_visible()]
y_number_of_ticks = len(visible_labels)
if pos == y_number_of_ticks - 2:
return y_unit_name
else:
            return str(x)
ax.yaxis.set_major_locator(mpl_ticker.MaxNLocator(prune="upper"))
ax.yaxis.set_major_formatter(mpl_ticker.FuncFormatter(y_tick_formatter))
if __name__ == "__main__":
# Minimal example (tested in Python 2.x)
t = np.arange(0.0, 1.0 + 0.01, 0.01)
s = np.cos(4 * np.pi * t) + 2
plt.plot(t, s)
plt.xlabel("Time $t$", fontsize=25)
plt.ylabel("Voltage $U$", fontsize=20)
ax = plt.gca()
apply_din461(ax, "s", "V")
plt.tight_layout()
plt.show()
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Simple agent which chooses a random label from the label candidates if
they are available. If candidates are not available, it repeats the label.
"""
import random
from parlai.core.agents import Agent
class RandomCandidateAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt)
self.id = 'RandomCandidateAgent'
random.seed(42)
def act(self):
obs = self.observation
if obs is None:
return {'text': 'Nothing to reply to yet.'}
reply = {}
reply['id'] = self.getID()
label_candidates = obs.get('label_candidates')
if label_candidates:
random.shuffle(label_candidates)
reply['text_candidates'] = label_candidates
reply['text'] = label_candidates[0]
else:
# reply with I don't know.
reply['text'] = "I don't know."
return reply
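# --- Hedged usage sketch (illustrative, not part of ParlAI) ---
# Constructs the agent with a minimal (hypothetical) opt dict and hands it one
# observation directly; real ParlAI runs go through the framework's world loop.
def _example_reply():
    agent = RandomCandidateAgent(opt={})
    agent.observation = {'text': 'Pick one.', 'label_candidates': ['a', 'b', 'c']}
    return agent.act()['text']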
|
import tensorrt as trt
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .repeat import *
from .exview import convert_exview
@tensorrt_converter('torch.Tensor.expand')
def convert_expand(ctx):
old_args = ctx.method_args
input = ctx.method_args[0]
    if isinstance(ctx.method_args[1], int):
        sizes = ctx.method_args[1:]
    else:
        sizes = ctx.method_args[1]
output = ctx.method_return
repeat_shape = []
for i in range(output.dim()):
if i < output.dim()-input.dim():
repeat_shape.append(output.shape[i])
else:
repeat_shape.append(output.shape[i]//input.shape[i+input.dim()-output.dim()])
ctx.method_args = [input]+repeat_shape
ctx.method_return = output
convert_repeat(ctx)
ctx.method_args=old_args
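# --- Hedged usage sketch (illustrative, not part of torch2trt) ---
# Shows the kind of module this converter handles. Running it requires torch,
# TensorRT and a CUDA device; the helper is only defined here, never called.
def _example_expand_conversion():
    import torch
    from torch2trt import torch2trt

    class ExpandModule(torch.nn.Module):
        def forward(self, x):
            return x.expand(2, 3, 4)

    x = torch.ones(1, 3, 4).cuda()
    return torch2trt(ExpandModule().eval().cuda(), [x])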
|
"""This module contains the general information for StorageNvmeSwitch ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageNvmeSwitchConsts:
LINK_STATUS_DEGRADED = "degraded"
LINK_STATUS_DELETED = "deleted"
LINK_STATUS_FAILED = "failed"
LINK_STATUS_FLASH_FAILED = "flash-failed"
LINK_STATUS_MODERATE_FAULT = "moderate-fault"
LINK_STATUS_OPTIMAL = "optimal"
LINK_STATUS_SEVERE_FAULT = "severe-fault"
LINK_STATUS_UNKNOWN = "unknown"
LINK_STATUS_UNRESPONSIVE = "unresponsive"
M_STATUS_DEGRADED = "degraded"
M_STATUS_DELETED = "deleted"
M_STATUS_FAILED = "failed"
M_STATUS_FLASH_FAILED = "flash-failed"
M_STATUS_MODERATE_FAULT = "moderate-fault"
M_STATUS_OPTIMAL = "optimal"
M_STATUS_SEVERE_FAULT = "severe-fault"
M_STATUS_UNKNOWN = "unknown"
M_STATUS_UNRESPONSIVE = "unresponsive"
OPER_STATE_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPER_STATE_AUTO_UPGRADE = "auto-upgrade"
OPER_STATE_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPER_STATE_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPER_STATE_CHASSIS_INTRUSION = "chassis-intrusion"
OPER_STATE_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPER_STATE_CONFIG = "config"
OPER_STATE_DECOMISSIONING = "decomissioning"
OPER_STATE_DEGRADED = "degraded"
OPER_STATE_DISABLED = "disabled"
OPER_STATE_DISCOVERY = "discovery"
OPER_STATE_DISCOVERY_FAILED = "discovery-failed"
OPER_STATE_EQUIPMENT_PROBLEM = "equipment-problem"
OPER_STATE_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPER_STATE_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPER_STATE_IDENTIFY = "identify"
OPER_STATE_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPER_STATE_INOPERABLE = "inoperable"
OPER_STATE_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPER_STATE_MALFORMED_FRU = "malformed-fru"
OPER_STATE_NON_OPTIMAL = "non-optimal"
OPER_STATE_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPER_STATE_NOT_SUPPORTED = "not-supported"
OPER_STATE_OPERABLE = "operable"
OPER_STATE_PEER_COMM_PROBLEM = "peer-comm-problem"
OPER_STATE_PERFORMANCE_PROBLEM = "performance-problem"
OPER_STATE_POST_FAILURE = "post-failure"
OPER_STATE_POWER_PROBLEM = "power-problem"
OPER_STATE_POWERED_OFF = "powered-off"
OPER_STATE_REMOVED = "removed"
OPER_STATE_THERMAL_PROBLEM = "thermal-problem"
OPER_STATE_UNKNOWN = "unknown"
OPER_STATE_UNSUPPORTED_CONFIG = "unsupported-config"
OPER_STATE_UPGRADE_PROBLEM = "upgrade-problem"
OPER_STATE_VOLTAGE_PROBLEM = "voltage-problem"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PERF_LOWER_CRITICAL = "lower-critical"
PERF_LOWER_NON_CRITICAL = "lower-non-critical"
PERF_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
PERF_NOT_SUPPORTED = "not-supported"
PERF_OK = "ok"
PERF_UNKNOWN = "unknown"
PERF_UPPER_CRITICAL = "upper-critical"
PERF_UPPER_NON_CRITICAL = "upper-non-critical"
PERF_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
POWER_DEGRADED = "degraded"
POWER_ERROR = "error"
POWER_FAILED = "failed"
POWER_NOT_SUPPORTED = "not-supported"
POWER_OFF = "off"
POWER_OFFDUTY = "offduty"
POWER_OFFLINE = "offline"
POWER_OK = "ok"
POWER_ON = "on"
POWER_ONLINE = "online"
POWER_POWER_SAVE = "power-save"
POWER_TEST = "test"
POWER_UNKNOWN = "unknown"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
SWITCH_STATUS_DEGRADED = "degraded"
SWITCH_STATUS_DELETED = "deleted"
SWITCH_STATUS_FAILED = "failed"
SWITCH_STATUS_FLASH_FAILED = "flash-failed"
SWITCH_STATUS_MODERATE_FAULT = "moderate-fault"
SWITCH_STATUS_OPTIMAL = "optimal"
SWITCH_STATUS_SEVERE_FAULT = "severe-fault"
SWITCH_STATUS_UNKNOWN = "unknown"
SWITCH_STATUS_UNRESPONSIVE = "unresponsive"
TEMPERATURE_NOT_APPLICABLE = "not-applicable"
THERMAL_LOWER_CRITICAL = "lower-critical"
THERMAL_LOWER_NON_CRITICAL = "lower-non-critical"
THERMAL_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
THERMAL_NOT_SUPPORTED = "not-supported"
THERMAL_OK = "ok"
THERMAL_UNKNOWN = "unknown"
THERMAL_UPPER_CRITICAL = "upper-critical"
THERMAL_UPPER_NON_CRITICAL = "upper-non-critical"
THERMAL_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
TYPE_DIRECT_ATTACHED = "DIRECT-ATTACHED"
TYPE_HHHL = "HHHL"
TYPE_MSWITCH = "MSWITCH"
TYPE_UNKNOWN = "UNKNOWN"
VOLTAGE_LOWER_CRITICAL = "lower-critical"
VOLTAGE_LOWER_NON_CRITICAL = "lower-non-critical"
VOLTAGE_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
VOLTAGE_NOT_SUPPORTED = "not-supported"
VOLTAGE_OK = "ok"
VOLTAGE_UNKNOWN = "unknown"
VOLTAGE_UPPER_CRITICAL = "upper-critical"
VOLTAGE_UPPER_NON_CRITICAL = "upper-non-critical"
VOLTAGE_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
class StorageNvmeSwitch(ManagedObject):
"""This is StorageNvmeSwitch class."""
consts = StorageNvmeSwitchConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageNvmeSwitch", "storageNvmeSwitch", "nvme-switch-[id]", VersionMeta.Version323a, "InputOutput", 0x3f, [], ["read-only"], [u'computeBoard'], [u'faultInst', u'firmwareBootDefinition', u'firmwareRunning'], [None])
prop_meta = {
"bus_address": MoPropertyMeta("bus_address", "busAddress", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version323a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"component_id": MoPropertyMeta("component_id", "componentId", "uint", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"drive_count": MoPropertyMeta("drive_count", "driveCount", "uint", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version323a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], ["0-4294967295"]),
"link_status": MoPropertyMeta("link_status", "linkStatus", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "deleted", "failed", "flash-failed", "moderate-fault", "optimal", "severe-fault", "unknown", "unresponsive"], []),
"location_dn": MoPropertyMeta("location_dn", "locationDn", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"m_status": MoPropertyMeta("m_status", "mStatus", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "deleted", "failed", "flash-failed", "moderate-fault", "optimal", "severe-fault", "unknown", "unresponsive"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"perf": MoPropertyMeta("perf", "perf", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
"power": MoPropertyMeta("power", "power", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "error", "failed", "not-supported", "off", "offduty", "offline", "ok", "on", "online", "power-save", "test", "unknown"], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"product_id": MoPropertyMeta("product_id", "productId", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"product_revision": MoPropertyMeta("product_revision", "productRevision", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version323a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_name": MoPropertyMeta("switch_name", "switchName", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"switch_status": MoPropertyMeta("switch_status", "switchStatus", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "deleted", "failed", "flash-failed", "moderate-fault", "optimal", "severe-fault", "unknown", "unresponsive"], []),
"temperature": MoPropertyMeta("temperature", "temperature", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, r"""^([\-]?)([123]?[1234]?)([0-9]{0,36})(([.])([0-9]{1,10}))?$""", ["not-applicable"], ["0-4294967295"]),
"thermal": MoPropertyMeta("thermal", "thermal", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["DIRECT-ATTACHED", "HHHL", "MSWITCH", "UNKNOWN"], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"voltage": MoPropertyMeta("voltage", "voltage", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
}
prop_map = {
"busAddress": "bus_address",
"childAction": "child_action",
"componentId": "component_id",
"dn": "dn",
"driveCount": "drive_count",
"id": "id",
"linkStatus": "link_status",
"locationDn": "location_dn",
"mStatus": "m_status",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operState": "oper_state",
"operability": "operability",
"perf": "perf",
"power": "power",
"presence": "presence",
"productId": "product_id",
"productRevision": "product_revision",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"status": "status",
"switchName": "switch_name",
"switchStatus": "switch_status",
"temperature": "temperature",
"thermal": "thermal",
"type": "type",
"vendor": "vendor",
"voltage": "voltage",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.bus_address = None
self.child_action = None
self.component_id = None
self.drive_count = None
self.link_status = None
self.location_dn = None
self.m_status = None
self.model = None
self.oper_qualifier_reason = None
self.oper_state = None
self.operability = None
self.perf = None
self.power = None
self.presence = None
self.product_id = None
self.product_revision = None
self.revision = None
self.sacl = None
self.serial = None
self.status = None
self.switch_name = None
self.switch_status = None
self.temperature = None
self.thermal = None
self.type = None
self.vendor = None
self.voltage = None
ManagedObject.__init__(self, "StorageNvmeSwitch", parent_mo_or_dn, **kwargs)
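# --- Hedged usage sketch (illustrative, not part of the generated SDK) ---
# Queries StorageNvmeSwitch instances from a UCS Manager; the host name and
# credentials are placeholders.
def _example_query_nvme_switches(host='ucsm.example.com', user='admin', password='password'):
    from ucsmsdk.ucshandle import UcsHandle
    handle = UcsHandle(host, user, password)
    handle.login()
    try:
        return handle.query_classid('storageNvmeSwitch')
    finally:
        handle.logout()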
|
import dataclasses
from operator import itemgetter
from ...account import models as account_models
from ...app import models as app_models
from ...attribute import models as attribute_models
from ...checkout import models as checkout_models
from ...core.exceptions import PermissionDenied
from ...core.models import ModelWithMetadata
from ...discount import models as discount_models
from ...giftcard import models as giftcard_models
from ...order import models as order_models
from ...page import models as page_models
from ...payment import models as payment_models
from ...product import models as product_models
from ...shipping import models as shipping_models
from ...shipping.interface import ShippingMethodData
from ...warehouse import models as warehouse_models
from ..utils import get_user_or_app_from_context
from .permissions import PRIVATE_META_PERMISSION_MAP
def resolve_object_with_metadata_type(instance):
# Imports inside resolvers to avoid circular imports.
from ...invoice import models as invoice_models
from ...menu import models as menu_models
from ..account import types as account_types
from ..app import types as app_types
from ..attribute import types as attribute_types
from ..checkout import types as checkout_types
from ..discount import types as discount_types
from ..giftcard import types as giftcard_types
from ..invoice import types as invoice_types
from ..menu import types as menu_types
from ..order import types as order_types
from ..page import types as page_types
from ..payment import types as payment_types
from ..product import types as product_types
from ..shipping import types as shipping_types
from ..warehouse import types as warehouse_types
if isinstance(instance, ModelWithMetadata):
MODEL_TO_TYPE_MAP = {
app_models.App: app_types.App,
attribute_models.Attribute: attribute_types.Attribute,
product_models.Category: product_types.Category,
checkout_models.Checkout: checkout_types.Checkout,
product_models.Collection: product_types.Collection,
product_models.DigitalContent: product_types.DigitalContent,
order_models.Fulfillment: order_types.Fulfillment,
giftcard_models.GiftCard: giftcard_types.GiftCard,
order_models.Order: order_types.Order,
invoice_models.Invoice: invoice_types.Invoice,
page_models.Page: page_types.Page,
page_models.PageType: page_types.PageType,
payment_models.Payment: payment_types.Payment,
product_models.Product: product_types.Product,
product_models.ProductType: product_types.ProductType,
product_models.ProductVariant: product_types.ProductVariant,
menu_models.Menu: menu_types.Menu,
menu_models.MenuItem: menu_types.MenuItem,
shipping_models.ShippingMethod: shipping_types.ShippingMethodType,
shipping_models.ShippingZone: shipping_types.ShippingZone,
account_models.User: account_types.User,
warehouse_models.Warehouse: warehouse_types.Warehouse,
discount_models.Sale: discount_types.Sale,
discount_models.Voucher: discount_types.Voucher,
}
return MODEL_TO_TYPE_MAP.get(instance.__class__, None), instance.pk
elif dataclasses.is_dataclass(instance):
DATACLASS_TO_TYPE_MAP = {ShippingMethodData: shipping_types.ShippingMethod}
return DATACLASS_TO_TYPE_MAP.get(instance.__class__, None), instance.id
def resolve_metadata(metadata: dict):
return sorted(
[{"key": k, "value": v} for k, v in metadata.items()],
key=itemgetter("key"),
)
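# Hedged example (illustrative, not part of the original module):
# resolve_metadata flattens a metadata dict into key-sorted {"key", "value"}
# pairs, e.g. resolve_metadata({"b": "2", "a": "1"}) returns
# [{"key": "a", "value": "1"}, {"key": "b", "value": "2"}].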
def resolve_private_metadata(root: ModelWithMetadata, info):
item_type, item_id = resolve_object_with_metadata_type(root)
if not item_type:
raise NotImplementedError(
f"Model {type(root)} can't be mapped to type with metadata. "
"Make sure that model exists inside MODEL_TO_TYPE_MAP."
)
get_required_permission = PRIVATE_META_PERMISSION_MAP[item_type.__name__]
if not get_required_permission:
raise PermissionDenied()
required_permissions = get_required_permission(info, item_id) # type: ignore
if not isinstance(required_permissions, list):
raise PermissionDenied()
requester = get_user_or_app_from_context(info.context)
if not requester.has_perms(required_permissions):
raise PermissionDenied()
return resolve_metadata(root.private_metadata)
|
import logging
import re
from collections import defaultdict
from . import Analysis
from ..knowledge_base import KnowledgeBase
from .. import SIM_PROCEDURES
from ..codenode import HookNode
from ..sim_variable import SimConstantVariable, SimRegisterVariable, SimMemoryVariable, SimStackVariable
l = logging.getLogger(name=__name__)
class ConstantPropagation(object):
def __init__(self, constant, constant_assignment_loc, constant_consuming_loc):
self.constant = constant
self.constant_assignment_loc = constant_assignment_loc
self.constant_consuming_loc = constant_consuming_loc
def __repr__(self):
s = "<Constant %#x propagates from %#x to %#x>" % (
self.constant,
self.constant_assignment_loc.ins_addr,
self.constant_consuming_loc.ins_addr
)
return s
class RedundantStackVariable(object):
def __init__(self, argument, stack_variable, stack_variable_consuming_locs):
self.argument = argument
self.stack_variable = stack_variable
self.stack_variable_consuming_locs = stack_variable_consuming_locs
self.argument_register_as_retval = False
def __repr__(self):
s = "<StackVar %s for %s at %d locations%s>" % (
self.stack_variable,
self.argument,
len(self.stack_variable_consuming_locs),
" - retval" if self.argument_register_as_retval else "",
)
return s
class RegisterReallocation(object):
def __init__(self, stack_variable, register_variable, stack_variable_sources, stack_variable_consumers,
prologue_addr, prologue_size, epilogue_addr, epilogue_size):
"""
Constructor.
:param SimStackVariable stack_variable:
:param SimRegisterVariable register_variable:
:param list stack_variable_sources:
:param list stack_variable_consumers:
:param int prologue_addr:
:param int prologue_size:
:param int epilogue_addr:
:param int epilogue_size:
"""
self.stack_variable = stack_variable
self.register_variable = register_variable
self.stack_variable_sources = stack_variable_sources
self.stack_variable_consumers = stack_variable_consumers
self.prologue_addr = prologue_addr
self.prologue_size = prologue_size
self.epilogue_addr = epilogue_addr
self.epilogue_size = epilogue_size
def __repr__(self):
s = "<RegisterReallocation %s for %s with %d sources and %d consumers>" % (
self.register_variable,
self.stack_variable,
len(self.stack_variable_sources),
len(self.stack_variable_consumers),
)
return s
class DeadAssignment(object):
def __init__(self, pv):
"""
Constructor.
:param angr.analyses.ddg.ProgramVariable pv: The assignment to remove.
"""
self.pv = pv
def __repr__(self):
s = "<DeadAssignmentElimination %s>" % self.pv
return s
class BinaryOptimizer(Analysis):
"""
This is a collection of binary optimization techniques we used in Mechanical Phish during the finals of Cyber Grand
    Challenge. It focuses on dealing with some serious speed-impacting code constructs, and *sort of* worked on *some*
    CGC binaries compiled with O0. Use this analysis as a reference for how to use the data dependency graph and such.
There is no guarantee that BinaryOptimizer will ever work on non-CGC binaries. Feel free to give us PR or MR, but
please *do not* ask for support of non-CGC binaries.
"""
BLOCKS_THRESHOLD = 500 # do not optimize a function if it has more than this number of blocks
def __init__(self, cfg, techniques):
self.cfg = cfg
if techniques is None:
raise Exception('At least one optimization technique must be specified.')
supported_techniques = {
'constant_propagation',
'redundant_stack_variable_removal',
'register_reallocation',
'dead_assignment_elimination',
}
if techniques - supported_techniques:
raise Exception('At least one optimization technique specified is not supported.')
self._techniques = techniques.copy()
self.constant_propagations = [ ]
self.redundant_stack_variables = [ ]
self.register_reallocations = [ ]
self.dead_assignments = [ ]
self.optimize()
def optimize(self):
for f in self.kb.functions.values(): # type: angr.knowledge.Function
# if there are unresolved targets in this function, we do not try to optimize it
unresolvable_targets = (SIM_PROCEDURES['stubs']['UnresolvableJumpTarget'],
SIM_PROCEDURES['stubs']['UnresolvableCallTarget'])
if any([ n.sim_procedure in unresolvable_targets for n in f.graph.nodes()
if isinstance(n, HookNode) ]):
continue
if len(f.block_addrs_set) > self.BLOCKS_THRESHOLD:
continue
self._optimize_function(f)
def _optimize_function(self, function):
"""
:param angr.knowledge.Function function:
:return:
"""
#if function.addr != 0x8048250:
# return
func_kb = KnowledgeBase(self.project)
# switch to non-optimized IR, since optimized IR will optimize away register reads/writes
# for example,
# .text:08048285 add eax, [ebp+var_8]
# .text:08048288 mov [ebp+var_C], eax
# becomes
# 06 | ------ IMark(0x8048285, 3, 0) ------
# 07 | t25 = Add32(t24,0xfffffff8)
# 08 | t5 = LDle:I32(t25)
# 09 | t4 = Add32(t2,t5)
# 10 | PUT(eip) = 0x08048288
# 11 | ------ IMark(0x8048288, 3, 0) ------
# 12 | t27 = Add32(t24,0xfffffff4)
# 13 | STle(t27) = t4
# 14 | PUT(eip) = 0x0804828b
# there is no write to or read from eax
cfg = self.project.analyses.CFGEmulated(kb=func_kb,
call_depth=1,
base_graph=function.graph,
keep_state=True,
starts=(function.addr,),
iropt_level=0,
)
ddg = self.project.analyses.DDG(kb=func_kb,
cfg=cfg
)
if 'constant_propagation' in self._techniques:
self._constant_propagation(function, ddg.simplified_data_graph)
if 'redundant_stack_variable_removal' in self._techniques:
self._redundant_stack_variable_removal(function, ddg.simplified_data_graph)
if 'register_reallocation' in self._techniques:
self._register_reallocation(function, ddg.simplified_data_graph)
if 'dead_assignment_elimination' in self._techniques:
self._dead_assignment_elimination(function, ddg.simplified_data_graph)
def _constant_propagation(self, function, data_graph): #pylint:disable=unused-argument
"""
:param function:
:param networkx.MultiDiGraph data_graph:
:return:
"""
# find all edge sequences that looks like const->reg->memory
for n0 in data_graph.nodes():
if not isinstance(n0.variable, SimConstantVariable):
continue
n1s = list(data_graph.successors(n0))
if len(n1s) != 1:
continue
n1 = n1s[0]
if not isinstance(n1.variable, SimRegisterVariable):
continue
if len(list(data_graph.predecessors(n1))) != 1:
continue
n2s = list(data_graph.successors(n1))
if len(n2s) != 1:
continue
n2 = n2s[0]
if not isinstance(n2.variable, SimMemoryVariable):
continue
n2_inedges = data_graph.in_edges(n2, data=True)
if len([ 0 for _, _, data in n2_inedges if 'type' in data and data['type'] == 'mem_data' ]) != 1:
continue
cp = ConstantPropagation(n0.variable.value, n0.location, n2.location)
self.constant_propagations.append(cp)
# print n0, n1, n2
def _redundant_stack_variable_removal(self, function, data_graph):
"""
If an argument passed from the stack (i.e. dword ptr [ebp+4h]) is saved to a local variable on the stack at the
beginning of the function, and this local variable was never modified anywhere in this function, and no pointer
of any stack variable is saved in any register, then we can replace all references to this local variable to
that argument instead.
:param function:
:param networkx.MultiDiGraph data_graph:
:return:
"""
# check if there is any stack pointer being stored into any register other than esp
# basically check all consumers of stack pointers
stack_ptrs = [ ]
sp_offset = self.project.arch.registers['esp'][0]
bp_offset = self.project.arch.registers['ebp'][0]
for n in data_graph.nodes():
if isinstance(n.variable, SimRegisterVariable) and n.variable.reg in (sp_offset, bp_offset):
stack_ptrs.append(n)
# for each stack pointer variable, make sure none of its consumers is a general purpose register
for stack_ptr in stack_ptrs:
out_edges = data_graph.out_edges(stack_ptr, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
# we don't care about killing edges
continue
if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg < 40 and \
dst.variable.reg not in (sp_offset, bp_offset):
# oops
l.debug('Function %s does not satisfy requirements of redundant stack variable removal.',
repr(function)
)
return
argument_variables = [ ]
for n in data_graph.nodes():
if isinstance(n.variable, SimStackVariable) and n.variable.base == 'bp' and n.variable.offset >= 0:
argument_variables.append(n)
if not argument_variables:
return
#print function
#print argument_variables
argument_to_local = { }
argument_register_as_retval = set()
# for each argument, find its correspondence on the local stack frame
for argument_variable in argument_variables:
# is it copied to the stack?
successors0 = list(data_graph.successors(argument_variable))
if not successors0:
continue
if len(successors0) != 1:
continue
if isinstance(successors0[0].variable, SimRegisterVariable):
# argument -> register -> stack
out_edges = data_graph.out_edges(successors0[0], data=True)
successors1 = [ s for _, s, data in out_edges if 'type' not in data or data['type'] != 'kill' ]
if len(successors1) == 1:
successor1 = successors1[0]
if isinstance(successor1.variable, SimStackVariable):
if (successor1.variable.base == 'sp' and successor1.variable.offset > 0) or \
(successor1.variable.base == 'bp' and successor1.variable.offset < 0):
# yes it's copied onto the stack!
argument_to_local[argument_variable] = successor1
# if the register is eax, and it's not killed later, it might be the return value of this function
# in that case, we cannot eliminate the instruction that moves stack argument to that register
if successors0[0].variable.reg == self.project.arch.registers['eax'][0]:
killers = [ s for _, s, data in out_edges if 'type' in data and data['type'] == 'kill']
if not killers:
# it might be the return value
argument_register_as_retval.add(argument_variable)
else:
# TODO:
import ipdb; ipdb.set_trace()
#import pprint
#pprint.pprint(argument_to_local, width=160)
# find local correspondence that are not modified throughout this function
redundant_stack_variables = [ ]
for argument, local_var in argument_to_local.items():
# local_var cannot be killed anywhere
out_edges = data_graph.out_edges(local_var, data=True)
consuming_locs = [ ]
for _, consumer, data in out_edges:
consuming_locs.append(consumer.location)
if 'type' in data and data['type'] == 'kill':
break
else:
# no killing edges. the value is not changed!
rsv = RedundantStackVariable(argument, local_var, consuming_locs)
if argument in argument_register_as_retval:
rsv.argument_register_as_retval = True
redundant_stack_variables.append(rsv)
self.redundant_stack_variables.extend(redundant_stack_variables)
def _register_reallocation(self, function, data_graph):
"""
Find unused registers throughout the function, and use those registers to replace stack variables.
Only functions that satisfy the following criteria can be optimized in this way:
- The function does not call any other function.
- The function does not use esp to index any stack variable.
- Prologue and epilogue of the function is identifiable.
- At least one register is not used in the entire function.
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
"""
# make sure this function does not call other functions
if function.callout_sites:
return
if len(function.endpoints) != 1:
return
# identify function prologue and epilogue
startpoint_block = self.project.factory.block(function.startpoint.addr).capstone
startpoint_insns = startpoint_block.insns
# supported function prologues:
#
# push ebp
# mov ebp, esp
# sub esp, [0-9a-f]+h
#
# push ebp
# mov ebp, esp
# push eax
if len(startpoint_insns) < 3:
return
insn0, insn1, insn2 = startpoint_insns[:3]
if not (insn0.mnemonic == 'push' and insn0.op_str == 'ebp'):
return
if not (insn1.mnemonic == 'mov' and insn1.op_str == 'ebp, esp'):
return
if not (insn2.mnemonic == 'sub' and re.match(r"esp, [0-9a-fx]+", insn2.op_str)) and \
not (insn2.mnemonic == 'push' and insn2.op_str == 'eax'):
return
endpoint_block = self.project.factory.block(function.endpoints[0].addr).capstone
endpoint_insns = endpoint_block.insns
# supported function epilogues:
#
# add esp, [0-9a-f]+h
# pop ebp
# ret
if len(endpoint_insns) < 3:
return
insn3, insn4, insn5 = endpoint_insns[-3:]
if not (insn3.mnemonic == 'add' and re.match(r"esp, [0-9a-fx]+", insn3.op_str)):
return
if not (insn4.mnemonic == 'pop' and insn4.op_str == 'ebp'):
return
if not insn5.mnemonic == 'ret':
return
# make sure esp is not used anywhere else - all stack variables must be indexed using ebp
esp_offset = self.project.arch.registers['esp'][0]
ebp_offset = self.project.arch.registers['ebp'][0]
esp_variables = [ ]
for n in data_graph.nodes():
if isinstance(n.variable, SimRegisterVariable) and n.variable.reg == esp_offset:
esp_variables.append(n)
# find out all call instructions
call_insns = set()
for src, dst, data in function.transition_graph.edges(data=True):
if 'type' in data and data['type'] == 'call':
src_block = function._get_block(src.addr)
call_insns.add(src_block.instruction_addrs[-1])
# there should be six esp variables + all call sites
# push ebp (insn0 - read, insn0 - write) ; sub esp, 0xXX (insn2) ;
# add esp, 0xXX (insn3) ; pop ebp (insn4) ; ret (insn5)
esp_insns = set( n.location.ins_addr for n in esp_variables )
if esp_insns != { insn0.address, insn2.address, insn3.address, insn4.address, insn5.address } | call_insns:
return
prologue_addr = insn0.address
prologue_size = insn0.size + insn1.size + insn2.size
epilogue_addr = insn3.address
epilogue_size = insn3.size + insn4.size + insn5.size
# look at consumer of those esp variables. no other instruction should be consuming them
# esp_consumer_insns = { insn0.address, insn1.address, insn2.address, insn3.address, insn4.address,
# insn5.address} | esp_insns
# for esp_variable in esp_variables: # type: angr.analyses.ddg.ProgramVariable
# consumers = data_graph.successors(esp_variable)
# if any([ consumer.location.ins_addr not in esp_consumer_insns for consumer in consumers ]):
# return
        # make sure we never get the address of those stack variables into any register
# say, lea edx, [ebp-0x4] is forbidden
# check all edges in data graph
for src, dst, data in data_graph.edges(data=True):
if isinstance(dst.variable, SimRegisterVariable) and \
dst.variable.reg != ebp_offset and \
dst.variable.reg < 40:
#to a register other than ebp
if isinstance(src.variable, SimRegisterVariable) and \
src.variable.reg == ebp_offset:
# from ebp
l.debug("Found a lea operation from ebp at %#x. Function %s cannot be optimized.",
dst.location.ins_addr,
repr(function),
)
return
# we definitely don't want to mess with fp or sse operations
for node in data_graph.nodes():
if isinstance(node.variable, SimRegisterVariable) and \
72 <= node.variable.reg < 288: # offset(mm0) <= node.variable.reg < offset(cs)
                l.debug('Found a floating-point/SSE register access at %#x. Function %s cannot be optimized.',
node.location.ins_addr,
repr(function)
)
return
l.debug("RegisterReallocation: function %s satisfies the criteria.", repr(function))
# nice. let's see if we can optimize this function
# do we have free registers?
used_general_registers = set()
for n in data_graph.nodes():
if isinstance(n.variable, SimRegisterVariable):
if n.variable.reg < 40: # this is a hardcoded limit - we only care about general registers
used_general_registers.add(n.variable.reg)
registers = self.project.arch.registers
all_general_registers = { #registers['eax'][0], registers['ecx'][0], registers['edx'][0],
registers['ebx'][0], registers['edi'][0], registers['esi'][0],
registers['esp'][0], registers['ebp'][0]
}
unused_general_registers = all_general_registers - used_general_registers
if not unused_general_registers:
l.debug("RegisterReallocation: function %s does not have any free register.", repr(function))
return
l.debug("RegisterReallocation: function %s has %d free register(s): %s",
repr(function),
len(unused_general_registers),
", ".join([self.project.arch.register_names[u] for u in unused_general_registers ])
)
# find local stack variables of size 4
stack_variables = set()
for n in data_graph.nodes():
if isinstance(n.variable, SimStackVariable) and \
n.variable.base == 'bp' and \
n.variable.size == 4 and \
n.variable.offset < 0:
stack_variables.add(n)
# alright, now we need to make sure that stack variables are never accessed by indexes
# in other words, they must be accessed directly in forms of 'dword ptr [ebp+x]'
# it's easy to do this: we get mem_addr predecessors of each stack variable, and make sure there are only two of
# them: one is ebp, the other one is a constant
#
# ah, also, since we do not want to mess with crazy fp registers, we further require none of the stack variable
# sources and consumers is a FP register.
filtered_stack_variables = set()
for stack_variable in stack_variables:
failed = False
# check how they are accessed
in_edges = data_graph.in_edges(stack_variable, data=True)
for src, _, data in in_edges:
if 'type' in data and data['type'] == 'mem_addr':
if isinstance(src.variable, SimRegisterVariable) and src.variable.reg == ebp_offset:
# ebp
pass
elif isinstance(src.variable, SimConstantVariable):
# the constant
pass
else:
# ouch
failed = True
break
if isinstance(src.variable, SimRegisterVariable) and src.variable.reg >= 72:
# it comes from a FP register
failed = True
break
if failed:
continue
# check consumers
out_edges = data_graph.out_edges(stack_variable, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
continue
if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg >= 72:
# an FP register is the consumer
failed = True
break
if failed:
continue
filtered_stack_variables.add(stack_variable)
# order the stack variables by the sum of their in and out degrees.
stack_variable_to_degree = defaultdict(int)
stack_variable_sources = defaultdict(list)
for sv in filtered_stack_variables:
stack_variable_to_degree[sv.variable] += data_graph.in_degree(sv)
stack_variable_to_degree[sv.variable] += data_graph.out_degree(sv)
stack_variable_sources[sv.variable].append(sv)
sorted_stack_variables = sorted(stack_variable_to_degree.keys(),
key=lambda sv: stack_variable_to_degree[sv],
reverse=True
)
# aha these are the ones that we can replace!
for reg, sv in zip(unused_general_registers, sorted_stack_variables):
non_initial_sources = [src for src in stack_variable_sources[sv] if not src.initial]
if not non_initial_sources:
# we failed to find any source for it, which indicates a failure in our dependence analysis
# skip
continue
# get consumers
consumers = set()
for src in stack_variable_sources[sv]:
out_edges = data_graph.out_edges(src, data=True)
for _, dst, data in out_edges:
if 'type' not in data or data['type'] != 'kill':
consumers.add(dst)
rr = RegisterReallocation(sv, SimRegisterVariable(reg, 4), non_initial_sources,
list(consumers), prologue_addr, prologue_size, epilogue_addr, epilogue_size
)
self.register_reallocations.append(rr)
l.debug("RegisterReallocation: %s will replace %s in function %s.",
rr.register_variable,
rr.stack_variable,
repr(function)
)
def _dead_assignment_elimination(self, function, data_graph): #pylint:disable=unused-argument
"""
        Remove assignments to registers that have no consumers but are immediately killed.
BROKEN - DO NOT USE IT
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
"""
register_pvs = set()
for node in data_graph.nodes():
if isinstance(node.variable, SimRegisterVariable) and \
node.variable.reg is not None and \
node.variable.reg < 40:
register_pvs.add(node)
for reg in register_pvs:
# does it have a consumer?
out_edges = data_graph.out_edges(reg, data=True)
consumers = [ ]
killers = [ ]
for _, _, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(data)
else:
consumers.append(data)
if not consumers and killers:
# we can remove the assignment!
da = DeadAssignment(reg)
self.dead_assignments.append(da)
from angr.analyses import AnalysesHub
AnalysesHub.register_default('BinaryOptimizer', BinaryOptimizer)
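# Once registered, the analysis is reachable from a loaded project. A rough sketch only --
# the constructor arguments that BinaryOptimizer expects are an assumption here and may differ:
#
#     import angr
#     proj = angr.Project('/path/to/binary', auto_load_libs=False)
#     opt = proj.analyses.BinaryOptimizer()
#     # opt.redundant_stack_variables / opt.register_reallocations then hold the findings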
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moby_dev_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import logging
import struct
import six
import pytest
import nose
import numpy as np
from test_cpu_helper import TestModelInferenceCPU
TestBase = TestModelInferenceCPU()
@pytest.mark.p0
def test_inference_resnet50_cpu():
"""
Inference and check value
resnet50 cpu model
Args:
None
Return:
None
"""
model_name = "ResNet50_pretrained"
tmp_path = os.path.join(TestBase.model_root, "classification")
model_path = os.path.join(tmp_path, model_name, "model")
data_path = os.path.join(tmp_path, model_name, "data/data.json")
delta = 0.0001
res, exp = TestBase.get_infer_results(model_path, data_path)
for i in range(len(res)):
TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import timeit
import numpy as np
import sys
import random as rand
class solutions:
def bsearch(self, nums,target):
"""
        Binary search.
:param nums:
:param target:
:return:
"""
nums=sorted(nums)
left=0
right=len(nums)-1
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow (not actually an issue in Python)
if nums[mid]==target:
                return mid  # return the index
elif target>nums[mid]:
left=mid+1
elif target<nums[mid]:
right=mid-1
return None
def sqrt(self,a):
"""
        Compute the square root of a number, accurate to 6 decimal places,
        using bisection (divide and conquer). Only valid for a >= 1, since
        the search interval is [0, a].
:param a:
:return:
"""
left = 0
right=float(a)
while right-left >= 1.0e-6:
mid= (right+left)/2.0
if pow(mid,2)==a:
return mid
elif a>pow(mid,2):
left = mid
elif a<pow(mid,2):
right = mid
return left
def besearch_first_equal(self,nums,target):
"""
        Find the first element whose value equals the given value.
:param nums: [1,3,4,5,6,8,8,8,11,18]
:param target: 8
:return: 5
"""
left=0
right=len(nums)-1
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow
            if nums[mid]==target:
                # search toward the left
if mid==0 or (mid-1>=0 and nums[mid-1]!=target):
return mid
else:
while mid>=0 and nums[mid]==target:
mid-=1
return mid+1
elif target>nums[mid]:
left=mid+1
elif target<nums[mid]:
right=mid-1
return None
def besearch_last_equal(self,nums,target):
"""
        Find the last element whose value equals the given value.
:param nums: [1,3,4,5,6,8,8,8,11,18]
:param target: 8
:return: 7
"""
left=0
right=len(nums)-1
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow
            if nums[mid]==target:
                # search toward the right
if mid== len(nums)-1 or (mid+1<=len(nums)-1 and nums[mid+1]!=target):
return mid
else:
while mid<=len(nums)-1 and nums[mid]==target:
mid+=1
return mid-1
elif target>nums[mid]:
left=mid+1
elif target<nums[mid]:
right=mid-1
return None
def besearch_first_large(self,nums,target):
"""
        Find the first element greater than or equal to the given value.
:param nums: [1,3,4,5,6,8,8,8,11,18]
:param target: 7
:return: 5 nums[5]==8
"""
left=0
right=len(nums)-1
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow
            if target>nums[mid]:
                left=mid+1
            elif target<=nums[mid]:
                # search toward the left
if mid == 0 or (mid - 1 >= 0 and nums[mid - 1] < target):
return mid
else:
while mid >= 0 and nums[mid] >= target:
mid -= 1
return mid + 1
return None
def besearch_last_small(self,nums,target):
"""
        Find the last element less than or equal to the given value.
:param nums: [1,3,4,5,6,8,8,8,11,18]
:param target: 7
:return: 4 nums[4]==6
"""
left=0
right=len(nums)-1
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow
            if target>=nums[mid]:
                # search toward the right
if mid== len(nums)-1 or (mid+1<=len(nums)-1 and nums[mid+1] > target):
return mid
else:
while mid <= len(nums) - 1 and nums[mid] <= target:
mid += 1
return mid - 1
elif target<nums[mid]:
                right=mid-1
return None
def __bsearch(self, nums,target,left,right):
"""
        Binary search within explicit [left, right] bounds.
:param nums:
:param target:
:return:
"""
while left<=right:
            mid= left+(right-left)//2  # avoid left+right overflow (not actually an issue in Python)
            if nums[mid]==target:
                return mid  # return the index
elif target>nums[mid]:
left=mid+1
elif target<nums[mid]:
right=mid-1
return None
def besearch_loop_sorted(self,nums,target):
"""
        The input is a circularly sorted (rotated) array, e.g. 4,5,6,1,2,3.
        Implement a binary search for an element equal to the given value.
:param nums: [4,5,6,1,2,3]
:param target: 6
:return: 2
"""
        divide_index=0  # index of the rotation point
i = 1
while i<len(nums):
if nums[i] < nums[i-1]:
divide_index=i
break
i+=1
nums1_left=0
nums1_right=divide_index-1
nums2_left=divide_index
nums2_right=len(nums)-1
res=None
if target <= nums[nums1_right]:
res=self.__bsearch(nums,target,nums1_left,nums1_right)
else: # target>nums[nums1_right]
res = self.__bsearch(nums, target, nums2_left, nums2_right)
return res
if __name__ == '__main__':
sol = solutions()
    ##-------- part1: binary search basics ------------##
    # l=np.random.randint(int(1e7),size=int(1e7)) # 10 million integers
# l1=list(l)
# print('l1 list memory_size:', sys.getsizeof(l1),'B') # 90000112 B= 90MB
#
# l2=set(l)
# print('l2 hash memory_size:', sys.getsizeof(l2),'B') # 268435680 B= 268MB
#
# start = timeit.default_timer()
# print('by bsearch: ')
    # print(sol.bsearch(l1, 100)) # search for 100 in l1
# end = timeit.default_timer()
# print('time: ', end-start ,'s')
#
# start = timeit.default_timer()
# print('by hash search: ')
# print( 100 in l2 )
# end = timeit.default_timer()
# print('time: ', end-start ,'s')
print(pow(9,0.5),sol.sqrt(9))
print(pow(4, 0.5), sol.sqrt(4))
print(pow(10, 0.5), sol.sqrt(10))
##-------------- part1 end -----------------##
    ##-------- part2: binary search, advanced ------------##
##-------------- part2 end -----------------##
# print(sol.besearch_first_equal( [1,3,4,5,6,8,8,8,11,18],8))
# print(sol.besearch_last_equal([1, 3, 4, 5, 6, 8, 8, 8, 11, 18], 8))
# print(sol.besearch_first_large([1, 3, 4, 5, 6, 8, 8, 8, 11, 18], 7))
# print(sol.besearch_last_small([1, 3, 4, 5, 6, 8, 8, 8, 11, 18], 7))
print(sol.besearch_loop_sorted([4,5,6,1,2,3],6))
|
from .recursions.explicit_recursions import *
import random
import sys
##################################################################################################
def backtrack( self, contribs_input, mode = 'mfe' ):
'''
modes are:
mfe = backtrack, following maximum boltzmann weight. note that this is not *quite* MFE
stochastic = choose track based on boltzmann weights
enumerative = follow all tracks!
'''
#print_contribs( contribs_input )
if len( contribs_input ) == 0: return []
contrib_sum = sum( contrib[0] for contrib in contribs_input )
if mode == 'enumerative':
contribs = [ contrib for contrib in contribs_input ] # like a deepcopy
elif mode == 'mfe': contribs = [ max_contrib(contribs_input) ]
elif mode == 'stochastic' : contribs = [ get_random_contrib( contribs_input ) ]
p_bps = [] # list of tuples of (p_structure, bps_structure) for each structure
N = self.N
for contrib in contribs: # each option ('contribution' to this partition function of this sub-region)
if ( contrib[0] == 0.0 ): continue
p_contrib = contrib[0]/contrib_sum
p_bps_contrib = [ [p_contrib,[]] ]
# each 'branch'; e.g., C_eff(i,k) Z_BP(k+1, j) has a C_eff and a Z_BP branch
for backtrack_info in contrib[1]:
( Z_backtrack, i, j ) = backtrack_info
if ( i == j ): continue
for base_pair_type in self.params.base_pair_types:
if Z_backtrack == self.Z_BPq[ base_pair_type ]:
base_pair = [i%N,j%N]
base_pair.sort()
# TODO: could also add type of base pair here -- we have the info!
p_bps_contrib = [ [p_bp[0], p_bp[1]+[tuple( base_pair )] ] for p_bp in p_bps_contrib ]
backtrack_contribs = Z_backtrack.get_contribs(self,i%N,j%N)
p_bps_component = backtrack( self, backtrack_contribs, mode )
if len( p_bps_component ) == 0: continue
# put together all branches
p_bps_contrib_new = []
for p_bps1 in p_bps_contrib:
for p_bps2 in p_bps_component:
p_bps_contrib_new.append( [p_bps1[0]*p_bps2[0], p_bps1[1]+p_bps2[1]] )
p_bps_contrib = p_bps_contrib_new
p_bps += p_bps_contrib
return p_bps
##################################################################################################
def mfe( self, Z_final_contrib ):
p_bps = backtrack( self, Z_final_contrib, mode = 'mfe' )
assert( len(p_bps) == 1 )
p,bps = p_bps[0]
bps.sort()
return (bps,p)
##################################################################################################
def boltzmann_sample( self, Z_final_contrib ):
p_bps = backtrack( self, Z_final_contrib, mode = 'stochastic' )
assert( len(p_bps) == 1 )
return (p_bps[0][1],p_bps[0][0])
##################################################################################################
def enumerative_backtrack( self ):
return backtrack( self, self.Z_final.get_contribs(self,0), 'enumerative' )
##################################################################################################
def get_random_contrib( contribs ):
# Random sample weighted by probability. Must be a simple function for this.
contrib_cumsum = [ contribs[0][0] ]
for contrib in contribs[1:]: contrib_cumsum.append( contrib_cumsum[-1] + contrib[0] )
r = random.random() * contrib_cumsum[ -1 ]
for (idx,psum) in enumerate( contrib_cumsum ):
if r < psum: return contribs[idx]
def max_contrib(contribs):
max_contrib_val = None
best_contrib = None
for c in contribs:
if max_contrib_val is None or \
c[0] > max_contrib_val:
best_contrib = c
max_contrib_val = c[0]
return best_contrib
def print_contrib( contrib ):
sys.stdout.write('[')
    sys.stdout.write( '%s: ' % contrib[0] )
for n,backtrack_info in enumerate(contrib[1]):
sys.stdout.write( '%s(%d,%d)' % (backtrack_info[0].name,backtrack_info[1],backtrack_info[2]) )
if n < len( contrib[1] )-1: sys.stdout.write(',')
sys.stdout.write(']')
def print_contribs( contribs ):
    sys.stdout.write( '[ ' )
    for contrib in contribs[:-1]:
        print_contrib( contrib )
        sys.stdout.write( '; ' )
    print_contrib( contribs[-1] )
    print( ' ]' )
return
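##################################################################################################
# A minimal sketch of the weighted-sampling helper above, on a hypothetical contribution list;
# each contribution is (boltzmann_weight, [backtrack_info, ...]) as consumed by backtrack():
#
#     toy_contribs = [ (1.0, []), (3.0, []) ]
#     get_random_contrib( toy_contribs )   # returns the second entry ~75% of the time
#     max_contrib( toy_contribs )          # always returns (3.0, [])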
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/10 23:25
# @Author : Raymound luo
# @Mail : luolinhao1998@gmail.com
# @File : evluator.py
# @Software: PyCharm
# @Describe:
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.metrics import f1_score, roc_auc_score, normalized_mutual_info_score, adjusted_rand_score, \
silhouette_score, cluster
from sklearn.svm import SVC
import numpy as np
import os
import json
def KNN_train(x, y):
knn = KNeighborsClassifier()
knn.fit(x, y)
def purity_score(y_true, y_pred):
# compute contingency matrix (also called confusion matrix)
contingency_matrix = cluster.contingency_matrix(y_true, y_pred)
# return purity
return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
class Evaluator(object):
def __init__(self, method, CF_data, LP_data, result_path='./result', random_state=123, max_iter=150,
n_jobs=1):
self.method = method
self.CF_data = CF_data
self.LP_data = LP_data
self.result_path = result_path
if not os.path.exists(self.result_path):
os.makedirs((self.result_path))
self.random_state = random_state
self.max_iter = max_iter
self.n_jobs = n_jobs
self.result = {}
def get_model(self):
if self.method == "KNN":
model = KNeighborsClassifier()
elif self.method == "LR":
model = LogisticRegression(solver='lbfgs', random_state=self.random_state, max_iter=self.max_iter,
n_jobs=self.n_jobs, multi_class='auto')
elif self.method == "SVM":
model = SVC()
return model
def evluate_CF(self, emb_feature):
features, labels, num_classes, train_idx, test_idx = self.CF_data
model = self.get_model()
model.fit(emb_feature[train_idx], labels[train_idx])
score = model.predict(emb_feature[test_idx]) #
micro_f1 = f1_score(labels[test_idx], score, average='micro')
macro_f1 = f1_score(labels[test_idx], score, average='macro')
self.result['CF'] = {'Micro f1': micro_f1, 'Macro f1': macro_f1}
print("Node classification result: ")
print('Micro f1: ', micro_f1)
print('Macro f1: ', macro_f1)
def evluate_LP(self, emb_feature):
features, src_train, src_test, dst_train, dst_test, labels_train, labels_test = self.LP_data
train_edges_feature = self._concat_edges_feture(emb_feature, src_train, dst_train)
test_edges_feature = self._concat_edges_feture(emb_feature, src_test, dst_test)
model = self.get_model()
model.fit(train_edges_feature, labels_train)
score = model.predict(test_edges_feature)
f1 = f1_score(labels_test, score)
auc_score = roc_auc_score(labels_test, score)
self.result['LP'] = {'AUC': auc_score, 'F1': f1}
print("Link Prediction result: ")
print('AUC: ', auc_score)
print('F1: ', f1)
def evluate_CL(self, emb_feature, time=10, test_only=False):
features, labels, num_classes, train_idx, test_idx = self.CF_data
if test_only:
x_idx = test_idx
else:
x_idx = np.concatenate((train_idx, test_idx))
x = emb_feature[x_idx]
y = labels[x_idx]
estimator = KMeans(n_clusters=num_classes)
ARI_list = [] # adjusted_rand_score(
NMI_list = []
Purity_list = []
silhouette_score_list = []
if time:
for i in range(time):
estimator.fit(x, y)
y_pred = estimator.predict(x)
score = normalized_mutual_info_score(y, y_pred, average_method='arithmetic')
NMI_list.append(score)
s2 = adjusted_rand_score(y, y_pred)
ARI_list.append(s2)
# silhouette_score
labels = estimator.labels_
s3 = silhouette_score(x, labels, metric='euclidean')
silhouette_score_list.append(s3)
s4 = purity_score(y, y_pred)
Purity_list.append(s4)
# print('NMI_list: {}'.format(NMI_list))
score = sum(NMI_list) / len(NMI_list)
s2 = sum(ARI_list) / len(ARI_list)
s3 = sum(silhouette_score_list) / len(silhouette_score_list)
s4 = sum(Purity_list) / len(Purity_list)
print(
'NMI (10 avg): {:.4f} , ARI (10avg): {:.4f}, Purity (10avg): {:.4f}, silhouette(10avg): {:.4f}'.format(
score, s2, s4, s3))
        else:
            estimator.fit(x, y)
            y_pred = estimator.predict(x)
            score = normalized_mutual_info_score(y, y_pred)
            s2 = s3 = s4 = None  # only NMI is computed in this branch
            print("NMI on all label data: {:.5f}".format(score))
        self.result['CL'] = {'NMI': score, 'ARI': s2, 'Purity': s4, 'silhouette': s3}
def _concat_edges_feture(self, emb_feature, src_list, dst_list):
src_feature = emb_feature[src_list]
dst_feature = emb_feature[dst_list]
edges_feature = src_feature * dst_feature
# edges_feature = np.concatenate([src_feature, dst_feature], 1)
return edges_feature
def dump_result(self, p_emb, metric):
dir_name = ''
if 'CF' in metric:
dir_name += "CF_{:.2f}_{:.2f}_".format(self.result['CF']['Micro f1'], self.result['CF']['Macro f1'])
if 'LP' in metric:
dir_name += "LP_{:.2f}_{:.2f}_".format(self.result['LP']['AUC'], self.result['LP']['F1'])
if 'CL' in metric:
dir_name += "CL_{:.2f}_{:.2f}_{:.2f}_{:.2f}".format(self.result['CL']['NMI'], self.result['CL']['ARI'],
self.result['CL']['Purity'],
self.result['CL']['silhouette'])
model_path = os.path.join(self.result_path, dir_name)
if not os.path.exists(model_path):
os.mkdir(model_path)
with open(os.path.join(model_path, 'result.json'), 'w') as f:
json.dump(self.result, f)
np.save(os.path.join(model_path, 'p_emb.npy'), p_emb)
print("Result save in {}".format(model_path))
return model_path
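# A minimal sketch of the purity metric defined above on toy labels; the Evaluator class
# itself additionally needs the CF_data/LP_data tuples, which are not constructed here.
if __name__ == '__main__':
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([0, 0, 1, 2, 2, 2])
    print('Purity:', purity_score(y_true, y_pred))  # 5 of 6 points correctly grouped -> ~0.833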
|
from django.shortcuts import render
# Create your views here.
from django.views import View
from django.http import JsonResponse
class CatelloView(View):
def post(self, request, *args, **kwargs):
return JsonResponse({"ok": "POST request processed"})
def get(self, request, *args, **kwargs):
return JsonResponse({"ok": "POST request processed"})
|
from project.deliveries.product import Product
class ProductRepository:
def __init__(self):
self.products = []
def add(self, product: Product):
if product.name in [p.name for p in self.products]:
raise ValueError(f'Product {product.name} already exists.')
self.products.append(product)
return f'Product {product.name} successfully added to inventory.'
def decrease(self, product: Product, quantity: int):
product.quantity -= quantity
return f'Left quantity of {product.name}: {product.quantity}'
def find(self, product_name: str):
product = [p for p in self.products if p.name == product_name]
if product:
return product[0]
return 'None'
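# A minimal usage sketch, assuming Product takes a name and a quantity (its real constructor
# lives in project.deliveries.product and may differ):
if __name__ == '__main__':
    repo = ProductRepository()
    water = Product('water', 10)            # hypothetical constructor signature
    print(repo.add(water))                  # Product water successfully added to inventory.
    print(repo.decrease(water, 3))          # Left quantity of water: 7
    print(repo.find('juice'))               # None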
|
import functools
import operator
import warnings
from collections import OrderedDict, defaultdict
from contextlib import suppress
from typing import Any, Mapping, Optional, Tuple
import numpy as np
import pandas as pd
from . import dtypes, utils
from .indexing import get_indexer_nd
from .utils import is_dict_like, is_full_slice
from .variable import IndexVariable, Variable
def _get_joiner(join):
if join == 'outer':
return functools.partial(functools.reduce, operator.or_)
elif join == 'inner':
return functools.partial(functools.reduce, operator.and_)
elif join == 'left':
return operator.itemgetter(0)
elif join == 'right':
return operator.itemgetter(-1)
elif join == 'exact':
# We cannot return a function to "align" in this case, because it needs
# access to the dimension name to give a good error message.
return None
else:
raise ValueError('invalid value for join: %s' % join)
def align(*objects, join='inner', copy=True, indexes=None, exclude=frozenset(),
fill_value=dtypes.NA):
"""
Given any number of Dataset and/or DataArray objects, returns new
objects with aligned indexes and dimension sizes.
Array from the aligned objects are suitable as input to mathematical
operators, because along each dimension they have the same index and size.
Missing values (if ``join != 'inner'``) are filled with ``fill_value``.
The default fill value is NaN.
Parameters
----------
*objects : Dataset or DataArray
Objects to align.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed with
only slice operations, then the output may share memory with the input.
In either case, new xarray objects are always returned.
indexes : dict-like, optional
Any indexes explicitly provided with the `indexes` argument should be
used in preference to the aligned indexes.
exclude : sequence of str, optional
Dimensions that must be excluded from alignment
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
aligned : same as *objects
Tuple of objects with aligned coordinates.
Raises
------
ValueError
If any dimensions without labels on the arguments have different sizes,
or a different size than the size of the aligned dimension labels.
"""
if indexes is None:
indexes = {}
if not indexes and len(objects) == 1:
# fast path for the trivial case
obj, = objects
return (obj.copy(deep=copy),)
all_indexes = defaultdict(list)
unlabeled_dim_sizes = defaultdict(set)
for obj in objects:
for dim in obj.dims:
if dim not in exclude:
try:
index = obj.indexes[dim]
except KeyError:
unlabeled_dim_sizes[dim].add(obj.sizes[dim])
else:
all_indexes[dim].append(index)
# We don't reindex over dimensions with all equal indexes for two reasons:
# - It's faster for the usual case (already aligned objects).
# - It ensures it's possible to do operations that don't require alignment
# on indexes with duplicate values (which cannot be reindexed with
# pandas). This is useful, e.g., for overwriting such duplicate indexes.
joiner = _get_joiner(join)
joined_indexes = {}
for dim, matching_indexes in all_indexes.items():
if dim in indexes:
index = utils.safe_cast_to_index(indexes[dim])
if (any(not index.equals(other) for other in matching_indexes) or
dim in unlabeled_dim_sizes):
joined_indexes[dim] = index
else:
if (any(not matching_indexes[0].equals(other)
for other in matching_indexes[1:]) or
dim in unlabeled_dim_sizes):
if join == 'exact':
raise ValueError(
'indexes along dimension {!r} are not equal'
.format(dim))
index = joiner(matching_indexes)
joined_indexes[dim] = index
else:
index = matching_indexes[0]
if dim in unlabeled_dim_sizes:
unlabeled_sizes = unlabeled_dim_sizes[dim]
labeled_size = index.size
if len(unlabeled_sizes | {labeled_size}) > 1:
raise ValueError(
'arguments without labels along dimension %r cannot be '
'aligned because they have different dimension size(s) %r '
'than the size of the aligned dimension labels: %r'
% (dim, unlabeled_sizes, labeled_size))
for dim in unlabeled_dim_sizes:
if dim not in all_indexes:
sizes = unlabeled_dim_sizes[dim]
if len(sizes) > 1:
raise ValueError(
'arguments without labels along dimension %r cannot be '
'aligned because they have different dimension sizes: %r'
% (dim, sizes))
result = []
for obj in objects:
valid_indexers = {k: v for k, v in joined_indexes.items()
if k in obj.dims}
if not valid_indexers:
# fast path for no reindexing necessary
new_obj = obj.copy(deep=copy)
else:
new_obj = obj.reindex(copy=copy, fill_value=fill_value,
**valid_indexers)
new_obj.encoding = obj.encoding
result.append(new_obj)
return tuple(result)
def deep_align(objects, join='inner', copy=True, indexes=None,
exclude=frozenset(), raise_on_invalid=True,
fill_value=dtypes.NA):
"""Align objects for merging, recursing into dictionary values.
This function is not public API.
"""
from .dataarray import DataArray
from .dataset import Dataset
if indexes is None:
indexes = {}
def is_alignable(obj):
return isinstance(obj, (DataArray, Dataset))
positions = []
keys = []
out = []
targets = []
no_key = object()
not_replaced = object()
for n, variables in enumerate(objects):
if is_alignable(variables):
positions.append(n)
keys.append(no_key)
targets.append(variables)
out.append(not_replaced)
elif is_dict_like(variables):
for k, v in variables.items():
if is_alignable(v) and k not in indexes:
# Skip variables in indexes for alignment, because these
                    # should be overwritten instead:
# https://github.com/pydata/xarray/issues/725
positions.append(n)
keys.append(k)
targets.append(v)
out.append(OrderedDict(variables))
elif raise_on_invalid:
raise ValueError('object to align is neither an xarray.Dataset, '
'an xarray.DataArray nor a dictionary: %r'
% variables)
else:
out.append(variables)
aligned = align(*targets, join=join, copy=copy, indexes=indexes,
exclude=exclude, fill_value=fill_value)
for position, key, aligned_obj in zip(positions, keys, aligned):
if key is no_key:
out[position] = aligned_obj
else:
out[position][key] = aligned_obj
# something went wrong: we should have replaced all sentinel values
assert all(arg is not not_replaced for arg in out)
return out
def reindex_like_indexers(target, other):
"""Extract indexers to align target with other.
Not public API.
Parameters
----------
target : Dataset or DataArray
Object to be aligned.
other : Dataset or DataArray
Object to be aligned with.
Returns
-------
Dict[Any, pandas.Index] providing indexes for reindex keyword arguments.
Raises
------
ValueError
If any dimensions without labels have different sizes.
"""
indexers = {k: v for k, v in other.indexes.items() if k in target.dims}
for dim in other.dims:
if dim not in indexers and dim in target.dims:
other_size = other.sizes[dim]
target_size = target.sizes[dim]
if other_size != target_size:
raise ValueError('different size for unlabeled '
'dimension on argument %r: %r vs %r'
% (dim, other_size, target_size))
return indexers
def reindex_variables(
variables: Mapping[Any, Variable],
sizes: Mapping[Any, int],
indexes: Mapping[Any, pd.Index],
indexers: Mapping,
method: Optional[str] = None,
tolerance: Any = None,
copy: bool = True,
fill_value: Optional[Any] = dtypes.NA,
) -> 'Tuple[OrderedDict[Any, Variable], OrderedDict[Any, pd.Index]]':
"""Conform a dictionary of aligned variables onto a new set of variables,
filling in missing values with NaN.
Not public API.
Parameters
----------
variables : dict-like
Dictionary of xarray.Variable objects.
sizes : dict-like
Dictionary from dimension names to integer sizes.
indexes : dict-like
Dictionary of indexes associated with variables.
indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact matches.
The values of the index at the matching locations must satisfy the
equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return values is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, new xarray objects are always returned.
fill_value : scalar, optional
Value to use for newly missing values
Returns
-------
reindexed : OrderedDict
Dict of reindexed variables.
new_indexes : OrderedDict
Dict of indexes associated with the reindexed variables.
"""
from .dataarray import DataArray
# create variables for the new dataset
reindexed = OrderedDict() # type: OrderedDict[Any, Variable]
# build up indexers for assignment along each dimension
int_indexers = {}
new_indexes = OrderedDict(indexes)
masked_dims = set()
unchanged_dims = set()
for dim, indexer in indexers.items():
if isinstance(indexer, DataArray) and indexer.dims != (dim,):
warnings.warn(
"Indexer has dimensions {0:s} that are different "
"from that to be indexed along {1:s}. "
"This will behave differently in the future.".format(
str(indexer.dims), dim),
FutureWarning, stacklevel=3)
target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])
if dim in indexes:
index = indexes[dim]
if not index.is_unique:
raise ValueError(
'cannot reindex or align along dimension %r because the '
'index has duplicate values' % dim)
int_indexer = get_indexer_nd(index, target, method, tolerance)
            # We use negative values from get_indexer_nd to signify
            # values that are missing in the index.
if (int_indexer < 0).any():
masked_dims.add(dim)
elif np.array_equal(int_indexer, np.arange(len(index))):
unchanged_dims.add(dim)
int_indexers[dim] = int_indexer
if dim in variables:
var = variables[dim]
args = (var.attrs, var.encoding) # type: tuple
else:
args = ()
reindexed[dim] = IndexVariable((dim,), target, *args)
for dim in sizes:
if dim not in indexes and dim in indexers:
existing_size = sizes[dim]
new_size = indexers[dim].size
if existing_size != new_size:
raise ValueError(
'cannot reindex or align along dimension %r without an '
'index because its size %r is different from the size of '
'the new index %r' % (dim, existing_size, new_size))
for name, var in variables.items():
if name not in indexers:
key = tuple(slice(None)
if d in unchanged_dims
else int_indexers.get(d, slice(None))
for d in var.dims)
needs_masking = any(d in masked_dims for d in var.dims)
if needs_masking:
new_var = var._getitem_with_mask(key, fill_value=fill_value)
elif all(is_full_slice(k) for k in key):
# no reindexing necessary
# here we need to manually deal with copying data, since
# we neither created a new ndarray nor used fancy indexing
new_var = var.copy(deep=copy)
else:
new_var = var[key]
reindexed[name] = new_var
return reindexed, new_indexes
def broadcast(*args, **kwargs):
"""Explicitly broadcast any number of DataArray or Dataset objects against
one another.
xarray objects automatically broadcast against each other in arithmetic
operations, so this function should not be necessary for normal use.
If no change is needed, the input data is returned to the output without
being copied.
Parameters
----------
*args : DataArray or Dataset objects
Arrays to broadcast against each other.
exclude : sequence of str, optional
Dimensions that must not be broadcasted
Returns
-------
broadcast : tuple of xarray objects
The same data as the input arrays, but with additional dimensions
inserted so that all data arrays have the same dimensions and shape.
Examples
--------
Broadcast two data arrays against one another to fill out their dimensions:
>>> a = xr.DataArray([1, 2, 3], dims='x')
>>> b = xr.DataArray([5, 6], dims='y')
>>> a
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) int64 0 1 2
>>> b
<xarray.DataArray (y: 2)>
array([5, 6])
Coordinates:
* y (y) int64 0 1
>>> a2, b2 = xr.broadcast(a, b)
>>> a2
<xarray.DataArray (x: 3, y: 2)>
array([[1, 1],
[2, 2],
[3, 3]])
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
>>> b2
<xarray.DataArray (x: 3, y: 2)>
array([[5, 6],
[5, 6],
[5, 6]])
Coordinates:
* y (y) int64 0 1
* x (x) int64 0 1 2
Fill out the dimensions of all data variables in a dataset:
>>> ds = xr.Dataset({'a': a, 'b': b})
>>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset
>>> ds2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 0 1
Data variables:
a (x, y) int64 1 1 2 2 3 3
b (x, y) int64 5 6 5 6 5 6
"""
from .dataarray import DataArray
from .dataset import Dataset
exclude = kwargs.pop('exclude', None)
if exclude is None:
exclude = set()
if kwargs:
raise TypeError('broadcast() got unexpected keyword arguments: %s'
% list(kwargs))
args = align(*args, join='outer', copy=False, exclude=exclude)
common_coords = OrderedDict()
dims_map = OrderedDict()
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.coords:
common_coords[dim] = arg.coords[dim].variable
def _set_dims(var):
# Add excluded dims to a copy of dims_map
var_dims_map = dims_map.copy()
for dim in exclude:
with suppress(ValueError):
# ignore dim not in var.dims
var_dims_map[dim] = var.shape[var.dims.index(dim)]
return var.set_dims(var_dims_map)
def _broadcast_array(array):
data = _set_dims(array.variable)
coords = OrderedDict(array.coords)
coords.update(common_coords)
return DataArray(data, coords, data.dims, name=array.name,
attrs=array.attrs)
def _broadcast_dataset(ds):
data_vars = OrderedDict(
(k, _set_dims(ds.variables[k]))
for k in ds.data_vars)
coords = OrderedDict(ds.coords)
coords.update(common_coords)
return Dataset(data_vars, coords, ds.attrs)
result = []
for arg in args:
if isinstance(arg, DataArray):
result.append(_broadcast_array(arg))
elif isinstance(arg, Dataset):
result.append(_broadcast_dataset(arg))
else:
raise ValueError('all input must be Dataset or DataArray objects')
return tuple(result)
def broadcast_arrays(*args):
import warnings
warnings.warn('xarray.broadcast_arrays is deprecated: use '
'xarray.broadcast instead', DeprecationWarning, stacklevel=2)
return broadcast(*args)
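# A minimal sketch of the public align() entry point above (assuming xarray is installed and
# imported as `xr`); with join='inner' both arrays are cut down to their common index:
#
#     import xarray as xr
#     a = xr.DataArray([1, 2, 3], dims='x', coords={'x': [0, 1, 2]})
#     b = xr.DataArray([10, 20], dims='x', coords={'x': [1, 2]})
#     a2, b2 = xr.align(a, b, join='inner')   # both now carry only x = [1, 2]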
|
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `points` module."""
from __future__ import division
import logging
import numpy as np
from numpy.testing import assert_array_almost_equal
from metpy.gridding.points import (generate_grid, generate_grid_coords, get_boundary_coords,
get_point_count_within_r, get_points_within_r,
get_xy_range, get_xy_steps)
logging.getLogger('metpy.gridding.points').setLevel(logging.ERROR)
def test_get_points_within_r():
r"""Test get points within a radius function."""
x = list(range(10))
y = list(range(10))
center = [1, 5]
radius = 5
matches = get_points_within_r(center, list(zip(x, y)), radius).T
truth = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
assert_array_almost_equal(truth, matches)
def test_get_point_count_within_r():
r"""Test get point count within a radius function."""
x = list(range(10))
y = list(range(10))
center1 = [1, 5]
center2 = [12, 10]
radius = 5
count = get_point_count_within_r([center1, center2], list(zip(x, y)), radius)
truth = np.array([5, 2])
assert_array_almost_equal(truth, count)
def test_get_boundary_coords():
r"""Test get spatial corners of data positions function."""
x = list(range(10))
y = list(range(10))
bbox = get_boundary_coords(x, y)
truth = dict(east=9, north=9, south=0, west=0)
assert bbox == truth
bbox = get_boundary_coords(x, y, 10)
truth = dict(east=19, north=19, south=-10, west=-10)
assert bbox == truth
def test_get_xy_steps():
r"""Test get count of grids function."""
x = list(range(10))
y = list(range(10))
bbox = get_boundary_coords(x, y)
x_steps, y_steps = get_xy_steps(bbox, 3)
truth_x = 3
truth_y = 3
assert x_steps == truth_x
assert y_steps == truth_y
def test_get_xy_range():
r"""Test get range of data positions function."""
x = list(range(10))
y = list(range(10))
bbox = get_boundary_coords(x, y)
x_range, y_range = get_xy_range(bbox)
truth_x = 9
truth_y = 9
assert truth_x == x_range
assert truth_y == y_range
def test_generate_grid():
r"""Test generate grid function."""
x = list(range(10))
y = list(range(10))
bbox = get_boundary_coords(x, y)
gx, gy = generate_grid(3, bbox)
truth_x = np.array([[0.0, 4.5, 9.0],
[0.0, 4.5, 9.0],
[0.0, 4.5, 9.0]])
truth_y = np.array([[0.0, 0.0, 0.0],
[4.5, 4.5, 4.5],
[9.0, 9.0, 9.0]])
assert_array_almost_equal(gx, truth_x)
assert_array_almost_equal(gy, truth_y)
def test_generate_grid_coords():
r"""Test generate grid coordinates function."""
x = list(range(10))
y = list(range(10))
bbox = get_boundary_coords(x, y)
gx, gy = generate_grid(3, bbox)
truth = [[0.0, 0.0],
[4.5, 0.0],
[9.0, 0.0],
[0.0, 4.5],
[4.5, 4.5],
[9.0, 4.5],
[0.0, 9.0],
[4.5, 9.0],
[9.0, 9.0]]
pts = generate_grid_coords(gx, gy)
assert_array_almost_equal(truth, pts)
|
import os
from functools import lru_cache
from typing import Optional, Tuple, Dict
from quarkchain.utils import check
from qkchash.qkchash import (
CACHE_ENTRIES,
make_cache,
qkchash,
QkcHashNative,
get_seed_from_block_number,
)
def get_qkchashlib_path():
"""Assuming libqkchash.so is in the same dir as this file"""
return os.path.join(os.path.dirname(__file__), "libqkchash.so")
def init_qkc_hash_native():
qkc_hash_native = QkcHashNative(get_qkchashlib_path())
check(qkc_hash_native is not None)
return qkc_hash_native
QKC_HASH_NATIVE = init_qkc_hash_native()
@lru_cache(maxsize=128)
def make_cache_fast(entries, seed):
return make_cache(entries, seed)
def get_mining_output(
block_number: int,
header_hash: bytes,
nonce: bytes,
qkchash_with_rotation_stats: bool = False,
) -> Dict[str, bytes]:
seed = get_seed_from_block_number(block_number)
if QKC_HASH_NATIVE is None:
current_cache = make_cache_fast(CACHE_ENTRIES, seed)[:]
mining_output = qkchash(header_hash, nonce, current_cache)
else:
current_cache = QKC_HASH_NATIVE.make_cache(CACHE_ENTRIES, seed)
mining_output = QKC_HASH_NATIVE.calculate_hash(
header_hash, nonce, current_cache, qkchash_with_rotation_stats
)
return mining_output
@lru_cache(maxsize=32)
def check_pow(
block_number: int,
header_hash: bytes,
mixhash: bytes,
bin_nonce: bytes,
difficulty: int,
qkchash_with_rotation_stats: bool = False,
) -> bool:
"""Check if the proof-of-work of the block is valid."""
if len(mixhash) != 32 or len(header_hash) != 32 or len(bin_nonce) != 8:
return False
mining_output = get_mining_output(
block_number, header_hash, bin_nonce, qkchash_with_rotation_stats
)
if mining_output["mix digest"] != mixhash:
return False
result = int.from_bytes(mining_output["result"], byteorder="big")
return result <= 2 ** 256 // (difficulty or 1)
class QkchashMiner:
def __init__(
self,
block_number: int,
difficulty: int,
header_hash: bytes,
qkchash_with_rotation_stats: bool = False,
):
self.block_number = block_number
self.difficulty = difficulty
self.header_hash = header_hash
self.qkchash_with_rotation_stats = qkchash_with_rotation_stats
def mine(
self, rounds=1000, start_nonce=0
) -> Tuple[Optional[bytes], Optional[bytes]]:
bin_nonce, mixhash = mine(
self.block_number,
self.difficulty,
self.header_hash,
start_nonce=start_nonce,
rounds=rounds,
qkchash_with_rotation_stats=self.qkchash_with_rotation_stats,
)
if bin_nonce is not None:
return bin_nonce, mixhash
return None, None
def mine(
block_number: int,
difficulty: int,
header_hash: bytes,
start_nonce: int = 0,
rounds: int = 1000,
qkchash_with_rotation_stats: bool = False,
) -> Tuple[Optional[bytes], Optional[bytes]]:
nonce = start_nonce
target = 2 ** 256 // (difficulty or 1)
for i in range(1, rounds + 1):
        # hashimoto expects a big-endian byte representation
bin_nonce = (nonce + i).to_bytes(8, byteorder="big")
mining_output = get_mining_output(
block_number, header_hash, bin_nonce, qkchash_with_rotation_stats
)
result = int.from_bytes(mining_output["result"], byteorder="big")
if result <= target:
assert len(bin_nonce) == 8
assert len(mining_output["mix digest"]) == 32
return bin_nonce, mining_output["mix digest"]
return None, None
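# A minimal sketch of how mine() and check_pow() fit together; the header hash below is an
# arbitrary placeholder, and an actual run requires libqkchash.so to be loadable above.
if __name__ == "__main__":
    header = bytes(32)                     # placeholder 32-byte header hash
    difficulty = 1000
    nonce, mixhash = QkchashMiner(1, difficulty, header).mine(rounds=100)
    if nonce is not None:
        assert check_pow(1, header, mixhash, nonce, difficulty)
        print("found nonce", nonce.hex())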
|
import sys
import os
import numpy as np
from PIL import Image, ImageFont, ImageDraw
import cv2
def main(file_template, start=0, end=100000):
font = ImageFont.truetype("Menlo.ttc", 32)
capture = cv2.VideoCapture(0)
out = cv2.VideoWriter(
"out.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 20.0, (1640, 1232)
)
print(f"Processing images {start} to {end} inclusive")
for j in range(start, end + 1):
filename = file_template % j
print(filename)
if not os.path.exists(filename):
break
image = Image.open(filename)
timestamp = image._getexif()[36867]
day, time = timestamp.split()
day = day.replace(":", "/")
time = time[:5]
draw = ImageDraw.Draw(image)
draw.text((16, 16), f"{time}", (0xFF, 0xFF, 0xFF), font=font)
# this inverts RGB but we don't care because monochrome
out.write(np.asarray(image))
out.release()
capture.release()
if __name__ == "__main__":
main(sys.argv[1], start=int(sys.argv[2]), end=int(sys.argv[3]))
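# Example invocation (hypothetical paths), where the template's %d placeholder is filled with
# the frame index and the two integer arguments give the inclusive frame range:
#
#     python timelapse.py "frames/img_%05d.jpg" 0 500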
|
import pandas as pd
import numpy as np
# ======================================================================================================================
# Model of distribution system (=cooling grid) CLASS
# ======================================================================================================================
class CoolingGrid:
"""
Simulation-tool for the distribution system, including distribution piping system (DPS) and energy transfer
stations (ETS). Includes non-linear equations that are not used in class optimizer. Allows the calculation of line-
flow, head-losses and nodal-head distribution at a given set of nodal water consumptions.
"""
# INITIALIZATION ===================================================================================================
def __init__(
self,
parameters
):
# Saving parameters --------------------------------------------------------------------------------------------
self.parameters = parameters
# Forming incidence matrices of the Digraph --------------------------------------------------------------------
# Incidence matrix of full grid
dict_temp = {}
for n_id in self.parameters.nodes.index:
list_temp = []
for l_id in self.parameters.lines.index:
if n_id == self.parameters.lines["Start"][l_id]:
list_temp.append(-1)
elif n_id == self.parameters.lines["End"][l_id]:
list_temp.append(+1)
else:
list_temp.append(0)
dict_temp[n_id] = list_temp
self.incidence_matrix_complete = pd.DataFrame(dict_temp, columns=dict_temp.keys())
self.incidence_matrix_complete.index = list(self.parameters.lines.index)
# Excluding reference node (root) - having a predefined head of 0 - from matrix, resulting in square incidence
# matrix, suitable for direct calculation
for n_id in self.parameters.nodes.index:
if self.parameters.nodes["Type"][n_id] == "reference":
self.incidence_matrix_potential = self.incidence_matrix_complete[[n_id]]
self.incidence_matrix = self.incidence_matrix_complete.drop(columns=n_id)
# Transposing square incidence matrix
self.incidence_matrix_transposed = self.incidence_matrix.transpose()
# METHOD DEFINITIONS ===============================================================================================
# Methods to calculate the steady-state, non-linear hydraulic-equilibrium of a tree-like grid ----------------------
def build_ets_flow_time_array(
self,
ets_flow_vector
):
ets_flow_dict = {
time_step: ets_flow_vector
for time_step in self.parameters.environment.index
}
ets_flows_frame = pd.DataFrame(
data=ets_flow_dict,
index=list(self.parameters.buildings.index)
)
return ets_flows_frame
def get_nodal_consumptions_time_array(
self,
ets_flows_time_array
):
# Creating junction-DataFrame relating 0 water consumption with all junction-nodes over time
junction_nodes_ids = []
for nodes_id in self.parameters.nodes.index:
if self.parameters.nodes["Type"][nodes_id] == "junction":
junction_nodes_ids.append(nodes_id)
junction_consumptions_dict = {
time_step: [0*id for id in junction_nodes_ids]
for time_step in self.parameters.environment.index
}
junction_consumptions_frame = pd.DataFrame(
data=junction_consumptions_dict, index=[id for id in junction_nodes_ids])
# Merging junction-DataFrame with ets-flow-DataFrame
nodal_consumptions_over_time = pd.concat([ets_flows_time_array, junction_consumptions_frame])
# Sort according to index
nodal_consumptions_over_time.sort_index(inplace=True)
return nodal_consumptions_over_time
@staticmethod
def get_reference_node_consumption_time_row(
nodal_consumptions_time_array
):
reference_node_consumption_dict = {
time_step: -nodal_consumptions_time_array[time_step].sum()
for time_step in nodal_consumptions_time_array.columns
}
reference_node_consumption_time_row = pd.DataFrame(
data=reference_node_consumption_dict,
index=[0]
)
return reference_node_consumption_time_row
def get_line_flows_time_array(
self,
nodal_consumptions_time_array
):
line_flows_temp = {
time_step: np.linalg.solve(
self.incidence_matrix_transposed.values,
nodal_consumptions_time_array[time_step].values
)
for time_step in nodal_consumptions_time_array.columns
}
line_flows = pd.DataFrame(
data=line_flows_temp,
index=list(self.incidence_matrix.index))
return line_flows
@staticmethod
def get_pipe_velocity(
pipe_flow,
pipe_diameter
):
pipe_velocity = 4 * pipe_flow / (np.pi * pipe_diameter ** 2)
return pipe_velocity
def get_reynold(
self,
pipe_flow,
pipe_diameter
):
"""
        :param pipe_flow: volumetric flow through the pipe in cubic metres per second [cbm/s].
:param pipe_diameter: in metres [m].
:return: Reynolds number, dimensionless in terms of unit.
"""
pipe_velocity = self.get_pipe_velocity(pipe_flow, pipe_diameter)
reynold = np.fabs(pipe_velocity) * pipe_diameter / self.parameters.physics["water kinematic viscosity [m^2/s]"]
return reynold
def get_pipe_friction_factor(
self,
pipe_flow,
pipe_diameter,
pipe_roughness
):
"""
        :param pipe_flow: volumetric flow through the pipe in cubic metres per second [cbm/s].
:param pipe_diameter: in metres [m].
:param pipe_roughness: absolute roughness (epsilon) in millimeters [mm].
:return: Darcy-Weisbach friction factor f, dimensionless in terms of unit.
"""
        # get_reynold derives the velocity itself, so it is passed the volumetric flow
        reynold = self.get_reynold(pipe_flow, pipe_diameter)
# No flow at all
if reynold == 0:
pipe_friction_factor = 0
return pipe_friction_factor
# Laminar Flow, based on Hagen-Poiseuille velocity profile, analytical correlation
elif 0 < reynold < 4000:
pipe_friction_factor = 64 / reynold
return pipe_friction_factor
# Turbulent flow, Swamee-Jain formula, approximating correlation of Colebrook-White equation
elif 4000 <= reynold <= 100000000 and 0.000001 <= ((pipe_roughness/1000) / pipe_diameter) <= 0.01:
pipe_friction_factor = 1.325 / (
np.log(
(pipe_roughness / 1000) / (3.7 * pipe_diameter) + 5.74 / (reynold ** 0.9)
)
) ** 2
return pipe_friction_factor
# Outside of scope:
else:
return "Error"
def get_pipe_head_loss(
self,
pipe_flow,
pipe_diameter,
pipe_roughness,
pipe_length
):
"""
        :param pipe_flow: volumetric flow through the pipe in cubic metres per second [cbm/s].
:param pipe_diameter: in metres [m].
:param pipe_roughness: absolute roughness (epsilon) in millimeters [mm].
:param pipe_length: in meters [m].
:return: pipe head in meters of water [m].
"""
pipe_friction_factor = self.get_pipe_friction_factor(pipe_flow, pipe_diameter, pipe_roughness)
# Darcy-Weisbach Equation
pipe_head_loss = pipe_friction_factor * 8 * pipe_length * pipe_flow * np.fabs(pipe_flow) / (
self.parameters.physics["gravitational acceleration [m^2/s]"] * (np.pi ** 2) * pipe_diameter ** 5
)
return pipe_head_loss
def get_line_head_loss_time_array(
self,
line_flow_time_array
):
"""
        :param line_flow_time_array: all volumetric flows through the lines (pipes), as a DataFrame,
            in cubic metres per second [cbm/s].
        :return: all head losses occurring over the grid's lines due to friction, as a DataFrame,
            in meters of water [m].
"""
line_head_loss_dict = {
time_step: [
self.get_pipe_head_loss(
line_flow_time_array[time_step][line_id],
self.parameters.lines["Diameter [m]"][line_id],
self.parameters.lines["Absolute Roughness [mm]"][line_id],
self.parameters.lines["Length [m]"][line_id]
)
for line_id in line_flow_time_array.index
]
for time_step in line_flow_time_array.columns
}
line_head_loss_frame = pd.DataFrame(
data=line_head_loss_dict,
index=list(line_flow_time_array.index)
)
return line_head_loss_frame
def get_nodal_head_time_array(
self,
line_head_loss_time_array
):
"""
        :param line_head_loss_time_array: all head losses occurring over the grid's lines due to friction,
            as a DataFrame, in meters of water [m].
        :return: total heads occurring at all nodes of the grid, as a DataFrame, in meters of water [m].
"""
nodal_head_calculation_dict = {
time_step: np.linalg.solve(
self.incidence_matrix.values,
-line_head_loss_time_array[time_step].values
)
for time_step in line_head_loss_time_array.columns
}
nodal_head_calculation_frame = pd.DataFrame(
data=nodal_head_calculation_dict,
index=list(self.incidence_matrix_transposed.index)
)
reference_node_head_dict = {
time_step: 0
for time_step in line_head_loss_time_array.columns
}
reference_node_head_frame = pd.DataFrame(
data=reference_node_head_dict,
index=[0]
)
all_nodal_heads_frame = pd.concat(
[
reference_node_head_frame,
nodal_head_calculation_frame
]
)
return all_nodal_heads_frame
def get_tree_equilibrium_time_array(
self,
ets_flow_time_array
):
nodal_consumptions_time_array = self.get_nodal_consumptions_time_array(
ets_flows_time_array=ets_flow_time_array
)
reference_node_consumption_time_row = self.get_reference_node_consumption_time_row(
nodal_consumptions_time_array=nodal_consumptions_time_array
)
line_flow_time_array = self.get_line_flows_time_array(
nodal_consumptions_time_array=nodal_consumptions_time_array
)
line_head_loss_time_array = self.get_line_head_loss_time_array(
line_flow_time_array=line_flow_time_array
)
nodal_head_time_array = self.get_nodal_head_time_array(
line_head_loss_time_array=line_head_loss_time_array
)
tree_equilibrium_time_array = pd.concat(
[
nodal_consumptions_time_array,
reference_node_consumption_time_row,
line_flow_time_array,
line_head_loss_time_array,
nodal_head_time_array
],
keys=[
'Nodal consumptions [qbm/s]',
'Ref. node consumption [qbm/s]',
'Flow in lines [qbm/s]',
'Head loss over lines [m]',
'Total head at nodes [m]'
]
)
tree_equilibrium_time_array.index.names = ['VARIABLES', 'IDs']
return tree_equilibrium_time_array
# Methods for calculating electrical power demand of pumps in distribution system ----------------------------------
def get_ets_head_difference_time_array(
self,
nodal_head_time_array
):
ets_head_difference_time_array_dict = {
time_step: [
2
* np.fabs(nodal_head_time_array[time_step][building_id])
+ self.parameters.distribution_system["head loss in ETS [m]"]
for building_id in self.parameters.buildings.index
] for time_step in nodal_head_time_array.columns
}
ets_head_difference_time_array_frame = pd.DataFrame(
data=ets_head_difference_time_array_dict,
index=list(self.parameters.buildings.index)
)
return ets_head_difference_time_array_frame
def get_central_pumping_power_time_row(
self,
ets_head_difference_time_array,
central_flow_time_row
):
central_pumping_power_time_dict = {
time_step: (
(1 / self.parameters.distribution_system["pump efficiency secondary pump [-]"])
* self.parameters.physics["water density [kg/m^3]"]
* self.parameters.physics["gravitational acceleration [m^2/s]"]
* ets_head_difference_time_array[time_step].max()
* np.fabs(central_flow_time_row[time_step][0])
)
for time_step in ets_head_difference_time_array.columns
}
central_pumping_power_time_row = pd.DataFrame(
data=central_pumping_power_time_dict,
index=[0]
)
return central_pumping_power_time_row
def get_distributed_pumping_power_time_array(
self,
ets_head_difference_time_array,
nodal_consumptions_time_array
):
distributed_pumping_power_time_dict = {
time_step: [
(1 / self.parameters.distribution_system["pump efficiency secondary pump [-]"])
* self.parameters.physics["water density [kg/m^3]"]
* self.parameters.physics["gravitational acceleration [m^2/s]"]
* ets_head_difference_time_array[time_step][ets_id]
* np.fabs(nodal_consumptions_time_array[time_step][ets_id])
for ets_id in ets_head_difference_time_array.index
]
for time_step in ets_head_difference_time_array.columns
}
distributed_pumping_power_time_array = pd.DataFrame(
data=distributed_pumping_power_time_dict,
index=list(ets_head_difference_time_array.index)
)
return distributed_pumping_power_time_array
def get_grid_pumping(
self,
tree_equilibrium_time_array
):
ets_head_difference_time_array = self.get_ets_head_difference_time_array(
nodal_head_time_array=tree_equilibrium_time_array.loc["Total head at nodes [m]"]
)
central_pumping_power_time_row = self.get_central_pumping_power_time_row(
ets_head_difference_time_array=ets_head_difference_time_array,
central_flow_time_row=tree_equilibrium_time_array.loc["Ref. node consumption [qbm/s]"]
)
distributed_pumping_power_time_array = self.get_distributed_pumping_power_time_array(
ets_head_difference_time_array=ets_head_difference_time_array,
nodal_consumptions_time_array=tree_equilibrium_time_array.loc["Nodal consumptions [qbm/s]"]
)
overall_distributed_pumping_power_time_dict = {
time_step: distributed_pumping_power_time_array[time_step].sum()
for time_step in tree_equilibrium_time_array.columns
}
overall_distributed_pumping_power_time_row = pd.DataFrame(
data=overall_distributed_pumping_power_time_dict,
index=[None]
)
grid_pumping = pd.concat(
[
ets_head_difference_time_array,
central_pumping_power_time_row,
overall_distributed_pumping_power_time_row,
distributed_pumping_power_time_array
],
keys=[
'Head difference over ETSs [m]',
'CSP power [W]',
'Overall DSP power [W]',
'DSP power at ETSs [W]'
]
)
grid_pumping.index.names = ['VARIABLES', 'IDs']
return grid_pumping
# Method triggering a complete non-linear simulation of the distribution system ------------------------------------
def get_grid_simulation(
self,
ets_flow_time_array
):
# Calculating hydraulic equilibrium of return-side grid
tree_equilibrium_time_array = self.get_tree_equilibrium_time_array(
ets_flow_time_array=ets_flow_time_array
)
# Based on hydraulic equilibrium of return-side grid, calculating pumping powers of Distribution System
distribution_system_pumping = self.get_grid_pumping(
tree_equilibrium_time_array=tree_equilibrium_time_array
)
# All calculated results are packed into one DataFrame
distribution_system_simulation = pd.concat(
[
tree_equilibrium_time_array,
distribution_system_pumping
]
)
return distribution_system_simulation
# Methods used by optimizer ---------------------------------------------------------------------------------------
def get_heat_intake_from_ets_flow(
self,
ets_flow
):
heat_flow_from_building = (
self.parameters.physics["water density [kg/m^3]"]
* self.parameters.physics["specific enthalpy difference DW [J/kg]"]
* ets_flow
)
return heat_flow_from_building
# Methods for planning ---------------------------------------------------------------------------------------------
def get_diameters_from_flow(
self,
line_flows,
u_max
):
diameters_dict = {
line: [(4 * line_flows[line] / (np.pi * u_max))**0.5]
for line in line_flows.index
}
diameters_df = pd.DataFrame.from_dict(
data=diameters_dict,
orient='index',
columns=['Critical diameter [m]']
)
return diameters_df
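# Hedged standalone check (editor's addition, hypothetical numbers): the critical-diameter
# formula in get_diameters_from_flow above follows from Q = u_max * pi * d^2 / 4, i.e.
# d = sqrt(4 * Q / (pi * u_max)). This reuses the module's existing np and pd imports;
# the flow values and velocity limit below are made up.
if __name__ == "__main__":
    example_flows = pd.Series({"line_1": 0.05, "line_2": 0.12})  # volumetric flows [qbm/s]
    u_max_example = 2.0                                          # max flow velocity [m/s]
    critical_diameters = {
        line: (4 * flow / (np.pi * u_max_example)) ** 0.5
        for line, flow in example_flows.items()
    }
    print(pd.Series(critical_diameters, name="Critical diameter [m]"))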
|
'''
Created by auto_sdk on 2016.05.24
'''
from top.api.base import RestApi
class AlibabaAliqinFcSmsNumSendRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.extend = None
self.rec_num = None
self.sms_free_sign_name = None
self.sms_param = None
self.sms_template_code = None
self.sms_type = None
def getapiname(self):
return 'alibaba.aliqin.fc.sms.num.send'
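# Hedged usage sketch (editor's addition): populating the request fields defined above.
# Signing and sending the request is handled by the top SDK's RestApi base class, which
# is not shown here, so that step is omitted; all values below are placeholders.
if __name__ == "__main__":
    req = AlibabaAliqinFcSmsNumSendRequest()
    req.sms_type = "normal"
    req.sms_free_sign_name = "your-sign-name"
    req.sms_param = '{"code": "1234"}'
    req.rec_num = "13000000000"
    req.sms_template_code = "SMS_0000001"
    print(req.getapiname())  # -> 'alibaba.aliqin.fc.sms.num.send'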
|
# coding: utf-8
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TaskCreateRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'org_id': 'str',
'org': 'str',
'status': 'TaskStatusType',
'flux': 'str',
'description': 'str'
}
attribute_map = {
'type': 'type',
'org_id': 'orgID',
'org': 'org',
'status': 'status',
'flux': 'flux',
'description': 'description'
}
def __init__(self, type=None, org_id=None, org=None, status=None, flux=None, description=None): # noqa: E501,D401,D403
"""TaskCreateRequest - a model defined in OpenAPI.""" # noqa: E501
self._type = None
self._org_id = None
self._org = None
self._status = None
self._flux = None
self._description = None
self.discriminator = None
if type is not None:
self.type = type
if org_id is not None:
self.org_id = org_id
if org is not None:
self.org = org
if status is not None:
self.status = status
self.flux = flux
if description is not None:
self.description = description
@property
def type(self):
"""Get the type of this TaskCreateRequest.
The type of task, this can be used for filtering tasks on list actions.
:return: The type of this TaskCreateRequest.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this TaskCreateRequest.
The type of task, this can be used for filtering tasks on list actions.
:param type: The type of this TaskCreateRequest.
:type: str
""" # noqa: E501
self._type = type
@property
def org_id(self):
"""Get the org_id of this TaskCreateRequest.
The ID of the organization that owns this Task.
:return: The org_id of this TaskCreateRequest.
:rtype: str
""" # noqa: E501
return self._org_id
@org_id.setter
def org_id(self, org_id):
"""Set the org_id of this TaskCreateRequest.
The ID of the organization that owns this Task.
:param org_id: The org_id of this TaskCreateRequest.
:type: str
""" # noqa: E501
self._org_id = org_id
@property
def org(self):
"""Get the org of this TaskCreateRequest.
The name of the organization that owns this Task.
:return: The org of this TaskCreateRequest.
:rtype: str
""" # noqa: E501
return self._org
@org.setter
def org(self, org):
"""Set the org of this TaskCreateRequest.
The name of the organization that owns this Task.
:param org: The org of this TaskCreateRequest.
:type: str
""" # noqa: E501
self._org = org
@property
def status(self):
"""Get the status of this TaskCreateRequest.
:return: The status of this TaskCreateRequest.
:rtype: TaskStatusType
""" # noqa: E501
return self._status
@status.setter
def status(self, status):
"""Set the status of this TaskCreateRequest.
:param status: The status of this TaskCreateRequest.
:type: TaskStatusType
""" # noqa: E501
self._status = status
@property
def flux(self):
"""Get the flux of this TaskCreateRequest.
The Flux script to run for this task.
:return: The flux of this TaskCreateRequest.
:rtype: str
""" # noqa: E501
return self._flux
@flux.setter
def flux(self, flux):
"""Set the flux of this TaskCreateRequest.
The Flux script to run for this task.
:param flux: The flux of this TaskCreateRequest.
:type: str
""" # noqa: E501
if flux is None:
raise ValueError("Invalid value for `flux`, must not be `None`") # noqa: E501
self._flux = flux
@property
def description(self):
"""Get the description of this TaskCreateRequest.
An optional description of the task.
:return: The description of this TaskCreateRequest.
:rtype: str
""" # noqa: E501
return self._description
@description.setter
def description(self, description):
"""Set the description of this TaskCreateRequest.
An optional description of the task.
:param description: The description of this TaskCreateRequest.
:type: str
""" # noqa: E501
self._description = description
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, TaskCreateRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
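# Hedged usage sketch (editor's addition): constructing a TaskCreateRequest and serializing
# it with the to_dict()/to_str() helpers defined above. The org and flux values are
# placeholders, not part of the generated file.
if __name__ == "__main__":
    request = TaskCreateRequest(
        org="my-org",
        flux='option task = {name: "demo", every: 1h}\nfrom(bucket: "b") |> range(start: -1h)'
    )
    print(request.to_dict())
    print(request)  # __repr__ delegates to to_str()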
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class SecurityRule(SubResource):
"""Network security rule.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param description: A description for this rule. Restricted to 140 chars.
:type description: str
:param protocol: Required. Network protocol this rule applies to. Possible
values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
'*'
:type protocol: str or
~azure.mgmt.network.v2017_10_01.models.SecurityRuleProtocol
:param source_port_range: The source port or range. Integer or range
between 0 and 65535. Asterisk '*' can also be used to match all ports.
:type source_port_range: str
:param destination_port_range: The destination port or range. Integer or
range between 0 and 65535. Asterisk '*' can also be used to match all
ports.
:type destination_port_range: str
:param source_address_prefix: The CIDR or source IP range. Asterisk '*'
can also be used to match all source IPs. Default tags such as
'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
this is an ingress rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param source_address_prefixes: The CIDR or source IP ranges.
:type source_address_prefixes: list[str]
:param source_application_security_groups: The application security group
specified as source.
:type source_application_security_groups:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup]
:param destination_address_prefix: The destination address prefix. CIDR or
destination IP range. Asterisk '*' can also be used to match all source
IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used.
:type destination_address_prefix: str
:param destination_address_prefixes: The destination address prefixes.
CIDR or destination IP ranges.
:type destination_address_prefixes: list[str]
:param destination_application_security_groups: The application security
group specified as destination.
:type destination_application_security_groups:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup]
:param source_port_ranges: The source port ranges.
:type source_port_ranges: list[str]
:param destination_port_ranges: The destination port ranges.
:type destination_port_ranges: list[str]
:param access: Required. The network traffic is allowed or denied.
Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
'Deny'
:type access: str or
~azure.mgmt.network.v2017_10_01.models.SecurityRuleAccess
:param priority: The priority of the rule. The value can be between 100
and 4096. The priority number must be unique for each rule in the
collection. The lower the priority number, the higher the priority of the
rule.
:type priority: int
:param direction: Required. The direction of the rule. The direction
specifies if rule will be evaluated on incoming or outgoing traffic.
Possible values are: 'Inbound' and 'Outbound'. Possible values include:
'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2017_10_01.models.SecurityRuleDirection
:param provisioning_state: The provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(SecurityRule, self).__init__(id=id, **kwargs)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.source_address_prefixes = source_address_prefixes
self.source_application_security_groups = source_application_security_groups
self.destination_address_prefix = destination_address_prefix
self.destination_address_prefixes = destination_address_prefixes
self.destination_application_security_groups = destination_application_security_groups
self.source_port_ranges = source_port_ranges
self.destination_port_ranges = destination_port_ranges
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
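# Hedged usage sketch (editor's addition): building a SecurityRule with its three required
# keyword-only parameters (protocol, access, direction). The values below are placeholders;
# real code would typically pass the azure.mgmt.network enum members or equivalent strings.
if __name__ == "__main__":
    rule = SecurityRule(
        protocol="Tcp",
        access="Allow",
        direction="Inbound",
        name="allow-ssh",
        source_address_prefix="*",
        source_port_range="*",
        destination_address_prefix="*",
        destination_port_range="22",
        priority=100,
    )
    print(rule.name, rule.protocol, rule.access, rule.direction)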
|
#!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
      - Name to be given to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
      - Required if I(interfaces) or I(enable_snat) are provided.
required: false
default: None
project:
description:
- Unique name or ID of the project.
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
ROUTER_INTERFACE_OWNERS = set([
'network:router_interface',
'network:router_interface_distributed',
'network:ha_router_replicated_interface'
])
def _router_internal_interfaces(cloud, router):
for port in cloud.list_router_interfaces(router, 'internal'):
if port['device_owner'] in ROUTER_INTERFACE_OWNERS:
yield port
def _needs_update(cloud, module, router, network, internal_subnet_ids):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
if module.params['external_fixed_ips']:
for new_iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
# check internal interfaces
if module.params['interfaces']:
existing_subnet_ids = []
for port in _router_internal_interfaces(cloud, router):
if 'fixed_ips' in port:
for fixed_ip in port['fixed_ips']:
existing_subnet_ids.append(fixed_ip['subnet_id'])
if set(internal_subnet_ids) != set(existing_subnet_ids):
return True
return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return external_subnet_ids, internal_subnet_ids
def main():
argument_spec = openstack_full_argument_spec(
state=dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
admin_state_up=dict(type='bool', default=True),
enable_snat=dict(type='bool', default=True),
network=dict(default=None),
interfaces=dict(type='list', default=None),
external_fixed_ips=dict(type='list', default=None),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['project'] and
StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
module.fail_json(msg="To utilize project, the installed version of"
"the shade library MUST be > 1.9.0")
state = module.params['state']
name = module.params['name']
network = module.params['network']
project = module.params['project']
if module.params['external_fixed_ips'] and not network:
module.fail_json(msg='network is required when supplying external_fixed_ips')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
router = cloud.get_router(name, filters=filters)
net = None
if network:
net = cloud.get_network(network)
if not net:
module.fail_json(msg='network %s not found' % network)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.
external_ids, internal_ids = _validate_subnets(module, cloud)
if module.check_mode:
module.exit_json(
changed=_system_state_change(cloud, module, router, net, internal_ids)
)
if state == 'present':
changed = False
if not router:
kwargs = _build_kwargs(cloud, module, router, net)
if project_id:
kwargs['project_id'] = project_id
router = cloud.create_router(**kwargs)
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
else:
if _needs_update(cloud, module, router, net, internal_ids):
kwargs = _build_kwargs(cloud, module, router, net)
updated_router = cloud.update_router(**kwargs)
# Protect against update_router() not actually
# updating the router.
if not updated_router:
changed = False
# On a router update, if any internal interfaces were supplied,
# just detach all existing internal interfaces and attach the new.
elif internal_ids:
router = updated_router
ports = _router_internal_interfaces(cloud, router)
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
module.exit_json(changed=changed,
router=router,
id=router['id'])
elif state == 'absent':
if not router:
module.exit_json(changed=False)
else:
# We need to detach all internal interfaces on a router before
# we will be allowed to delete it.
ports = _router_internal_interfaces(cloud, router)
router_id = router['id']
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
cloud.delete_router(router_id)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
import os
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import pandas as pd
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
Base = declarative_base()
class Post(Base):
"""Database table to store Craigslist Housing information post filter
and clean."""
__tablename__ = "posts"
id = Column(Integer, primary_key=True)
post_id = Column(Integer, unique=True)
title = Column(String)
url = Column(String, unique=True)
date = Column(DateTime)
price = Column(Float)
neighborhood = Column(String)
address = Column(String)
housing_type = Column(String)
laundry = Column(String)
parking = Column(String)
bedrooms = Column(Integer)
sqft = Column(Integer)
def return_new(posts):
"""Return pandas dataframe with new posts after comparison with
user's data in database. Send new posts to write to user's database."""
engine = get_engine()
session = get_session(engine)
# Declare empty pandas DataFrame to store new posts
new_posts = pd.DataFrame(columns=list(posts))
for _, post in posts.iterrows():
post_session = session.query(Post).filter_by(post_id=post.get("PostID")).first()
if post_session:
# Don't add post to db if it already exists
continue
# Write posts found to db and append to new_posts df
write_to_db(post, session)
new_posts = new_posts.append(post)
return new_posts
def get_session(engine):
"""Build sqlalchemy session."""
Session = sessionmaker(bind=engine)
return Session()
def get_engine():
"""Build sqlalchemy engine."""
engine = create_engine(
"sqlite:///" + os.path.join(BASE_DIR, "posts", "posts.db") + "?check_same_thread=False",
echo=False,
)
Base.metadata.create_all(engine)
return engine
def write_to_db(post, session):
"""Link post content and write to database."""
post_db = Post(
post_id=post["PostID"],
title=post["Title"],
url=post["URL"],
date=post["DateUpdated"],
price=post["Price"],
neighborhood=post["Neighborhood"],
address=post["Address"],
housing_type=post["HousingType"],
laundry=post["Laundry"],
parking=post["Parking"],
bedrooms=post["Bedrooms"],
sqft=post["AreaFt2"],
)
session.add(post_db)
session.commit()
def drop_db():
"""Drop all tables from engine."""
engine = get_engine()
Post.__table__.drop(engine)
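# Hedged usage sketch (editor's addition): passing a one-row DataFrame of scraped posts
# through return_new(). The column names mirror the keys consumed by write_to_db() above;
# all values are placeholders, and the sqlite file is created at the path configured in
# get_engine().
if __name__ == "__main__":
    import datetime

    sample = pd.DataFrame([{
        "PostID": 1, "Title": "Sunny 1BR", "URL": "https://example.org/post/1",
        "DateUpdated": datetime.datetime(2020, 1, 1), "Price": 1500.0,
        "Neighborhood": "Mission", "Address": "123 Example St",
        "HousingType": "apartment", "Laundry": "in unit", "Parking": "street",
        "Bedrooms": 1, "AreaFt2": 550,
    }])
    new = return_new(sample)  # writes unseen posts and returns them as a DataFrame
    print(len(new), "new post(s) stored")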
|
#!/usr/bin/env python3
import unittest
from framework import tag_fixme_vpp_workers
from framework import VppTestCase, VppTestRunner
from vpp_udp_encap import find_udp_encap, VppUdpEncap
from vpp_udp_decap import VppUdpDecap
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, VppMplsLabel, \
VppMplsTable, VppMplsRoute, FibPathType, FibPathProto
from vpp_neighbor import VppNeighbor
from vpp_papi import VppEnum
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS
NUM_PKTS = 67
@tag_fixme_vpp_workers
class TestUdpEncap(VppTestCase):
""" UDP Encap Test Case """
@classmethod
def setUpClass(cls):
super(TestUdpEncap, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestUdpEncap, cls).tearDownClass()
def setUp(self):
super(TestUdpEncap, self).setUp()
        # create 4 pg interfaces
self.create_pg_interfaces(range(4))
# setup interfaces
# assign them different tables.
table_id = 0
self.tables = []
for i in self.pg_interfaces:
i.admin_up()
if table_id != 0:
tbl = VppIpTable(self, table_id)
tbl.add_vpp_config()
self.tables.append(tbl)
tbl = VppIpTable(self, table_id, is_ip6=1)
tbl.add_vpp_config()
self.tables.append(tbl)
i.set_table_ip4(table_id)
i.set_table_ip6(table_id)
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
table_id += 1
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.set_table_ip4(0)
i.set_table_ip6(0)
i.admin_down()
super(TestUdpEncap, self).tearDown()
def validate_outer4(self, rx, encap_obj):
self.assertEqual(rx[IP].src, encap_obj.src_ip_s)
self.assertEqual(rx[IP].dst, encap_obj.dst_ip_s)
self.assertEqual(rx[UDP].sport, encap_obj.src_port)
self.assertEqual(rx[UDP].dport, encap_obj.dst_port)
def validate_outer6(self, rx, encap_obj):
self.assertEqual(rx[IPv6].src, encap_obj.src_ip_s)
self.assertEqual(rx[IPv6].dst, encap_obj.dst_ip_s)
self.assertEqual(rx[UDP].sport, encap_obj.src_port)
self.assertEqual(rx[UDP].dport, encap_obj.dst_port)
def validate_inner4(self, rx, tx, ttl=None):
self.assertEqual(rx[IP].src, tx[IP].src)
self.assertEqual(rx[IP].dst, tx[IP].dst)
if ttl:
self.assertEqual(rx[IP].ttl, ttl)
else:
self.assertEqual(rx[IP].ttl, tx[IP].ttl)
def validate_inner6(self, rx, tx, hlim=None):
self.assertEqual(rx.src, tx[IPv6].src)
self.assertEqual(rx.dst, tx[IPv6].dst)
if hlim:
self.assertEqual(rx.hlim, hlim)
else:
self.assertEqual(rx.hlim, tx[IPv6].hlim)
def test_udp_encap(self):
""" UDP Encap test
"""
#
# construct a UDP encap object through each of the peers
# v4 through the first two peers, v6 through the second.
# The last encap is v4 and is used to check the codepath
# where 2 different udp encap objects are processed at the
# same time
#
udp_encap_0 = VppUdpEncap(self,
self.pg0.local_ip4,
self.pg0.remote_ip4,
330, 440)
udp_encap_1 = VppUdpEncap(self,
self.pg1.local_ip4,
self.pg1.remote_ip4,
331, 441,
table_id=1)
udp_encap_2 = VppUdpEncap(self,
self.pg2.local_ip6,
self.pg2.remote_ip6,
332, 442,
table_id=2)
udp_encap_3 = VppUdpEncap(self,
self.pg3.local_ip6,
self.pg3.remote_ip6,
333, 443,
table_id=3)
udp_encap_4 = VppUdpEncap(self,
self.pg0.local_ip4,
self.pg0.remote_ip4,
334, 444)
udp_encap_0.add_vpp_config()
udp_encap_1.add_vpp_config()
udp_encap_2.add_vpp_config()
udp_encap_3.add_vpp_config()
udp_encap_4.add_vpp_config()
self.logger.info(self.vapi.cli("sh udp encap"))
self.assertTrue(find_udp_encap(self, udp_encap_2))
self.assertTrue(find_udp_encap(self, udp_encap_3))
self.assertTrue(find_udp_encap(self, udp_encap_0))
self.assertTrue(find_udp_encap(self, udp_encap_1))
self.assertTrue(find_udp_encap(self, udp_encap_4))
#
# Routes via each UDP encap object - all combinations of v4 and v6.
#
route_4o4 = VppIpRoute(
self, "1.1.0.1", 24,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap_0.id,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)],
table_id=1)
# specific route to match encap4, to test encap of 2 packets using 2
# different encap
route_4o4_2 = VppIpRoute(
self, "1.1.0.2", 32,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap_4.id,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)],
table_id=1)
route_4o6 = VppIpRoute(
self, "1.1.2.1", 32,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap_2.id,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
route_6o4 = VppIpRoute(
self, "2001::1", 128,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap_1.id,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_6o6 = VppIpRoute(
self, "2001::3", 128,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=udp_encap_3.id,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_4o6.add_vpp_config()
route_6o6.add_vpp_config()
route_6o4.add_vpp_config()
route_4o4.add_vpp_config()
route_4o4_2.add_vpp_config()
#
# 4o4 encap
# we add a single packet matching the last encap at the beginning of
# the packet vector so that we encap 2 packets with different udp
# encap object at the same time
#
p_4o4 = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="2.2.2.2", dst="1.1.0.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
p_4o4_2 = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="2.2.2.2", dst="1.1.0.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(
self.pg1, p_4o4_2 * 1 + p_4o4 * (NUM_PKTS - 1), self.pg0)
# checking encap4 magic packet
p = rx.pop(0)
self.validate_outer4(p, udp_encap_4)
p = IP(p["UDP"].payload.load)
self.validate_inner4(p, p_4o4_2)
self.assertEqual(udp_encap_4.get_stats()['packets'], 1)
# checking remaining packets for encap0
for p in rx:
self.validate_outer4(p, udp_encap_0)
p = IP(p["UDP"].payload.load)
self.validate_inner4(p, p_4o4)
self.assertEqual(udp_encap_0.get_stats()['packets'], NUM_PKTS - 1)
#
# 4o6 encap
#
p_4o6 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="2.2.2.2", dst="1.1.2.1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_4o6*NUM_PKTS, self.pg2)
for p in rx:
self.validate_outer6(p, udp_encap_2)
p = IP(p["UDP"].payload.load)
self.validate_inner4(p, p_4o6)
self.assertEqual(udp_encap_2.get_stats()['packets'], NUM_PKTS)
#
# 6o4 encap
#
p_6o4 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src="2001::100", dst="2001::1") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_6o4*NUM_PKTS, self.pg1)
for p in rx:
self.validate_outer4(p, udp_encap_1)
p = IPv6(p["UDP"].payload.load)
self.validate_inner6(p, p_6o4)
self.assertEqual(udp_encap_1.get_stats()['packets'], NUM_PKTS)
#
# 6o6 encap
#
p_6o6 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src="2001::100", dst="2001::3") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_6o6*NUM_PKTS, self.pg3)
for p in rx:
self.validate_outer6(p, udp_encap_3)
p = IPv6(p["UDP"].payload.load)
self.validate_inner6(p, p_6o6)
self.assertEqual(udp_encap_3.get_stats()['packets'], NUM_PKTS)
#
# A route with an output label
# the TTL of the inner packet is decremented on LSP ingress
#
route_4oMPLSo4 = VppIpRoute(
self, "1.1.2.22", 32,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
next_hop_id=1,
labels=[VppMplsLabel(66)])])
route_4oMPLSo4.add_vpp_config()
p_4omo4 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="2.2.2.2", dst="1.1.2.22") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_4omo4*NUM_PKTS, self.pg1)
for p in rx:
self.validate_outer4(p, udp_encap_1)
p = MPLS(p["UDP"].payload.load)
self.validate_inner4(p, p_4omo4, ttl=63)
self.assertEqual(udp_encap_1.get_stats()['packets'], 2*NUM_PKTS)
def test_udp_decap(self):
""" UDP Decap test
"""
#
# construct a UDP decap object for each type of protocol
#
# IPv4
udp_api_proto = VppEnum.vl_api_udp_decap_next_proto_t
next_proto = udp_api_proto.UDP_API_DECAP_PROTO_IP4
udp_decap_0 = VppUdpDecap(self, 1, 220, next_proto)
# IPv6
next_proto = udp_api_proto.UDP_API_DECAP_PROTO_IP6
udp_decap_1 = VppUdpDecap(self, 0, 221, next_proto)
# MPLS
next_proto = udp_api_proto.UDP_API_DECAP_PROTO_MPLS
udp_decap_2 = VppUdpDecap(self, 1, 222, next_proto)
udp_decap_0.add_vpp_config()
udp_decap_1.add_vpp_config()
udp_decap_2.add_vpp_config()
#
# Routes via the corresponding pg after the UDP decap
#
route_4 = VppIpRoute(
self, "1.1.1.1", 32,
[VppRoutePath("0.0.0.0", self.pg0.sw_if_index)],
table_id=0)
route_6 = VppIpRoute(
self, "2001::1", 128,
[VppRoutePath("::", self.pg1.sw_if_index)],
table_id=1)
route_mo4 = VppIpRoute(
self, "3.3.3.3", 32,
[VppRoutePath("0.0.0.0", self.pg2.sw_if_index)],
table_id=2)
route_4.add_vpp_config()
route_6.add_vpp_config()
route_mo4.add_vpp_config()
#
# Adding neighbors to route the packets
#
n_4 = VppNeighbor(self,
self.pg0.sw_if_index,
"00:11:22:33:44:55",
"1.1.1.1")
n_6 = VppNeighbor(self,
self.pg1.sw_if_index,
"11:22:33:44:55:66",
"2001::1")
n_mo4 = VppNeighbor(self,
self.pg2.sw_if_index,
"22:33:44:55:66:77",
"3.3.3.3")
n_4.add_vpp_config()
n_6.add_vpp_config()
n_mo4.add_vpp_config()
#
# MPLS decapsulation config
#
mpls_table = VppMplsTable(self, 0)
mpls_table.add_vpp_config()
mpls_route = VppMplsRoute(
self, 77, 1,
[VppRoutePath("0.0.0.0",
0xFFFFFFFF,
nh_table_id=2,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
mpls_route.add_vpp_config()
#
# UDP over ipv4 decap
#
p_4 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=1111, dport=220) /
IP(src="2.2.2.2", dst="1.1.1.1") /
UDP(sport=1234, dport=4321) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_4*NUM_PKTS, self.pg0)
p_4 = IP(p_4["UDP"].payload)
for p in rx:
p = IP(p["Ether"].payload)
self.validate_inner4(p, p_4, ttl=63)
#
# UDP over ipv6 decap
#
p_6 = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IPv6(src=self.pg1.remote_ip6, dst=self.pg1.local_ip6) /
UDP(sport=2222, dport=221) /
IPv6(src="2001::100", dst="2001::1") /
UDP(sport=1234, dport=4321) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg1, p_6*NUM_PKTS, self.pg1)
p_6 = IPv6(p_6["UDP"].payload)
p = IPv6(rx[0]["Ether"].payload)
for p in rx:
p = IPv6(p["Ether"].payload)
self.validate_inner6(p, p_6, hlim=63)
#
# UDP over mpls decap
#
p_mo4 = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg2.remote_ip4, dst=self.pg2.local_ip4) /
UDP(sport=3333, dport=222) /
MPLS(label=77, ttl=1) /
IP(src="4.4.4.4", dst="3.3.3.3") /
UDP(sport=1234, dport=4321) /
Raw(b'\xa5' * 100))
self.pg2.enable_mpls()
rx = self.send_and_expect(self.pg2, p_mo4*NUM_PKTS, self.pg2)
self.pg2.disable_mpls()
p_mo4 = IP(MPLS(p_mo4["UDP"].payload).payload)
for p in rx:
p = IP(p["Ether"].payload)
self.validate_inner4(p, p_mo4, ttl=63)
@tag_fixme_vpp_workers
class TestUDP(VppTestCase):
""" UDP Test Case """
@classmethod
def setUpClass(cls):
super(TestUDP, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestUDP, cls).tearDownClass()
def setUp(self):
super(TestUDP, self).setUp()
self.vapi.session_enable_disable(is_enable=1)
self.create_loopback_interfaces(2)
table_id = 0
for i in self.lo_interfaces:
i.admin_up()
if table_id != 0:
tbl = VppIpTable(self, table_id)
tbl.add_vpp_config()
i.set_table_ip4(table_id)
i.config_ip4()
table_id += 1
# Configure namespaces
self.vapi.app_namespace_add_del(namespace_id="0",
sw_if_index=self.loop0.sw_if_index)
self.vapi.app_namespace_add_del(namespace_id="1",
sw_if_index=self.loop1.sw_if_index)
def tearDown(self):
for i in self.lo_interfaces:
i.unconfig_ip4()
i.set_table_ip4(0)
i.admin_down()
self.vapi.session_enable_disable(is_enable=0)
super(TestUDP, self).tearDown()
def test_udp_transfer(self):
""" UDP echo client/server transfer """
# Add inter-table routes
ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=0)], table_id=1)
ip_t01.add_vpp_config()
ip_t10.add_vpp_config()
# Start builtin server and client
uri = "udp://" + self.loop0.local_ip4 + "/1234"
error = self.vapi.cli("test echo server appns 0 fifo-size 4 no-echo" +
"uri " + uri)
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
error = self.vapi.cli("test echo client mbytes 10 appns 1 " +
"fifo-size 4 no-output test-bytes " +
"syn-timeout 2 no-return uri " + uri)
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
self.logger.debug(self.vapi.cli("show session verbose 2"))
# Delete inter-table routes
ip_t01.remove_vpp_config()
ip_t10.remove_vpp_config()
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
from dataclasses import dataclass
from joemetry._type_hints import *
from .point import *
@dataclass
class Segment:
__slots__ = ['start', 'end']
start: Point
end: Point
def __post_init__(self):
self.start = Point(*self.start)
self.end = Point(*self.end)
@property
def slant(self) -> float:
        '''
        Y = mX + C
        returns the slope "m" of the line through this segment's endpoints
        '''
return round((self.start.y - self.end.y) / (self.start.x - self.end.x), 2)
@property
def _length(self) -> float:
        # the 'real' (unrounded) length of the segment, kept for computations to preserve accuracy
return sqrt((self.start.x - self.end.x)**2 + (self.start.y - self.end.y)**2)
@property
def length(self) -> float:
        # the rounded length of the segment, used when displayed
return round(self._length, 2)
@property
def midpoint(self) -> 'Point':
'''returns the midpoint of "this" segment'''
return Point((self.start.x + self.end.x) / 2, (self.start.y + self.end.y) / 2)
@property
def unit_vector(self) -> 'Point':
'''returns a normalized segment'''
return (self.end - self.start) / self._length
@classmethod
def convert(cls, segments: List[Tuple[Coor, Coor]]) -> List['Segment']:
        '''converts a list of (start, end) pairs, where each endpoint is a Point or a tuple of two floats, into Segment objects'''
if not all(isinstance(c, (tuple, list)) for c in segments):
raise TypeError(f"the given data type must be tuples containing float/int")
return [cls(*seg) for seg in segments]
def perpendicular_with(self, other: 'Segment') -> bool:
'''
check whether "this" segment is perpendicular with the other segment
uses the [m1 x m2 = -1] equation
'''
return (self.slant * other.slant) == -1
def parallel_with(self, other: 'Segment') -> bool:
'''
check whether "this" segment is parallel with the other segment
'''
return self.slant == other.slant
def collinear_with(self, other: 'Segment') -> bool:
'''
check whether "this" segment is collinear with the other segment
uses the cross-product of both ends of the segments
'''
return self.start.cross(other.start) == self.end.cross(other.end) == 0
def scale_to_length(self, length: Num, direction='mid') -> None:
'''
extend or contract "this" segment to the given length
direction: valid options -> "start", "mid", "end"
"start": extend/contract the segment at the starting point only
"end": extend/contract the segment at the ending point only
"mid": extend/contract the segment at the starting point and ending point equally
'''
if direction not in ['start', 'mid', 'end']:
raise ValueError(f"{direction} is not a valid direction")
scale_factor = self.unit_vector * (length - self._length)
plus_start = plus_end = scale_factor
if direction == 'mid':
plus_start = plus_end = scale_factor * 0.5
elif direction == 'start':
plus_end = (0, 0)
elif direction == 'end':
plus_start = (0, 0)
self.start -= plus_start
self.end += plus_end
def rotate(self,
angle: Num,
origin: Optional[Coor] = (0,0),
clockwise: Optional[bool] = True
) -> 'Segment':
'''
returns a segment that is rotated to the given angle
angle: 0-360 degree
        origin: relative origin for the rotation
        clockwise: rotate clockwise if True, counter-clockwise otherwise
'''
return Segment(self.start.rotate(angle, origin, clockwise), self.end.rotate(angle, origin, clockwise))
def rotate_ip(self,
angle: Num,
origin: Optional[Coor] = (0,0),
clockwise: Optional[bool] = True
) -> None:
'''
rotates this segment to the given angle
angle: 0-360 degree
        origin: relative origin for the rotation
        clockwise: rotate clockwise if True, counter-clockwise otherwise
'''
self.start.rotate_ip(angle, origin, clockwise)
self.end.rotate_ip(angle, origin, clockwise)
def intersect_with(self, other: 'Segment') -> Coor:
'''
returns either None or an intersecting point of both segment
'''
start = other.start - self.start
end_1, end_2 = self.end - self.start, other.end - other.start
determinant = end_1.cross(end_2)
if determinant == 0: return None
check_1 = (start).cross(end_2) / determinant
check_2 = (start).cross(end_1) / determinant
if (0 <= check_1 <= 1) and (0 <= check_2 <= 1):
return round(self.start + (self.end - self.start) * check_1, 2)
return None
def __mul__(self, scale_factor: Num):
'''contract/extend the segment's length by the given scale factor'''
if not isinstance(scale_factor, (float, int)):
raise TypeError(f"cannot multiply {type(self).__name__} by '{type(scale_factor).__name__}'")
return Segment(self.start * scale_factor, self.end * scale_factor)
def __truediv__(self, scale_factor: Num):
'''contract/extend the segment's length by the given scale factor'''
if not isinstance(scale_factor, (float, int)):
raise TypeError(f"cannot divide {type(self).__name__} by '{type(scale_factor).__name__}'")
return Segment(self.start / scale_factor, self.end / scale_factor)
def __floordiv__(self, scale_factor: Num):
'''contract/extend the segment's length by the given scale factor'''
if not isinstance(scale_factor, (float, int)):
raise TypeError(f"cannot divide(floor) {type(self).__name__} by '{type(scale_factor).__name__}'")
return Segment(self.start // scale_factor, self.end // scale_factor)
def __getitem__(self, index: int) -> 'Point':
if index == 0: return self.start
if index == 1: return self.end
    def __setitem__(self, index: int, val: Coor) -> None:
if index == 0: self.start = Point(*val)
if index == 1: self.end = Point(*val)
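# Hedged usage sketch (editor's addition): exercising a few of the Segment helpers above
# with plain coordinate tuples (converted to Point objects in __post_init__). It assumes
# the accompanying Point class supports the arithmetic already used by Segment itself.
if __name__ == "__main__":
    a = Segment((0, 0), (4, 4))
    b = Segment((0, 4), (4, 0))
    print(a.length)                 # 5.66, i.e. round(sqrt(32), 2)
    print(a.midpoint)               # midpoint at (2.0, 2.0); exact repr depends on Point
    print(a.perpendicular_with(b))  # True: slopes 1.0 and -1.0 multiply to -1
    a.scale_to_length(10)           # grow symmetrically about the midpoint
    print(a.length)                 # 10.0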
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-01 11:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Quiz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('problem', models.CharField(max_length=200)),
('answer', models.BooleanField()),
('explanation', models.CharField(max_length=200)),
('img_url', models.CharField(max_length=100)),
],
),
]
|
ETH_ADDRESS = "0x0000000000000000000000000000000000000000"
WETH9_ADDRESS = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
# see: https://chainid.network/chains/
_netid_to_name = {
1: "mainnet",
3: "ropsten",
4: "rinkeby",
56: "binance",
97: "binance_testnet",
137: "polygon",
100: "xdai",
}
_factory_contract_addresses_v1 = {
"mainnet": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95",
"ropsten": "0x9c83dCE8CA20E9aAF9D3efc003b2ea62aBC08351",
"rinkeby": "0xf5D915570BC477f9B8D6C0E980aA81757A3AaC36",
"kovan": "0xD3E51Ef092B2845f10401a0159B2B96e8B6c3D30",
"görli": "0x6Ce570d02D73d4c384b46135E87f8C592A8c86dA",
}
# For v2 the address is the same on mainnet, Ropsten, Rinkeby, Görli, and Kovan
# https://uniswap.org/docs/v2/smart-contracts/factory
_factory_contract_addresses_v2 = {
"mainnet": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"ropsten": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"rinkeby": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"görli": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"xdai": "0xA818b4F111Ccac7AA31D0BCc0806d64F2E0737D7",
"binance": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
}
_router_contract_addresses_v2 = {
"mainnet": "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D",
"ropsten": "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D",
"rinkeby": "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D",
"görli": "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D",
"xdai": "0x1C232F01118CB8B424793ae03F870aa7D0ac7f77",
"binance": "0x10ED43C718714eb63d5aA57B78B54704E256024E",
}
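# Hedged helper sketch (editor's addition): resolving the v2 router address for a chain id
# through the _netid_to_name mapping above. Chains without a v2 router entry here
# (e.g. polygon) raise a KeyError.
def router_address_for_netid(netid: int) -> str:
    """Return the v2 router contract address for the given chain id."""
    return _router_contract_addresses_v2[_netid_to_name[netid]]


if __name__ == "__main__":
    print(router_address_for_netid(1))   # mainnet -> 0x7a25...488D
    print(router_address_for_netid(56))  # binance -> 0x10ED...024E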
|
from django_sorcery.db import databases
db = databases.get("minimal_backpop")
class Asset(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=5))
order = db.ManyToOne("Order", back_populates="assets")
class OrderItem(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=5))
orders = db.OneToMany("Order", back_populates="order_item")
class Customer(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=5))
orders = db.OneToMany("Order", back_populates="applicant")
coapp_orders = db.OneToMany("Order", back_populates="coapplicant")
profile = db.relationship("Profile", back_populates="customer", uselist=False)
class Profile(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=20))
customer = db.OneToOne("Customer", back_populates="profile")
class Contact(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=5))
orders = db.ManyToMany("Order", back_populates="contacts", table_name="order_contacts")
class Order(db.Model):
pk = db.Column(db.Integer(), autoincrement=True, primary_key=True)
name = db.Column(db.String(length=5))
order_item = db.ManyToOne("OrderItem", back_populates="orders")
applicant = db.ManyToOne("Customer", back_populates="orders")
coapplicant = db.ManyToOne("Customer", back_populates="coapp_orders")
assets = db.OneToMany(Asset, back_populates="order")
contacts = db.ManyToMany("Contact", back_populates="orders", table_name="order_contacts")
db.configure_mappers()
db.drop_all()
db.create_all()
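# Hedged usage sketch (editor's addition): creating a few related rows with the models
# above. This assumes the django_sorcery `db` object proxies the usual SQLAlchemy session
# API (add / commit / query), which is not shown in this file.
if __name__ == "__main__":
    customer = Customer(name="Alice")
    order = Order(name="ord1", applicant=customer)
    order.assets.append(Asset(name="house"))
    db.add(order)
    db.commit()
    print(db.query(Order).count())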
|
import sys
import traceback
from mapswipe_workers.utils import slack
def _get_error_message_details(error):
"""
    The function to nicely extract error text and traceback.
Parameters
----------
error : Exception
the python exception which caused the error
Returns
-------
error_msg_string : str
"""
type_, value_, traceback_ = sys.exc_info()
error_msg = traceback.format_exception(type_, value_, traceback_)
error_msg_string = ''
for part in error_msg:
error_msg_string += part + '\n'
return error_msg_string
def send_error(error, process):
"""
The function to send an error message to Slack
Parameters
----------
error : Exception
the python exception which caused the error
process : str
the name of the process for which the exception occurred, e.g. 'import'
Returns
-------
bool
True if successful, false otherwise.
"""
error_msg = _get_error_message_details(error)
    head = 'python-mapswipe-workers: error occurred during "{}"'.format(process)
slack.send_slack_message(head + '\n' + error_msg)
return True
def log_error(error, logger):
"""
The function to log error to logging file
Parameters
----------
error : Exception
the python exception which caused the error
logger : logging.logger
the logger object
Returns
-------
bool
True if successful, false otherwise.
"""
error_msg = _get_error_message_details(error)
logger.error('Error detail:\n' + error_msg)
return True
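# Hedged usage sketch (editor's addition): wrapping a worker step so failures are both
# logged and forwarded to Slack via the helpers above. The logger setup is illustrative;
# send_error() only works with valid Slack credentials configured for the slack util.
if __name__ == "__main__":
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("mapswipe_workers")
    try:
        raise RuntimeError("simulated failure during import")
    except Exception as error:
        log_error(error, logger)
        send_error(error, "import")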
|
"""Facilities for generating error messages during type checking.
Don't add any non-trivial message construction logic to the type
checker, as it can compromise clarity and make messages less
consistent. Add such logic to this module instead. Literal messages, including those
with format args, should be defined as constants in mypy.message_registry.
Historically we tried to avoid all message string literals in the type
checker but we are moving away from this convention.
"""
from collections import OrderedDict
import re
import difflib
from textwrap import dedent
from typing import cast, List, Dict, Any, Sequence, Iterable, Tuple, Set, Optional, Union
from typing_extensions import Final
from mypy.erasetype import erase_type
from mypy.errors import Errors
from mypy.types import (
Type, CallableType, Instance, TypeVarType, TupleType, TypedDictType, LiteralType,
UnionType, NoneType, AnyType, Overloaded, FunctionLike, DeletedType, TypeType,
UninhabitedType, TypeOfAny, UnboundType, PartialType, get_proper_type, ProperType,
get_proper_types
)
from mypy.typetraverser import TypeTraverserVisitor
from mypy.nodes import (
TypeInfo, Context, MypyFile, op_methods, FuncDef, reverse_builtin_aliases,
ARG_POS, ARG_OPT, ARG_NAMED, ARG_NAMED_OPT, ARG_STAR, ARG_STAR2,
ReturnStmt, NameExpr, Var, CONTRAVARIANT, COVARIANT, SymbolNode,
CallExpr
)
from mypy.subtypes import (
is_subtype, find_member, get_member_flags,
IS_SETTABLE, IS_CLASSVAR, IS_CLASS_OR_STATIC,
)
from mypy.sametypes import is_same_type
from mypy.util import unmangle
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
ARG_CONSTRUCTOR_NAMES = {
ARG_POS: "Arg",
ARG_OPT: "DefaultArg",
ARG_NAMED: "NamedArg",
ARG_NAMED_OPT: "DefaultNamedArg",
ARG_STAR: "VarArg",
ARG_STAR2: "KwArg",
} # type: Final
class MessageBuilder:
"""Helper class for reporting type checker error messages with parameters.
The methods of this class need to be provided with the context within a
file; the errors member manages the wider context.
IDEA: Support a 'verbose mode' that includes full information about types
in error messages and that may otherwise produce more detailed error
messages.
"""
# Report errors using this instance. It knows about the current file and
# import context.
errors = None # type: Errors
modules = None # type: Dict[str, MypyFile]
# Number of times errors have been disabled.
disable_count = 0
# Hack to deduplicate error messages from union types
disable_type_names = 0
def __init__(self, errors: Errors, modules: Dict[str, MypyFile]) -> None:
self.errors = errors
self.modules = modules
self.disable_count = 0
self.disable_type_names = 0
#
# Helpers
#
def copy(self) -> 'MessageBuilder':
new = MessageBuilder(self.errors.copy(), self.modules)
new.disable_count = self.disable_count
new.disable_type_names = self.disable_type_names
return new
def clean_copy(self) -> 'MessageBuilder':
errors = self.errors.copy()
errors.error_info_map = OrderedDict()
return MessageBuilder(errors, self.modules)
def add_errors(self, messages: 'MessageBuilder') -> None:
"""Add errors in messages to this builder."""
if self.disable_count <= 0:
for errs in messages.errors.error_info_map.values():
for info in errs:
self.errors.add_error_info(info)
def disable_errors(self) -> None:
self.disable_count += 1
def enable_errors(self) -> None:
self.disable_count -= 1
def is_errors(self) -> bool:
return self.errors.is_errors()
def report(self,
msg: str,
context: Optional[Context],
severity: str,
*,
code: Optional[ErrorCode] = None,
file: Optional[str] = None,
origin: Optional[Context] = None,
offset: int = 0) -> None:
"""Report an error or note (unless disabled)."""
if origin is not None:
end_line = origin.end_line
elif context is not None:
end_line = context.end_line
else:
end_line = None
if self.disable_count <= 0:
self.errors.report(context.get_line() if context else -1,
context.get_column() if context else -1,
msg, severity=severity, file=file, offset=offset,
origin_line=origin.get_line() if origin else None,
end_line=end_line,
code=code)
def fail(self,
msg: str,
context: Optional[Context],
*,
code: Optional[ErrorCode] = None,
file: Optional[str] = None,
origin: Optional[Context] = None) -> None:
"""Report an error message (unless disabled)."""
self.report(msg, context, 'error', code=code, file=file, origin=origin)
def note(self,
msg: str,
context: Context,
file: Optional[str] = None,
origin: Optional[Context] = None,
offset: int = 0,
*,
code: Optional[ErrorCode] = None) -> None:
"""Report a note (unless disabled)."""
self.report(msg, context, 'note', file=file, origin=origin,
offset=offset, code=code)
def note_multiline(self, messages: str, context: Context, file: Optional[str] = None,
origin: Optional[Context] = None, offset: int = 0,
code: Optional[ErrorCode] = None) -> None:
"""Report as many notes as lines in the message (unless disabled)."""
for msg in messages.splitlines():
self.report(msg, context, 'note', file=file, origin=origin,
offset=offset, code=code)
#
# Specific operations
#
# The following operations are for generating specific error messages. They
# get some information as arguments, and they build an error message based
# on them.
def has_no_attr(self, original_type: Type, typ: Type, member: str, context: Context) -> Type:
"""Report a missing or non-accessible member.
original_type is the top-level type on which the error occurred.
typ is the actual type that is missing the member. These can be
different, e.g., in a union, original_type will be the union and typ
will be the specific item in the union that does not have the member
attribute.
If member corresponds to an operator, use the corresponding operator
name in the messages. Return type Any.
"""
original_type = get_proper_type(original_type)
typ = get_proper_type(typ)
if (isinstance(original_type, Instance) and
original_type.type.has_readable_member(member)):
self.fail('Member "{}" is not assignable'.format(member), context)
elif member == '__contains__':
self.fail('Unsupported right operand type for in ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member in op_methods.values():
            # Access to a binary operator member (e.g. __add__). This case does
# not handle indexing operations.
for op, method in op_methods.items():
if method == member:
self.unsupported_left_operand(op, original_type, context)
break
elif member == '__neg__':
self.fail('Unsupported operand type for unary - ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__pos__':
self.fail('Unsupported operand type for unary + ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__invert__':
self.fail('Unsupported operand type for ~ ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__getitem__':
# Indexed get.
# TODO: Fix this consistently in format_type
if isinstance(original_type, CallableType) and original_type.is_type_obj():
self.fail('The type {} is not generic and not indexable'.format(
format_type(original_type)), context)
else:
self.fail('Value of type {} is not indexable'.format(
format_type(original_type)), context, code=codes.INDEX)
elif member == '__setitem__':
# Indexed set.
self.fail('Unsupported target for indexed assignment', context, code=codes.INDEX)
elif member == '__call__':
if isinstance(original_type, Instance) and \
(original_type.type.fullname() == 'builtins.function'):
# "'function' not callable" is a confusing error message.
# Explain that the problem is that the type of the function is not known.
self.fail('Cannot call function of unknown type', context, code=codes.OPERATOR)
else:
self.fail('{} not callable'.format(format_type(original_type)), context,
code=codes.OPERATOR)
else:
# The non-special case: a missing ordinary attribute.
extra = ''
if member == '__iter__':
extra = ' (not iterable)'
elif member == '__aiter__':
extra = ' (not async iterable)'
if not self.disable_type_names:
failed = False
if isinstance(original_type, Instance) and original_type.type.names:
alternatives = set(original_type.type.names.keys())
matches = [m for m in COMMON_MISTAKES.get(member, []) if m in alternatives]
matches.extend(best_matches(member, alternatives)[:3])
if member == '__aiter__' and matches == ['__iter__']:
matches = [] # Avoid misleading suggestion
if member == '__div__' and matches == ['__truediv__']:
# TODO: Handle differences in division between Python 2 and 3 more cleanly
matches = []
if matches:
self.fail(
'{} has no attribute "{}"; maybe {}?{}'.format(
format_type(original_type), member, pretty_or(matches), extra),
context,
code=codes.ATTR_DEFINED)
failed = True
if not failed:
self.fail(
'{} has no attribute "{}"{}'.format(
format_type(original_type), member, extra),
context,
code=codes.ATTR_DEFINED)
elif isinstance(original_type, UnionType):
# The checker passes "object" in lieu of "None" for attribute
# checks, so we manually convert it back.
typ_format, orig_type_format = format_type_distinctly(typ, original_type)
if typ_format == '"object"' and \
any(type(item) == NoneType for item in original_type.items):
typ_format = '"None"'
self.fail('Item {} of {} has no attribute "{}"{}'.format(
typ_format, orig_type_format, member, extra), context,
code=codes.UNION_ATTR)
return AnyType(TypeOfAny.from_error)
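    # Illustrative sketch (not part of the original source): has_no_attr()
    # chooses its wording from the member that failed to resolve, e.g. a
    # missing '__getitem__' on "int" yields
    #     Value of type "int" is not indexable
    # while a plain missing attribute with a close match yields something like
    #     "Foo" has no attribute "apend"; maybe "append"?
    # ("Foo" is a hypothetical class here.)  In every branch the method
    # returns AnyType(TypeOfAny.from_error) so checking can continue.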
def unsupported_operand_types(self,
op: str,
left_type: Any,
right_type: Any,
context: Context,
*,
code: ErrorCode = codes.OPERATOR) -> None:
"""Report unsupported operand types for a binary operation.
Types can be Type objects or strings.
"""
left_str = ''
if isinstance(left_type, str):
left_str = left_type
else:
left_str = format_type(left_type)
right_str = ''
if isinstance(right_type, str):
right_str = right_type
else:
right_str = format_type(right_type)
if self.disable_type_names:
msg = 'Unsupported operand types for {} (likely involving Union)'.format(op)
else:
msg = 'Unsupported operand types for {} ({} and {})'.format(
op, left_str, right_str)
self.fail(msg, context, code=code)
def unsupported_left_operand(self, op: str, typ: Type,
context: Context) -> None:
if self.disable_type_names:
msg = 'Unsupported left operand type for {} (some union)'.format(op)
else:
msg = 'Unsupported left operand type for {} ({})'.format(
op, format_type(typ))
self.fail(msg, context, code=codes.OPERATOR)
def not_callable(self, typ: Type, context: Context) -> Type:
self.fail('{} not callable'.format(format_type(typ)), context)
return AnyType(TypeOfAny.from_error)
def untyped_function_call(self, callee: CallableType, context: Context) -> Type:
name = callable_name(callee) or '(unknown)'
self.fail('Call to untyped function {} in typed context'.format(name), context,
code=codes.NO_UNTYPED_CALL)
return AnyType(TypeOfAny.from_error)
def incompatible_argument(self,
n: int,
m: int,
callee: CallableType,
arg_type: Type,
arg_kind: int,
context: Context,
outer_context: Context) -> Optional[ErrorCode]:
"""Report an error about an incompatible argument type.
The argument type is arg_type, argument number is n and the
callee type is 'callee'. If the callee represents a method
that corresponds to an operator, use the corresponding
operator name in the messages.
        Return the error code that was used for the argument (multiple error
        codes are possible).
"""
arg_type = get_proper_type(arg_type)
target = ''
callee_name = callable_name(callee)
if callee_name is not None:
name = callee_name
if callee.bound_args and callee.bound_args[0] is not None:
base = format_type(callee.bound_args[0])
else:
base = extract_type(name)
for op, method in op_methods.items():
for variant in method, '__r' + method[2:]:
# FIX: do not rely on textual formatting
if name.startswith('"{}" of'.format(variant)):
if op == 'in' or variant != method:
# Reversed order of base/argument.
self.unsupported_operand_types(op, arg_type, base,
context, code=codes.OPERATOR)
else:
self.unsupported_operand_types(op, base, arg_type,
context, code=codes.OPERATOR)
return codes.OPERATOR
if name.startswith('"__getitem__" of'):
self.invalid_index_type(arg_type, callee.arg_types[n - 1], base, context,
code=codes.INDEX)
return codes.INDEX
if name.startswith('"__setitem__" of'):
if n == 1:
self.invalid_index_type(arg_type, callee.arg_types[n - 1], base, context,
code=codes.INDEX)
return codes.INDEX
else:
msg = '{} (expression has type {}, target has type {})'
arg_type_str, callee_type_str = format_type_distinctly(arg_type,
callee.arg_types[n - 1])
self.fail(msg.format(message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
arg_type_str, callee_type_str),
context, code=codes.ASSIGNMENT)
return codes.ASSIGNMENT
target = 'to {} '.format(name)
msg = ''
code = codes.MISC
notes = [] # type: List[str]
if callee_name == '<list>':
name = callee_name[1:-1]
n -= 1
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[0])
msg = '{} item {} has incompatible type {}; expected {}'.format(
name.title(), n, actual_type_str, expected_type_str)
code = codes.LIST_ITEM
elif callee_name == '<dict>':
name = callee_name[1:-1]
n -= 1
key_type, value_type = cast(TupleType, arg_type).items
expected_key_type, expected_value_type = cast(TupleType, callee.arg_types[0]).items
            # don't increase verbosity unless there is a need to do so
if is_subtype(key_type, expected_key_type):
key_type_str = format_type(key_type)
expected_key_type_str = format_type(expected_key_type)
else:
key_type_str, expected_key_type_str = format_type_distinctly(
key_type, expected_key_type)
if is_subtype(value_type, expected_value_type):
value_type_str = format_type(value_type)
expected_value_type_str = format_type(expected_value_type)
else:
value_type_str, expected_value_type_str = format_type_distinctly(
value_type, expected_value_type)
msg = '{} entry {} has incompatible type {}: {}; expected {}: {}'.format(
name.title(), n, key_type_str, value_type_str,
expected_key_type_str, expected_value_type_str)
code = codes.DICT_ITEM
elif callee_name == '<list-comprehension>':
actual_type_str, expected_type_str = map(strip_quotes,
format_type_distinctly(arg_type,
callee.arg_types[0]))
msg = 'List comprehension has incompatible type List[{}]; expected List[{}]'.format(
actual_type_str, expected_type_str)
elif callee_name == '<set-comprehension>':
actual_type_str, expected_type_str = map(strip_quotes,
format_type_distinctly(arg_type,
callee.arg_types[0]))
msg = 'Set comprehension has incompatible type Set[{}]; expected Set[{}]'.format(
actual_type_str, expected_type_str)
elif callee_name == '<dictionary-comprehension>':
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[n - 1])
msg = ('{} expression in dictionary comprehension has incompatible type {}; '
'expected type {}').format(
'Key' if n == 1 else 'Value',
actual_type_str,
expected_type_str)
elif callee_name == '<generator>':
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[0])
msg = 'Generator has incompatible item type {}; expected {}'.format(
actual_type_str, expected_type_str)
else:
try:
expected_type = callee.arg_types[m - 1]
except IndexError: # Varargs callees
expected_type = callee.arg_types[-1]
arg_type_str, expected_type_str = format_type_distinctly(
arg_type, expected_type, bare=True)
if arg_kind == ARG_STAR:
arg_type_str = '*' + arg_type_str
elif arg_kind == ARG_STAR2:
arg_type_str = '**' + arg_type_str
# For function calls with keyword arguments, display the argument name rather than the
# number.
arg_label = str(n)
if isinstance(outer_context, CallExpr) and len(outer_context.arg_names) >= n:
arg_name = outer_context.arg_names[n - 1]
if arg_name is not None:
arg_label = '"{}"'.format(arg_name)
if (arg_kind == ARG_STAR2
and isinstance(arg_type, TypedDictType)
and m <= len(callee.arg_names)
and callee.arg_names[m - 1] is not None
and callee.arg_kinds[m - 1] != ARG_STAR2):
arg_name = callee.arg_names[m - 1]
assert arg_name is not None
arg_type_str, expected_type_str = format_type_distinctly(
arg_type.items[arg_name],
expected_type,
bare=True)
arg_label = '"{}"'.format(arg_name)
msg = 'Argument {} {}has incompatible type {}; expected {}'.format(
arg_label, target, quote_type_string(arg_type_str),
quote_type_string(expected_type_str))
code = codes.ARG_TYPE
expected_type = get_proper_type(expected_type)
if isinstance(expected_type, UnionType):
expected_types = list(expected_type.items)
else:
expected_types = [expected_type]
for type in expected_types:
if isinstance(arg_type, Instance) and isinstance(type, Instance):
notes = append_invariance_notes(notes, arg_type, type)
self.fail(msg, context, code=code)
if notes:
for note_msg in notes:
self.note(note_msg, context, code=code)
return code
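    # Illustrative sketch (not part of the original source): the pseudo callee
    # names '<list>', '<dict>', '<generator>' etc. let this single entry point
    # format displayed-expression errors, e.g. a bad list item becomes
    #     List item 0 has incompatible type "str"; expected "int"
    # while an ordinary call falls through to the generic
    #     Argument 1 to "f" has incompatible type "str"; expected "int"
    # ("f" is a hypothetical function here.)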
def incompatible_argument_note(self,
original_caller_type: ProperType,
callee_type: ProperType,
context: Context,
code: Optional[ErrorCode]) -> None:
if (isinstance(original_caller_type, (Instance, TupleType, TypedDictType)) and
isinstance(callee_type, Instance) and callee_type.type.is_protocol):
self.report_protocol_problems(original_caller_type, callee_type, context, code=code)
if (isinstance(callee_type, CallableType) and
isinstance(original_caller_type, Instance)):
call = find_member('__call__', original_caller_type, original_caller_type,
is_operator=True)
if call:
self.note_call(original_caller_type, call, context, code=code)
def invalid_index_type(self, index_type: Type, expected_type: Type, base_str: str,
context: Context, *, code: ErrorCode) -> None:
index_str, expected_str = format_type_distinctly(index_type, expected_type)
self.fail('Invalid index type {} for {}; expected type {}'.format(
index_str, base_str, expected_str), context, code=code)
def too_few_arguments(self, callee: CallableType, context: Context,
argument_names: Optional[Sequence[Optional[str]]]) -> None:
if (argument_names is not None and not all(k is None for k in argument_names)
and len(argument_names) >= 1):
num_positional_args = sum(k is None for k in argument_names)
arguments_left = callee.arg_names[num_positional_args:callee.min_args]
diff = [k for k in arguments_left if k not in argument_names]
if len(diff) == 1:
msg = 'Missing positional argument'
else:
msg = 'Missing positional arguments'
callee_name = callable_name(callee)
if callee_name is not None and diff and all(d is not None for d in diff):
args = '", "'.join(cast(List[str], diff))
msg += ' "{}" in call to {}'.format(args, callee_name)
else:
msg = 'Too few arguments' + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def missing_named_argument(self, callee: CallableType, context: Context, name: str) -> None:
msg = 'Missing named argument "{}"'.format(name) + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def too_many_arguments(self, callee: CallableType, context: Context) -> None:
msg = 'Too many arguments' + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def too_many_arguments_from_typed_dict(self,
callee: CallableType,
arg_type: TypedDictType,
context: Context) -> None:
# Try to determine the name of the extra argument.
for key in arg_type.items:
if key not in callee.arg_names:
msg = 'Extra argument "{}" from **args'.format(key) + for_function(callee)
break
else:
self.too_many_arguments(callee, context)
return
self.fail(msg, context)
def too_many_positional_arguments(self, callee: CallableType,
context: Context) -> None:
msg = 'Too many positional arguments' + for_function(callee)
self.fail(msg, context)
def unexpected_keyword_argument(self, callee: CallableType, name: str,
context: Context) -> None:
msg = 'Unexpected keyword argument "{}"'.format(name) + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
module = find_defining_module(self.modules, callee)
if module:
assert callee.definition is not None
fname = callable_name(callee)
if not fname: # an alias to function with a different name
fname = 'Called function'
self.note('{} defined here'.format(fname), callee.definition,
file=module.path, origin=context, code=codes.CALL_ARG)
def duplicate_argument_value(self, callee: CallableType, index: int,
context: Context) -> None:
self.fail('{} gets multiple values for keyword argument "{}"'.
format(callable_name(callee) or 'Function', callee.arg_names[index]),
context)
def does_not_return_value(self, callee_type: Optional[Type], context: Context) -> None:
"""Report an error about use of an unusable type."""
name = None # type: Optional[str]
callee_type = get_proper_type(callee_type)
if isinstance(callee_type, FunctionLike):
name = callable_name(callee_type)
if name is not None:
self.fail('{} does not return a value'.format(capitalize(name)), context,
code=codes.FUNC_RETURNS_VALUE)
else:
self.fail('Function does not return a value', context, code=codes.FUNC_RETURNS_VALUE)
def deleted_as_rvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an rvalue."""
if typ.source is None:
s = ""
else:
s = " '{}'".format(typ.source)
self.fail('Trying to read deleted variable{}'.format(s), context)
def deleted_as_lvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an lvalue.
Currently, this only occurs when trying to assign to an
exception variable outside the local except: blocks.
"""
if typ.source is None:
s = ""
else:
s = " '{}'".format(typ.source)
self.fail('Assignment to variable{} outside except: block'.format(s), context)
def no_variant_matches_arguments(self,
plausible_targets: List[CallableType],
overload: Overloaded,
arg_types: List[Type],
context: Context) -> None:
name = callable_name(overload)
if name:
name_str = ' of {}'.format(name)
else:
name_str = ''
arg_types_str = ', '.join(format_type(arg) for arg in arg_types)
num_args = len(arg_types)
if num_args == 0:
self.fail('All overload variants{} require at least one argument'.format(name_str),
context, code=codes.CALL_OVERLOAD)
elif num_args == 1:
self.fail('No overload variant{} matches argument type {}'
.format(name_str, arg_types_str), context, code=codes.CALL_OVERLOAD)
else:
self.fail('No overload variant{} matches argument types {}'
.format(name_str, arg_types_str), context, code=codes.CALL_OVERLOAD)
self.pretty_overload_matches(plausible_targets, overload, context, offset=2, max_items=2,
code=codes.CALL_OVERLOAD)
def wrong_number_values_to_unpack(self, provided: int, expected: int,
context: Context) -> None:
if provided < expected:
if provided == 1:
self.fail('Need more than 1 value to unpack ({} expected)'.format(expected),
context)
else:
self.fail('Need more than {} values to unpack ({} expected)'.format(
provided, expected), context)
elif provided > expected:
self.fail('Too many values to unpack ({} expected, {} provided)'.format(
expected, provided), context)
def type_not_iterable(self, type: Type, context: Context) -> None:
self.fail('\'{}\' object is not iterable'.format(type), context)
def incompatible_operator_assignment(self, op: str,
context: Context) -> None:
self.fail('Result type of {} incompatible in assignment'.format(op),
context)
def overload_signature_incompatible_with_supertype(
self, name: str, name_in_super: str, supertype: str,
overload: Overloaded, context: Context) -> None:
target = self.override_target(name, name_in_super, supertype)
self.fail('Signature of "{}" incompatible with {}'.format(
name, target), context, code=codes.OVERRIDE)
note_template = 'Overload variants must be defined in the same order as they are in "{}"'
self.note(note_template.format(supertype), context, code=codes.OVERRIDE)
def signature_incompatible_with_supertype(
self, name: str, name_in_super: str, supertype: str,
context: Context) -> None:
target = self.override_target(name, name_in_super, supertype)
self.fail('Signature of "{}" incompatible with {}'.format(
name, target), context, code=codes.OVERRIDE)
def argument_incompatible_with_supertype(
self, arg_num: int, name: str, type_name: Optional[str],
name_in_supertype: str, arg_type_in_supertype: Type, supertype: str,
context: Context) -> None:
target = self.override_target(name, name_in_supertype, supertype)
arg_type_in_supertype_f = format_type_bare(arg_type_in_supertype)
self.fail('Argument {} of "{}" is incompatible with {}; '
'supertype defines the argument type as "{}"'
.format(arg_num, name, target, arg_type_in_supertype_f),
context,
code=codes.OVERRIDE)
if name == "__eq__" and type_name:
multiline_msg = self.comparison_method_example_msg(class_name=type_name)
self.note_multiline(multiline_msg, context, code=codes.OVERRIDE)
def comparison_method_example_msg(self, class_name: str) -> str:
return dedent('''\
It is recommended for "__eq__" to work with arbitrary objects, for example:
def __eq__(self, other: object) -> bool:
if not isinstance(other, {class_name}):
return NotImplemented
return <logic to compare two {class_name} instances>
'''.format(class_name=class_name))
def return_type_incompatible_with_supertype(
self, name: str, name_in_supertype: str, supertype: str,
original: Type, override: Type,
context: Context) -> None:
target = self.override_target(name, name_in_supertype, supertype)
override_str, original_str = format_type_distinctly(override, original)
self.fail('Return type {} of "{}" incompatible with return type {} in {}'
.format(override_str, name, original_str, target),
context,
code=codes.OVERRIDE)
def override_target(self, name: str, name_in_super: str,
supertype: str) -> str:
target = 'supertype "{}"'.format(supertype)
if name_in_super != name:
target = '"{}" of {}'.format(name_in_super, target)
return target
def incompatible_type_application(self, expected_arg_count: int,
actual_arg_count: int,
context: Context) -> None:
if expected_arg_count == 0:
self.fail('Type application targets a non-generic function or class',
context)
elif actual_arg_count > expected_arg_count:
self.fail('Type application has too many types ({} expected)'
.format(expected_arg_count), context)
else:
self.fail('Type application has too few types ({} expected)'
.format(expected_arg_count), context)
def alias_invalid_in_runtime_context(self, item: ProperType, ctx: Context) -> None:
kind = (' to Callable' if isinstance(item, CallableType) else
' to Tuple' if isinstance(item, TupleType) else
' to Union' if isinstance(item, UnionType) else
' to Literal' if isinstance(item, LiteralType) else
'')
self.fail('The type alias{} is invalid in runtime context'.format(kind), ctx)
def could_not_infer_type_arguments(self, callee_type: CallableType, n: int,
context: Context) -> None:
callee_name = callable_name(callee_type)
if callee_name is not None and n > 0:
self.fail('Cannot infer type argument {} of {}'.format(n, callee_name), context)
else:
self.fail('Cannot infer function type argument', context)
def invalid_var_arg(self, typ: Type, context: Context) -> None:
self.fail('List or tuple expected as variable arguments', context)
def invalid_keyword_var_arg(self, typ: Type, is_mapping: bool, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, Instance) and is_mapping:
self.fail('Keywords must be strings', context)
else:
suffix = ''
if isinstance(typ, Instance):
suffix = ', not {}'.format(format_type(typ))
self.fail(
'Argument after ** must be a mapping{}'.format(suffix),
context)
def undefined_in_superclass(self, member: str, context: Context) -> None:
self.fail('"{}" undefined in superclass'.format(member), context)
def first_argument_for_super_must_be_type(self, actual: Type, context: Context) -> None:
actual = get_proper_type(actual)
if isinstance(actual, Instance):
# Don't include type of instance, because it can look confusingly like a type
# object.
type_str = 'a non-type instance'
else:
type_str = format_type(actual)
self.fail('Argument 1 for "super" must be a type object; got {}'.format(type_str), context)
def too_few_string_formatting_arguments(self, context: Context) -> None:
self.fail('Not enough arguments for format string', context,
code=codes.STRING_FORMATTING)
def too_many_string_formatting_arguments(self, context: Context) -> None:
self.fail('Not all arguments converted during string formatting', context,
code=codes.STRING_FORMATTING)
def unsupported_placeholder(self, placeholder: str, context: Context) -> None:
self.fail('Unsupported format character \'%s\'' % placeholder, context,
code=codes.STRING_FORMATTING)
def string_interpolation_with_star_and_key(self, context: Context) -> None:
self.fail('String interpolation contains both stars and mapping keys', context,
code=codes.STRING_FORMATTING)
def requires_int_or_char(self, context: Context,
format_call: bool = False) -> None:
self.fail('"{}c" requires int or char'.format(':' if format_call else '%'),
context, code=codes.STRING_FORMATTING)
def key_not_in_mapping(self, key: str, context: Context) -> None:
self.fail('Key \'%s\' not found in mapping' % key, context,
code=codes.STRING_FORMATTING)
def string_interpolation_mixing_key_and_non_keys(self, context: Context) -> None:
self.fail('String interpolation mixes specifier with and without mapping keys', context,
code=codes.STRING_FORMATTING)
def cannot_determine_type(self, name: str, context: Context) -> None:
self.fail("Cannot determine type of '%s'" % name, context, code=codes.HAS_TYPE)
def cannot_determine_type_in_base(self, name: str, base: str, context: Context) -> None:
self.fail("Cannot determine type of '%s' in base class '%s'" % (name, base), context)
def no_formal_self(self, name: str, item: CallableType, context: Context) -> None:
self.fail('Attribute function "%s" with type %s does not accept self argument'
% (name, format_type(item)), context)
def incompatible_self_argument(self, name: str, arg: Type, sig: CallableType,
is_classmethod: bool, context: Context) -> None:
kind = 'class attribute function' if is_classmethod else 'attribute function'
self.fail('Invalid self argument %s to %s "%s" with type %s'
% (format_type(arg), kind, name, format_type(sig)), context)
def incompatible_conditional_function_def(self, defn: FuncDef) -> None:
self.fail('All conditional function variants must have identical '
'signatures', defn)
def cannot_instantiate_abstract_class(self, class_name: str,
abstract_attributes: List[str],
context: Context) -> None:
attrs = format_string_list(["'%s'" % a for a in abstract_attributes])
self.fail("Cannot instantiate abstract class '%s' with abstract "
"attribute%s %s" % (class_name, plural_s(abstract_attributes),
attrs),
context, code=codes.ABSTRACT)
def base_class_definitions_incompatible(self, name: str, base1: TypeInfo,
base2: TypeInfo,
context: Context) -> None:
self.fail('Definition of "{}" in base class "{}" is incompatible '
'with definition in base class "{}"'.format(
name, base1.name(), base2.name()), context)
def cant_assign_to_method(self, context: Context) -> None:
self.fail(message_registry.CANNOT_ASSIGN_TO_METHOD, context,
code=codes.ASSIGNMENT)
def cant_assign_to_classvar(self, name: str, context: Context) -> None:
self.fail('Cannot assign to class variable "%s" via instance' % name, context)
def final_cant_override_writable(self, name: str, ctx: Context) -> None:
self.fail('Cannot override writable attribute "{}" with a final one'.format(name), ctx)
def cant_override_final(self, name: str, base_name: str, ctx: Context) -> None:
self.fail('Cannot override final attribute "{}"'
' (previously declared in base class "{}")'.format(name, base_name), ctx)
def cant_assign_to_final(self, name: str, attr_assign: bool, ctx: Context) -> None:
"""Warn about a prohibited assignment to a final attribute.
Pass `attr_assign=True` if the assignment assigns to an attribute.
"""
kind = "attribute" if attr_assign else "name"
self.fail('Cannot assign to final {} "{}"'.format(kind, unmangle(name)), ctx)
def protocol_members_cant_be_final(self, ctx: Context) -> None:
self.fail("Protocol member cannot be final", ctx)
def final_without_value(self, ctx: Context) -> None:
self.fail("Final name must be initialized with a value", ctx)
def read_only_property(self, name: str, type: TypeInfo,
context: Context) -> None:
self.fail('Property "{}" defined in "{}" is read-only'.format(
name, type.name()), context)
def incompatible_typevar_value(self,
callee: CallableType,
typ: Type,
typevar_name: str,
context: Context) -> None:
self.fail(message_registry.INCOMPATIBLE_TYPEVAR_VALUE
.format(typevar_name, callable_name(callee) or 'function', format_type(typ)),
context,
code=codes.TYPE_VAR)
def dangerous_comparison(self, left: Type, right: Type, kind: str, ctx: Context) -> None:
left_str = 'element' if kind == 'container' else 'left operand'
right_str = 'container item' if kind == 'container' else 'right operand'
message = 'Non-overlapping {} check ({} type: {}, {} type: {})'
left_typ, right_typ = format_type_distinctly(left, right)
self.fail(message.format(kind, left_str, left_typ, right_str, right_typ), ctx,
code=codes.COMPARISON_OVERLAP)
def overload_inconsistently_applies_decorator(self, decorator: str, context: Context) -> None:
self.fail(
'Overload does not consistently use the "@{}" '.format(decorator)
+ 'decorator on all function signatures.',
context)
def overloaded_signatures_overlap(self, index1: int, index2: int, context: Context) -> None:
self.fail('Overloaded function signatures {} and {} overlap with '
'incompatible return types'.format(index1, index2), context)
def overloaded_signature_will_never_match(self, index1: int, index2: int,
context: Context) -> None:
self.fail(
'Overloaded function signature {index2} will never be matched: '
'signature {index1}\'s parameter type(s) are the same or broader'.format(
index1=index1,
index2=index2),
context)
def overloaded_signatures_typevar_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation cannot satisfy signature {} '.format(index) +
'due to inconsistencies in how they use type variables', context)
def overloaded_signatures_arg_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation does not accept all possible arguments '
'of signature {}'.format(index), context)
def overloaded_signatures_ret_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation cannot produce return type '
'of signature {}'.format(index), context)
def warn_both_operands_are_from_unions(self, context: Context) -> None:
self.note('Both left and right operands are unions', context, code=codes.OPERATOR)
def warn_operand_was_from_union(self, side: str, original: Type, context: Context) -> None:
self.note('{} operand is of type {}'.format(side, format_type(original)), context,
code=codes.OPERATOR)
def operator_method_signatures_overlap(
self, reverse_class: TypeInfo, reverse_method: str, forward_class: Type,
forward_method: str, context: Context) -> None:
self.fail('Signatures of "{}" of "{}" and "{}" of {} '
'are unsafely overlapping'.format(
reverse_method, reverse_class.name(),
forward_method, format_type(forward_class)),
context)
def forward_operator_not_callable(
self, forward_method: str, context: Context) -> None:
self.fail('Forward operator "{}" is not callable'.format(
forward_method), context)
def signatures_incompatible(self, method: str, other_method: str,
context: Context) -> None:
self.fail('Signatures of "{}" and "{}" are incompatible'.format(
method, other_method), context)
def yield_from_invalid_operand_type(self, expr: Type, context: Context) -> Type:
text = format_type(expr) if format_type(expr) != 'object' else expr
self.fail('"yield from" can\'t be applied to {}'.format(text), context)
return AnyType(TypeOfAny.from_error)
def invalid_signature(self, func_type: Type, context: Context) -> None:
self.fail('Invalid signature "{}"'.format(func_type), context)
def invalid_signature_for_special_method(
self, func_type: Type, context: Context, method_name: str) -> None:
self.fail('Invalid signature "{}" for "{}"'.format(func_type, method_name), context)
def reveal_type(self, typ: Type, context: Context) -> None:
self.note('Revealed type is \'{}\''.format(typ), context)
def reveal_locals(self, type_map: Dict[str, Optional[Type]], context: Context) -> None:
# To ensure that the output is predictable on Python < 3.6,
# use an ordered dictionary sorted by variable name
sorted_locals = OrderedDict(sorted(type_map.items(), key=lambda t: t[0]))
self.note("Revealed local types are:", context)
for line in [' {}: {}'.format(k, v) for k, v in sorted_locals.items()]:
self.note(line, context)
def unsupported_type_type(self, item: Type, context: Context) -> None:
self.fail('Unsupported type Type[{}]'.format(format_type(item)), context)
def redundant_cast(self, typ: Type, context: Context) -> None:
self.fail('Redundant cast to {}'.format(format_type(typ)), context,
code=codes.REDUNDANT_CAST)
def unimported_type_becomes_any(self, prefix: str, typ: Type, ctx: Context) -> None:
self.fail("{} becomes {} due to an unfollowed import".format(prefix, format_type(typ)),
ctx, code=codes.NO_ANY_UNIMPORTED)
def need_annotation_for_var(self, node: SymbolNode, context: Context,
python_version: Optional[Tuple[int, int]] = None) -> None:
hint = ''
has_variable_annotations = not python_version or python_version >= (3, 6)
# Only gives hint if it's a variable declaration and the partial type is a builtin type
if (python_version and isinstance(node, Var) and isinstance(node.type, PartialType) and
node.type.type and node.type.type.fullname() in reverse_builtin_aliases):
alias = reverse_builtin_aliases[node.type.type.fullname()]
alias = alias.split('.')[-1]
type_dec = '<type>'
if alias == 'Dict':
type_dec = '{}, {}'.format(type_dec, type_dec)
if has_variable_annotations:
hint = ' (hint: "{}: {}[{}] = ...")'.format(node.name(), alias, type_dec)
else:
hint = ' (hint: "{} = ... # type: {}[{}]")'.format(node.name(), alias, type_dec)
if has_variable_annotations:
needed = 'annotation'
else:
needed = 'comment'
self.fail("Need type {} for '{}'{}".format(needed, unmangle(node.name()), hint), context,
code=codes.VAR_ANNOTATED)
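    # Illustrative sketch (not part of the original source): for `x = []`
    # whose item type cannot be inferred, this produces (with 3.6+ syntax)
    #     Need type annotation for 'x' (hint: "x: List[<type>] = ...")
    # and the "# type:" comment form of the hint on older Python versions.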
def explicit_any(self, ctx: Context) -> None:
self.fail('Explicit "Any" is not allowed', ctx)
def unexpected_typeddict_keys(
self,
typ: TypedDictType,
expected_keys: List[str],
actual_keys: List[str],
context: Context) -> None:
actual_set = set(actual_keys)
expected_set = set(expected_keys)
if not typ.is_anonymous():
# Generate simpler messages for some common special cases.
if actual_set < expected_set:
# Use list comprehension instead of set operations to preserve order.
missing = [key for key in expected_keys if key not in actual_set]
self.fail('{} missing for TypedDict {}'.format(
format_key_list(missing, short=True).capitalize(), format_type(typ)),
context, code=codes.TYPEDDICT_ITEM)
return
else:
extra = [key for key in actual_keys if key not in expected_set]
if extra:
# If there are both extra and missing keys, only report extra ones for
# simplicity.
self.fail('Extra {} for TypedDict {}'.format(
format_key_list(extra, short=True), format_type(typ)),
context, code=codes.TYPEDDICT_ITEM)
return
found = format_key_list(actual_keys, short=True)
if not expected_keys:
self.fail('Unexpected TypedDict {}'.format(found), context)
return
expected = format_key_list(expected_keys)
if actual_keys and actual_set < expected_set:
found = 'only {}'.format(found)
self.fail('Expected {} but found {}'.format(expected, found), context,
code=codes.TYPEDDICT_ITEM)
def typeddict_key_must_be_string_literal(
self,
typ: TypedDictType,
context: Context) -> None:
self.fail(
'TypedDict key must be a string literal; expected one of {}'.format(
format_item_name_list(typ.items.keys())), context)
def typeddict_key_not_found(
self,
typ: TypedDictType,
item_name: str,
context: Context) -> None:
if typ.is_anonymous():
self.fail('\'{}\' is not a valid TypedDict key; expected one of {}'.format(
item_name, format_item_name_list(typ.items.keys())), context)
else:
self.fail("TypedDict {} has no key '{}'".format(format_type(typ), item_name), context)
def typeddict_key_cannot_be_deleted(
self,
typ: TypedDictType,
item_name: str,
context: Context) -> None:
if typ.is_anonymous():
self.fail("TypedDict key '{}' cannot be deleted".format(item_name),
context)
else:
self.fail("Key '{}' of TypedDict {} cannot be deleted".format(
item_name, format_type(typ)), context)
def typeddict_setdefault_arguments_inconsistent(
self,
default: Type,
expected: Type,
context: Context) -> None:
msg = 'Argument 2 to "setdefault" of "TypedDict" has incompatible type {}; expected {}'
self.fail(msg.format(format_type(default), format_type(expected)), context)
def type_arguments_not_allowed(self, context: Context) -> None:
self.fail('Parameterized generics cannot be used with class or instance checks', context)
def disallowed_any_type(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
message = 'Expression has type "Any"'
else:
message = 'Expression type contains "Any" (has type {})'.format(format_type(typ))
self.fail(message, context)
def incorrectly_returning_any(self, typ: Type, context: Context) -> None:
message = 'Returning Any from function declared to return {}'.format(
format_type(typ))
self.fail(message, context, code=codes.NO_ANY_RETURN)
def untyped_decorated_function(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
self.fail("Function is untyped after decorator transformation", context)
else:
self.fail('Type of decorated function contains type "Any" ({})'.format(
format_type(typ)), context)
def typed_function_untyped_decorator(self, func_name: str, context: Context) -> None:
self.fail('Untyped decorator makes function "{}" untyped'.format(func_name), context)
def bad_proto_variance(self, actual: int, tvar_name: str, expected: int,
context: Context) -> None:
msg = capitalize("{} type variable '{}' used in protocol where"
" {} one is expected".format(variance_string(actual),
tvar_name,
variance_string(expected)))
self.fail(msg, context)
def concrete_only_assign(self, typ: Type, context: Context) -> None:
self.fail("Can only assign concrete classes to a variable of type {}"
.format(format_type(typ)), context)
def concrete_only_call(self, typ: Type, context: Context) -> None:
self.fail("Only concrete class can be given where {} is expected"
.format(format_type(typ)), context)
def cannot_use_function_with_type(
self, method_name: str, type_name: str, context: Context) -> None:
self.fail("Cannot use {}() with {} type".format(method_name, type_name), context)
def report_non_method_protocol(self, tp: TypeInfo, members: List[str],
context: Context) -> None:
self.fail("Only protocols that don't have non-method members can be"
" used with issubclass()", context)
if len(members) < 3:
attrs = ', '.join(members)
self.note('Protocol "{}" has non-method member(s): {}'
.format(tp.name(), attrs), context)
def note_call(self,
subtype: Type,
call: Type,
context: Context,
*,
code: Optional[ErrorCode]) -> None:
self.note('"{}.__call__" has type {}'.format(format_type_bare(subtype),
format_type(call, verbosity=1)),
context, code=code)
def unreachable_statement(self, context: Context) -> None:
self.fail("Statement is unreachable", context)
def redundant_left_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the left operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.redundant_expr("Left operand of '{}'".format(op_name), op_name == 'and', context)
def redundant_right_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the right operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.fail("Right operand of '{}' is never evaluated".format(op_name), context)
def redundant_condition_in_comprehension(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition in comprehension", truthiness, context)
def redundant_condition_in_if(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition", truthiness, context)
def redundant_condition_in_assert(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("Condition in assert", truthiness, context)
def redundant_expr(self, description: str, truthiness: bool, context: Context) -> None:
self.fail("{} is always {}".format(description, str(truthiness).lower()), context)
def report_protocol_problems(self,
subtype: Union[Instance, TupleType, TypedDictType],
supertype: Instance,
context: Context,
*,
code: Optional[ErrorCode]) -> None:
"""Report possible protocol conflicts between 'subtype' and 'supertype'.
This includes missing members, incompatible types, and incompatible
attribute flags, such as settable vs read-only or class variable vs
instance variable.
"""
OFFSET = 4 # Four spaces, so that notes will look like this:
# note: 'Cls' is missing following 'Proto' members:
# note: method, attr
MAX_ITEMS = 2 # Maximum number of conflicts, missing members, and overloads shown
# List of special situations where we don't want to report additional problems
exclusions = {TypedDictType: ['typing.Mapping'],
TupleType: ['typing.Iterable', 'typing.Sequence'],
Instance: []} # type: Dict[type, List[str]]
if supertype.type.fullname() in exclusions[type(subtype)]:
return
if any(isinstance(tp, UninhabitedType) for tp in get_proper_types(supertype.args)):
# We don't want to add notes for failed inference (e.g. Iterable[<nothing>]).
            # This would only confuse the user even more.
return
if isinstance(subtype, TupleType):
if not isinstance(subtype.partial_fallback, Instance):
return
subtype = subtype.partial_fallback
elif isinstance(subtype, TypedDictType):
if not isinstance(subtype.fallback, Instance):
return
subtype = subtype.fallback
# Report missing members
missing = get_missing_protocol_members(subtype, supertype)
if (missing and len(missing) < len(supertype.type.protocol_members) and
len(missing) <= MAX_ITEMS):
self.note("'{}' is missing following '{}' protocol member{}:"
.format(subtype.type.name(), supertype.type.name(), plural_s(missing)),
context,
code=code)
self.note(', '.join(missing), context, offset=OFFSET, code=code)
elif len(missing) > MAX_ITEMS or len(missing) == len(supertype.type.protocol_members):
# This is an obviously wrong type: too many missing members
return
# Report member type conflicts
conflict_types = get_conflict_protocol_types(subtype, supertype)
if conflict_types and (not is_subtype(subtype, erase_type(supertype)) or
not subtype.type.defn.type_vars or
not supertype.type.defn.type_vars):
self.note('Following member(s) of {} have '
'conflicts:'.format(format_type(subtype)),
context,
code=code)
for name, got, exp in conflict_types[:MAX_ITEMS]:
exp = get_proper_type(exp)
got = get_proper_type(got)
if (not isinstance(exp, (CallableType, Overloaded)) or
not isinstance(got, (CallableType, Overloaded))):
self.note('{}: expected {}, got {}'.format(name,
*format_type_distinctly(exp, got)),
context,
offset=OFFSET,
code=code)
else:
self.note('Expected:', context, offset=OFFSET, code=code)
if isinstance(exp, CallableType):
self.note(pretty_callable(exp), context, offset=2 * OFFSET, code=code)
else:
assert isinstance(exp, Overloaded)
self.pretty_overload(exp, context, OFFSET, MAX_ITEMS, code=code)
self.note('Got:', context, offset=OFFSET, code=code)
if isinstance(got, CallableType):
self.note(pretty_callable(got), context, offset=2 * OFFSET, code=code)
else:
assert isinstance(got, Overloaded)
self.pretty_overload(got, context, OFFSET, MAX_ITEMS, code=code)
self.print_more(conflict_types, context, OFFSET, MAX_ITEMS, code=code)
# Report flag conflicts (i.e. settable vs read-only etc.)
conflict_flags = get_bad_protocol_flags(subtype, supertype)
for name, subflags, superflags in conflict_flags[:MAX_ITEMS]:
if IS_CLASSVAR in subflags and IS_CLASSVAR not in superflags:
self.note('Protocol member {}.{} expected instance variable,'
' got class variable'.format(supertype.type.name(), name),
context,
code=code)
if IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags:
self.note('Protocol member {}.{} expected class variable,'
' got instance variable'.format(supertype.type.name(), name),
context,
code=code)
if IS_SETTABLE in superflags and IS_SETTABLE not in subflags:
self.note('Protocol member {}.{} expected settable variable,'
' got read-only attribute'.format(supertype.type.name(), name),
context,
code=code)
if IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags:
self.note('Protocol member {}.{} expected class or static method'
.format(supertype.type.name(), name),
context,
code=code)
self.print_more(conflict_flags, context, OFFSET, MAX_ITEMS, code=code)
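    # Illustrative sketch (not part of the original source): for a class Cls
    # that lacks one member of a protocol Proto, the notes emitted above look
    # roughly like
    #     note: 'Cls' is missing following 'Proto' protocol member:
    #     note:     method
    # followed, where applicable, by member type conflicts and attribute-flag
    # notes (settable vs. read-only, class vs. instance variable, and so on).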
def pretty_overload(self,
tp: Overloaded,
context: Context,
offset: int,
max_items: int,
*,
code: Optional[ErrorCode] = None) -> None:
for item in tp.items()[:max_items]:
self.note('@overload', context, offset=2 * offset, code=code)
self.note(pretty_callable(item), context, offset=2 * offset, code=code)
left = len(tp.items()) - max_items
if left > 0:
msg = '<{} more overload{} not shown>'.format(left, plural_s(left))
self.note(msg, context, offset=2 * offset, code=code)
def pretty_overload_matches(self,
targets: List[CallableType],
func: Overloaded,
context: Context,
offset: int,
max_items: int,
code: ErrorCode) -> None:
if not targets:
targets = func.items()
shown = min(max_items, len(targets))
max_matching = len(targets)
max_available = len(func.items())
# If there are 3 matches but max_items == 2, we might as well show
# all three items instead of having the 3rd item be an error message.
if shown + 1 == max_matching:
shown = max_matching
self.note('Possible overload variant{}:'.format(plural_s(shown)), context, code=code)
for item in targets[:shown]:
self.note(pretty_callable(item), context, offset=2 * offset, code=code)
assert shown <= max_matching <= max_available
if shown < max_matching <= max_available:
left = max_matching - shown
msg = '<{} more similar overload{} not shown, out of {} total overloads>'.format(
left, plural_s(left), max_available)
self.note(msg, context, offset=2 * offset, code=code)
elif shown == max_matching < max_available:
left = max_available - shown
msg = '<{} more non-matching overload{} not shown>'.format(left, plural_s(left))
self.note(msg, context, offset=2 * offset, code=code)
else:
assert shown == max_matching == max_available
def print_more(self,
conflicts: Sequence[Any],
context: Context,
offset: int,
max_items: int,
*,
code: Optional[ErrorCode] = None) -> None:
if len(conflicts) > max_items:
self.note('<{} more conflict(s) not shown>'
.format(len(conflicts) - max_items),
context, offset=offset, code=code)
def quote_type_string(type_string: str) -> str:
"""Quotes a type representation for use in messages."""
no_quote_regex = r'^<(tuple|union): \d+ items>$'
if (type_string in ['Module', 'overloaded function', '<nothing>', '<deleted>']
or re.match(no_quote_regex, type_string) is not None or type_string.endswith('?')):
# Messages are easier to read if these aren't quoted. We use a
# regex to match strings with variable contents.
return type_string
return '"{}"'.format(type_string)
def format_type_inner(typ: Type,
verbosity: int,
fullnames: Optional[Set[str]]) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
Args:
verbosity: a coarse grained control on the verbosity of the type
fullnames: a set of names that should be printed in full
"""
def format(typ: Type) -> str:
return format_type_inner(typ, verbosity, fullnames)
# TODO: show type alias names in errors.
typ = get_proper_type(typ)
if isinstance(typ, Instance):
itype = typ
# Get the short name of the type.
if itype.type.fullname() in ('types.ModuleType',
'_importlib_modulespec.ModuleType'):
# Make some common error messages simpler and tidier.
return 'Module'
if verbosity >= 2 or (fullnames and itype.type.fullname() in fullnames):
base_str = itype.type.fullname()
else:
base_str = itype.type.name()
if itype.args == []:
# No type arguments, just return the type name
return base_str
elif itype.type.fullname() == 'builtins.tuple':
item_type_str = format(itype.args[0])
return 'Tuple[{}, ...]'.format(item_type_str)
elif itype.type.fullname() in reverse_builtin_aliases:
alias = reverse_builtin_aliases[itype.type.fullname()]
alias = alias.split('.')[-1]
items = [format(arg) for arg in itype.args]
return '{}[{}]'.format(alias, ', '.join(items))
else:
# There are type arguments. Convert the arguments to strings.
# If the result is too long, replace arguments with [...].
a = [] # type: List[str]
for arg in itype.args:
a.append(format(arg))
s = ', '.join(a)
            if len(base_str + s) < 150:
return '{}[{}]'.format(base_str, s)
else:
return '{}[...]'.format(base_str)
elif isinstance(typ, TypeVarType):
# This is similar to non-generic instance types.
return typ.name
elif isinstance(typ, TupleType):
# Prefer the name of the fallback class (if not tuple), as it's more informative.
if typ.partial_fallback.type.fullname() != 'builtins.tuple':
return format(typ.partial_fallback)
items = []
for t in typ.items:
items.append(format(t))
s = 'Tuple[{}]'.format(', '.join(items))
if len(s) < 400:
return s
else:
return '<tuple: {} items>'.format(len(items))
elif isinstance(typ, TypedDictType):
# If the TypedDictType is named, return the name
if not typ.is_anonymous():
return format(typ.fallback)
items = []
for (item_name, item_type) in typ.items.items():
modifier = '' if item_name in typ.required_keys else '?'
items.append('{!r}{}: {}'.format(item_name,
modifier,
format(item_type)))
s = 'TypedDict({{{}}})'.format(', '.join(items))
return s
elif isinstance(typ, LiteralType):
if typ.is_enum_literal():
underlying_type = format(typ.fallback)
return 'Literal[{}.{}]'.format(underlying_type, typ.value)
else:
return str(typ)
elif isinstance(typ, UnionType):
# Only print Unions as Optionals if the Optional wouldn't have to contain another Union
print_as_optional = (len(typ.items) -
sum(isinstance(t, NoneType) for t in typ.items) == 1)
if print_as_optional:
rest = [t for t in typ.items if not isinstance(t, NoneType)]
return 'Optional[{}]'.format(format(rest[0]))
else:
items = []
for t in typ.items:
items.append(format(t))
s = 'Union[{}]'.format(', '.join(items))
if len(s) < 400:
return s
else:
return '<union: {} items>'.format(len(items))
elif isinstance(typ, NoneType):
return 'None'
elif isinstance(typ, AnyType):
return 'Any'
elif isinstance(typ, DeletedType):
return '<deleted>'
elif isinstance(typ, UninhabitedType):
if typ.is_noreturn:
return 'NoReturn'
else:
return '<nothing>'
elif isinstance(typ, TypeType):
return 'Type[{}]'.format(format(typ.item))
elif isinstance(typ, FunctionLike):
func = typ
if func.is_type_obj():
# The type of a type object type can be derived from the
# return type (this always works).
return format(TypeType.make_normalized(erase_type(func.items()[0].ret_type)))
elif isinstance(func, CallableType):
return_type = format(func.ret_type)
if func.is_ellipsis_args:
return 'Callable[..., {}]'.format(return_type)
arg_strings = []
for arg_name, arg_type, arg_kind in zip(
func.arg_names, func.arg_types, func.arg_kinds):
if (arg_kind == ARG_POS and arg_name is None
or verbosity == 0 and arg_kind in (ARG_POS, ARG_OPT)):
arg_strings.append(format(arg_type))
else:
constructor = ARG_CONSTRUCTOR_NAMES[arg_kind]
if arg_kind in (ARG_STAR, ARG_STAR2) or arg_name is None:
arg_strings.append("{}({})".format(
constructor,
format(arg_type)))
else:
arg_strings.append("{}({}, {})".format(
constructor,
format(arg_type),
repr(arg_name)))
return 'Callable[[{}], {}]'.format(", ".join(arg_strings), return_type)
else:
# Use a simple representation for function types; proper
# function types may result in long and difficult-to-read
# error messages.
return 'overloaded function'
elif isinstance(typ, UnboundType):
return str(typ)
elif typ is None:
raise RuntimeError('Type is None')
else:
# Default case; we simply have to return something meaningful here.
return 'object'
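# Illustrative sketch (not part of the original source): typical renderings
# produced by format_type_inner at verbosity 0 include
#
#     builtins.list[builtins.int]   ->  'List[int]'
#     builtins.tuple[builtins.str]  ->  'Tuple[str, ...]'
#     Union[builtins.int, None]     ->  'Optional[int]'
#     types.ModuleType              ->  'Module'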
def collect_all_instances(t: Type) -> List[Instance]:
"""Return all instances that `t` contains (including `t`).
This is similar to collect_all_inner_types from typeanal but only
returns instances and will recurse into fallbacks.
"""
visitor = CollectAllInstancesQuery()
t.accept(visitor)
return visitor.instances
class CollectAllInstancesQuery(TypeTraverserVisitor):
def __init__(self) -> None:
self.instances = [] # type: List[Instance]
def visit_instance(self, t: Instance) -> None:
self.instances.append(t)
super().visit_instance(t)
def find_type_overlaps(*types: Type) -> Set[str]:
"""Return a set of fullnames that share a short name and appear in either type.
This is used to ensure that distinct types with the same short name are printed
with their fullname.
"""
d = {} # type: Dict[str, Set[str]]
for type in types:
for inst in collect_all_instances(type):
d.setdefault(inst.type.name(), set()).add(inst.type.fullname())
overlaps = set() # type: Set[str]
for fullnames in d.values():
if len(fullnames) > 1:
overlaps.update(fullnames)
return overlaps
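# Illustrative sketch (not part of the original source): if two distinct
# classes share the short name A (say the hypothetical pkg1.A and pkg2.A) and
# both occur in the compared types, find_type_overlaps() returns
#     {'pkg1.A', 'pkg2.A'}
# so that format_type_inner() prints them with full names and an error does
# not read as '"A" is not "A"'.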
def format_type(typ: Type, verbosity: int = 0) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
    `verbosity` is a coarse-grained control on the verbosity of the type.
This function returns a string appropriate for unmodified use in error
messages; this means that it will be quoted in most cases. If
modification of the formatted string is required, callers should use
format_type_bare.
"""
return quote_type_string(format_type_bare(typ, verbosity))
def format_type_bare(typ: Type,
verbosity: int = 0,
fullnames: Optional[Set[str]] = None) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
    `verbosity` is a coarse-grained control on the verbosity of the type.
    `fullnames` specifies a set of names that should be printed in full.
This function will return an unquoted string. If a caller doesn't need to
perform post-processing on the string output, format_type should be used
instead. (The caller may want to use quote_type_string after
processing has happened, to maintain consistent quoting in messages.)
"""
return format_type_inner(typ, verbosity, find_type_overlaps(typ))
def format_type_distinctly(type1: Type, type2: Type, bare: bool = False) -> Tuple[str, str]:
"""Jointly format a pair of types to distinct strings.
Increase the verbosity of the type strings until they become distinct
while also requiring that distinct types with the same short name are
formatted distinctly.
By default, the returned strings are created using format_type() and will be
quoted accordingly. If ``bare`` is True, the returned strings will not
be quoted; callers who need to do post-processing of the strings before
quoting them (such as prepending * or **) should use this.
"""
overlapping = find_type_overlaps(type1, type2)
for verbosity in range(2):
str1 = format_type_inner(type1, verbosity=verbosity, fullnames=overlapping)
str2 = format_type_inner(type2, verbosity=verbosity, fullnames=overlapping)
if str1 != str2:
break
if bare:
return (str1, str2)
else:
return (quote_type_string(str1), quote_type_string(str2))
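# Illustrative sketch (not part of the original source): given two Instance
# types of hypothetical classes both named A, the verbosity (and the overlap
# set) is raised until the strings differ:
#
#     format_type_distinctly(t1, t2)             # ('"pkg1.A"', '"pkg2.A"')
#     format_type_distinctly(t1, t2, bare=True)  # ('pkg1.A', 'pkg2.A')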
def pretty_callable(tp: CallableType) -> str:
"""Return a nice easily-readable representation of a callable type.
For example:
def [T <: int] f(self, x: int, y: T) -> None
"""
s = ''
asterisk = False
for i in range(len(tp.arg_types)):
if s:
s += ', '
if tp.arg_kinds[i] in (ARG_NAMED, ARG_NAMED_OPT) and not asterisk:
s += '*, '
asterisk = True
if tp.arg_kinds[i] == ARG_STAR:
s += '*'
asterisk = True
if tp.arg_kinds[i] == ARG_STAR2:
s += '**'
name = tp.arg_names[i]
if name:
s += name + ': '
s += format_type_bare(tp.arg_types[i])
if tp.arg_kinds[i] in (ARG_OPT, ARG_NAMED_OPT):
s += ' = ...'
# If we got a "special arg" (i.e: self, cls, etc...), prepend it to the arg list
if isinstance(tp.definition, FuncDef) and tp.definition.name() is not None:
definition_args = tp.definition.arg_names
if definition_args and tp.arg_names != definition_args \
and len(definition_args) > 0:
if s:
s = ', ' + s
s = definition_args[0] + s
s = '{}({})'.format(tp.definition.name(), s)
elif tp.name:
first_arg = tp.def_extras.get('first_arg')
if first_arg:
if s:
s = ', ' + s
s = first_arg + s
s = '{}({})'.format(tp.name.split()[0], s) # skip "of Class" part
else:
s = '({})'.format(s)
s += ' -> ' + format_type_bare(tp.ret_type)
if tp.variables:
tvars = []
for tvar in tp.variables:
upper_bound = get_proper_type(tvar.upper_bound)
if (isinstance(upper_bound, Instance) and
upper_bound.type.fullname() != 'builtins.object'):
tvars.append('{} <: {}'.format(tvar.name, format_type_bare(upper_bound)))
elif tvar.values:
tvars.append('{} in ({})'
.format(tvar.name, ', '.join([format_type_bare(tp)
for tp in tvar.values])))
else:
tvars.append(tvar.name)
s = '[{}] {}'.format(', '.join(tvars), s)
return 'def {}'.format(s)
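# Illustrative sketch (not part of the original source): for a method
#     def f(self, x: int, *, y: str = 'a') -> None
# pretty_callable() renders roughly
#     def f(self, x: int, *, y: str = ...) -> None
# which is the form used in "Possible overload variant" and protocol notes.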
def variance_string(variance: int) -> str:
if variance == COVARIANT:
return 'covariant'
elif variance == CONTRAVARIANT:
return 'contravariant'
else:
return 'invariant'
def get_missing_protocol_members(left: Instance, right: Instance) -> List[str]:
"""Find all protocol members of 'right' that are not implemented
(i.e. completely missing) in 'left'.
"""
assert right.type.is_protocol
missing = [] # type: List[str]
for member in right.type.protocol_members:
if not find_member(member, left, left):
missing.append(member)
return missing
def get_conflict_protocol_types(left: Instance, right: Instance) -> List[Tuple[str, Type, Type]]:
"""Find members that are defined in 'left' but have incompatible types.
Return them as a list of ('member', 'got', 'expected').
"""
assert right.type.is_protocol
conflicts = [] # type: List[Tuple[str, Type, Type]]
for member in right.type.protocol_members:
if member in ('__init__', '__new__'):
continue
supertype = find_member(member, right, left)
assert supertype is not None
subtype = find_member(member, left, left)
if not subtype:
continue
is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True)
if IS_SETTABLE in get_member_flags(member, right.type):
is_compat = is_compat and is_subtype(supertype, subtype)
if not is_compat:
conflicts.append((member, subtype, supertype))
return conflicts
def get_bad_protocol_flags(left: Instance, right: Instance
) -> List[Tuple[str, Set[int], Set[int]]]:
"""Return all incompatible attribute flags for members that are present in both
'left' and 'right'.
"""
assert right.type.is_protocol
all_flags = [] # type: List[Tuple[str, Set[int], Set[int]]]
for member in right.type.protocol_members:
if find_member(member, left, left):
item = (member,
get_member_flags(member, left.type),
get_member_flags(member, right.type))
all_flags.append(item)
bad_flags = []
for name, subflags, superflags in all_flags:
if (IS_CLASSVAR in subflags and IS_CLASSVAR not in superflags or
IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags or
IS_SETTABLE in superflags and IS_SETTABLE not in subflags or
IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags):
bad_flags.append((name, subflags, superflags))
return bad_flags
def capitalize(s: str) -> str:
"""Capitalize the first character of a string."""
if s == '':
return ''
else:
return s[0].upper() + s[1:]
def extract_type(name: str) -> str:
"""If the argument is the name of a method (of form C.m), return
the type portion in quotes (e.g. "y"). Otherwise, return the string
unmodified.
"""
name = re.sub('^"[a-zA-Z0-9_]+" of ', '', name)
return name
def strip_quotes(s: str) -> str:
"""Strip a double quote at the beginning and end of the string, if any."""
s = re.sub('^"', '', s)
s = re.sub('"$', '', s)
return s
def plural_s(s: Union[int, Sequence[Any]]) -> str:
count = s if isinstance(s, int) else len(s)
if count > 1:
return 's'
else:
return ''
def format_string_list(lst: List[str]) -> str:
assert len(lst) > 0
if len(lst) == 1:
return lst[0]
elif len(lst) <= 5:
return '%s and %s' % (', '.join(lst[:-1]), lst[-1])
else:
return '%s, ... and %s (%i methods suppressed)' % (
', '.join(lst[:2]), lst[-1], len(lst) - 3)
def format_item_name_list(s: Iterable[str]) -> str:
lst = list(s)
if len(lst) <= 5:
return '(' + ', '.join(["'%s'" % name for name in lst]) + ')'
else:
return '(' + ', '.join(["'%s'" % name for name in lst[:5]]) + ', ...)'
def callable_name(type: FunctionLike) -> Optional[str]:
name = type.get_name()
if name is not None and name[0] != '<':
return '"{}"'.format(name).replace(' of ', '" of "')
return name
def for_function(callee: CallableType) -> str:
name = callable_name(callee)
if name is not None:
return ' for {}'.format(name)
return ''
def find_defining_module(modules: Dict[str, MypyFile], typ: CallableType) -> Optional[MypyFile]:
if not typ.definition:
return None
fullname = typ.definition.fullname()
if fullname is not None and '.' in fullname:
for i in range(fullname.count('.')):
module_name = fullname.rsplit('.', i + 1)[0]
try:
return modules[module_name]
except KeyError:
pass
assert False, "Couldn't determine module from CallableType"
return None
def temp_message_builder() -> MessageBuilder:
"""Return a message builder usable for throwaway errors (which may not format properly)."""
return MessageBuilder(Errors(), {})
# For hard-coding suggested missing member alternatives.
COMMON_MISTAKES = {
'add': ('append', 'extend'),
} # type: Final[Dict[str, Sequence[str]]]
def best_matches(current: str, options: Iterable[str]) -> List[str]:
ratios = {v: difflib.SequenceMatcher(a=current, b=v).ratio() for v in options}
return sorted((o for o in options if ratios[o] > 0.75),
reverse=True, key=lambda v: (ratios[v], v))
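# Illustrative example (not from the original source): with the 0.75 cutoff above,
# best_matches('lenght', ['length', 'len', 'count']) keeps only 'length', since the
# SequenceMatcher ratios for 'len' (about 0.67) and 'count' fall below the threshold.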
def pretty_or(args: List[str]) -> str:
quoted = ['"' + a + '"' for a in args]
if len(quoted) == 1:
return quoted[0]
if len(quoted) == 2:
return "{} or {}".format(quoted[0], quoted[1])
return ", ".join(quoted[:-1]) + ", or " + quoted[-1]
def append_invariance_notes(notes: List[str], arg_type: Instance,
expected_type: Instance) -> List[str]:
"""Explain that the type is invariant and give notes for how to solve the issue."""
invariant_type = ''
covariant_suggestion = ''
if (arg_type.type.fullname() == 'builtins.list' and
expected_type.type.fullname() == 'builtins.list' and
is_subtype(arg_type.args[0], expected_type.args[0])):
invariant_type = 'List'
covariant_suggestion = 'Consider using "Sequence" instead, which is covariant'
elif (arg_type.type.fullname() == 'builtins.dict' and
expected_type.type.fullname() == 'builtins.dict' and
is_same_type(arg_type.args[0], expected_type.args[0]) and
is_subtype(arg_type.args[1], expected_type.args[1])):
invariant_type = 'Dict'
covariant_suggestion = ('Consider using "Mapping" instead, '
'which is covariant in the value type')
if invariant_type and covariant_suggestion:
notes.append(
'"{}" is invariant -- see '.format(invariant_type) +
'http://mypy.readthedocs.io/en/latest/common_issues.html#variance')
notes.append(covariant_suggestion)
return notes
def make_inferred_type_note(context: Context, subtype: Type,
supertype: Type, supertype_str: str) -> str:
"""Explain that the user may have forgotten to type a variable.
The user does not expect an error if the inferred container type is the same as the return
type of a function and the argument type(s) are a subtype of the argument type(s) of the
return type. This note suggests that they add a type annotation with the return type instead
of relying on the inferred type.
"""
subtype = get_proper_type(subtype)
supertype = get_proper_type(supertype)
if (isinstance(subtype, Instance) and
isinstance(supertype, Instance) and
subtype.type.fullname() == supertype.type.fullname() and
subtype.args and
supertype.args and
isinstance(context, ReturnStmt) and
isinstance(context.expr, NameExpr) and
isinstance(context.expr.node, Var) and
context.expr.node.is_inferred):
for subtype_arg, supertype_arg in zip(subtype.args, supertype.args):
if not is_subtype(subtype_arg, supertype_arg):
return ''
var_name = context.expr.name
return 'Perhaps you need a type annotation for "{}"? Suggestion: {}'.format(
var_name, supertype_str)
return ''
def format_key_list(keys: List[str], *, short: bool = False) -> str:
reprs = [repr(key) for key in keys]
td = '' if short else 'TypedDict '
if len(keys) == 0:
return 'no {}keys'.format(td)
elif len(keys) == 1:
return '{}key {}'.format(td, reprs[0])
else:
return '{}keys ({})'.format(td, ', '.join(reprs))
|
#!/usr/bin/env python3
#
# __init__.py
"""
Pure-python implementation of some unicodedata functions.
"""
#
# Based on CPython.
# Licensed under the Python Software Foundation License Version 2.
# Copyright © 2001-2020 Python Software Foundation. All rights reserved.
# Copyright © 2000 BeOpen.com. All rights reserved.
# Copyright © 1995-2000 Corporation for National Research Initiatives. All rights reserved.
# Copyright © 1991-1995 Stichting Mathematisch Centrum. All rights reserved.
#
# See the LICENSE file for details.
#
# stdlib
import unicodedata
# this package
from pyunicodedata._c_unicodedata import Py_UNICODE_TODECIMAL, Py_UNICODE_TODIGIT, Py_UNICODE_TONUMERIC
__author__: str = "Dominic Davis-Foster"
__license__: str = "PSF"
__version__: str = "0.0.0"
__email__: str = "dominic@davis-foster.co.uk"
__all__ = ["decimal", "digit", "numeric"]
MISSING = object()
def decimal(chr: str, default=MISSING): # noqa: A002 # pylint: disable=redefined-builtin
"""
Returns the decimal value assigned to the character chr as integer.
If no such value is defined, default is returned, or, if not given, ValueError is raised.
    :param chr: The character to look up.
    :param default: Value to return when no decimal value is defined for ``chr``.
"""
# TODO: get_old_record
rc: int = Py_UNICODE_TODECIMAL(chr)
if rc < 0:
if default is MISSING:
raise ValueError("not a decimal")
else:
return default
return rc
def digit(chr: str, default=MISSING): # noqa: A002 # pylint: disable=redefined-builtin
"""
Returns the digit value assigned to the character chr as integer.
If no such value is defined, default is returned, or, if not given, ValueError is raised.
    :param chr: The character to look up.
    :param default: Value to return when no digit value is defined for ``chr``.
"""
rc: int = Py_UNICODE_TODIGIT(chr)
if rc < 0:
if default is MISSING:
raise ValueError("not a digit")
else:
return default
return rc
def numeric(chr: str, default=MISSING): # noqa: A002 # pylint: disable=redefined-builtin
"""
Returns the numeric value assigned to the character chr as float.
If no such value is defined, default is returned, or, if not given, ValueError is raised.
    :param chr: The character to look up.
    :param default: Value to return when no numeric value is defined for ``chr``.
"""
# TODO: get_old_record
rc: float = Py_UNICODE_TONUMERIC(chr)
if rc == -1.0:
if default is MISSING:
raise ValueError("not a numeric character")
else:
return default
return rc
# combining
# east asian width
# mirrored
# decomposition
# is_normalized
def install_patch():
if not hasattr(unicodedata, "decimal"):
unicodedata.decimal = decimal
if not hasattr(unicodedata, "digit"):
unicodedata.digit = digit
if not hasattr(unicodedata, "numeric"):
unicodedata.numeric = numeric
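# Example usage (illustrative; mirrors the stdlib unicodedata behaviour):
#
#     >>> decimal("7")
#     7
#     >>> digit("²")        # SUPERSCRIPT TWO has a digit value but no decimal value
#     2
#     >>> numeric("½")      # VULGAR FRACTION ONE HALF
#     0.5
#     >>> install_patch()   # adds these functions to unicodedata only if they are missing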
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['SyncGroup']
class SyncGroup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
sync_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Sync Group object.
API Version: 2020-03-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] storage_sync_service_name: Name of Storage Sync Service resource.
:param pulumi.Input[str] sync_group_name: Name of Sync Group resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if storage_sync_service_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_sync_service_name'")
__props__['storage_sync_service_name'] = storage_sync_service_name
__props__['sync_group_name'] = sync_group_name
__props__['name'] = None
__props__['sync_group_status'] = None
__props__['type'] = None
__props__['unique_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagesync/latest:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20170605preview:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180402:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180701:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20181001:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190201:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190301:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190601:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20191001:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200301:SyncGroup"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200901:SyncGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SyncGroup, __self__).__init__(
'azure-nextgen:storagesync:SyncGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SyncGroup':
"""
Get an existing SyncGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return SyncGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="syncGroupStatus")
def sync_group_status(self) -> pulumi.Output[str]:
"""
Sync group status
"""
return pulumi.get(self, "sync_group_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> pulumi.Output[str]:
"""
Unique Id
"""
return pulumi.get(self, "unique_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
#!/usr/bin/python
# Copyright (C) 2018 Stephen Farrell, stephen.farrell@cs.tcd.ie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Fix up p443san - p587 names overwrote p443 names when both
# were present, or if only p587 was present.
# Fix is to delve back into records.fresh, pick out
# the right record, update the p443 names and then produce a new
# collisions.json file. After that, usual make targets can be used
# to recreate graphs etc. as desired and they should be the same.
# Crap - same problem for p993 overwriting p443 names! Sigh
# For the IE-20180316 run, we need to check/fix 2959 from 9765 records
# for p587
import os, re, sys, argparse, tempfile, gc
import json
import jsonpickle # install via "$ sudo pip install -U jsonpickle"
import time, datetime
from dateutil import parser as dparser # for parsing time from comand line and certs
import pytz # for adding back TZ info to allow comparisons
# our own stuff
from SurveyFuncs import *
# default values
infile="records.fresh"
outfile="collisions.json"
# command line arg handling
argparser=argparse.ArgumentParser(description='Fix mucked-up p443san records for collisions')
argparser.add_argument('-i','--input',
dest='infile',
help='file containing previously generated collisions')
argparser.add_argument('-o','--output_file',
dest='outfile',
help='file in which to put fixed json records')
args=argparser.parse_args()
def usage():
    print >>sys.stderr, "usage: " + sys.argv[0] + " -i <infile> -o <outfile> "
print >>sys.stderr, " both inputs are mandatory and must differ"
if args.infile is None:
print "You need to supply all inputs"
usage()
infile=args.infile
if args.outfile is None:
print "You need to supply all inputs"
usage()
outfile=args.outfile
if infile==outfile:
print "can't overwrite input with output"
usage()
def certsfromrf(ip,rf):
# will do real code here shortly...
# search for the ip from records.fresh
# the file pointer for records.fresh should (I hope) be ok to
# move along in natural order given how we made collisions.json
# in the first place - this wouldn't generally be true
# we really should also always find the IP, if not then scream!!!
found=False
#print "Called certfromrf looking for " + ip + ", rf.tell says: " + str(rf.tell())
for line in rf:
if re.search('"ip": "'+ip+'"',line):
#print "Found " + ip + " in records.fresh at offset " + str(rf.tell())
found=True
break
if not found:
print >>sys.stderr, "EEK - No sign of " + ip + " in records.fresh at offset " + str(rf.tell())
sys.exit(99)
# decode the json for that ip
j_content = json.loads(line)
certs={}
try:
# FreshGrab.py sourced version
certs['p443']=j_content['p443']['data']['http']['response']['request']['tls_handshake']['server_certificates']['certificate']
except:
try:
# censys.io sourced version
certs['p443']=j_content['p443']['https']['tls']['certificate']
except:
pass
try:
certs['p587']=j_content['p587']['data']['tls']['server_certificates']['certificate']
except:
# censys.io has no p587, but sure we'll try anyway - EE/2017 has 1 (yes 1!!) such record, somehow
pass
try:
certs['p993']=j_content['p993']['data']['tls']['server_certificates']['certificate']
except:
try:
certs['p993']=j_content['p993']['imaps']['tls']['tls']['certificate']['parsed']
except:
pass
    if len(certs)==0:
        print "EEK - Can't find any certs for " + ip
if 'p443' in j_content:
print j_content['p443']
if 'p587' in j_content:
print j_content['p587']
if 'p993' in j_content:
print j_content['p993']
sys.exit(98)
# we're done - return the cert
return certs
# fixup function
def fix443names(f,rf):
# zap names
# grab f.ip record p443 server cert from records.fresh into cert
certs=certsfromrf(f.ip,rf)
nameset=f.analysis['nameset']
for pnum in 443,587,993:
portstring='p'+str(pnum)
if portstring not in certs:
            if portstring+'dn' in f.analysis['nameset']:
                del f.analysis['nameset'][portstring+'dn']
oldsancount=0
elname=portstring+'san'+str(oldsancount)
while elname in f.analysis['nameset']:
del f.analysis['nameset'][elname]
oldsancount += 1
elname=portstring+'san'+str(oldsancount)
continue
dn=certs[portstring]['parsed']['subject_dn']
dn_fqdn=dn2cn(dn)
nameset[portstring+'dn'] = dn_fqdn
# name from cert SAN
# zap old sans
oldsancount=0
elname=portstring+'san'+str(oldsancount)
while elname in nameset:
del nameset[elname]
oldsancount += 1
elname=portstring+'san'+str(oldsancount)
# and repair from cert
if 'subject_alt_name' in certs[portstring]['parsed']['extensions']:
sans=certs[portstring]['parsed']['extensions']['subject_alt_name']
if 'dns_names' in sans:
san_fqdns=sans['dns_names']
# we ignore all non dns_names - there are very few in our data (maybe 145 / 12000)
# and they're mostly otherName with opaque OID/value so not that useful. (A few
# are emails but we'll skip 'em for now)
#print "FQDN san " + str(san_fqdns)
sancount=0
for san in san_fqdns:
nameset[portstring+'san'+str(sancount)]=san_fqdns[sancount]
sancount += 1
# there are some CRAAAAAAZZZY huge certs out there - saw one with >1500 SANs
# which slows us down loads, so we'll just max out at 20
if sancount >= MAXSAN:
toobig=str(len(san_fqdns))
                        nameset['san'+str(sancount+1)]="Bollox-too-many-sans-1-" + toobig
                        print >> sys.stderr, "Too many bleeding (" + toobig + ") sans"
break
for elname in sans:
if elname != 'dns_names':
print "SAN found with non dns_name for " + f.ip
print "\t" + str(sans)
break
return True
# mainline processing
# open records.fresh
rf=open("records.fresh","r")
# open file
fp=open(infile,"r")
jsonpickle.set_encoder_options('json', sort_keys=True, indent=2)
colf=open(outfile,"w")
colf.write('[\n')
firstone=True
overallcount=0
fixcount=0
f=getnextfprint(fp)
while f:
# if we have either port we have a thing to fix
if ('p587' in f.fprints) or ('p993' in f.fprints):
fix443names(f,rf)
fixcount += 1
# write it out, fixed or not
bstr=jsonpickle.encode(f,unpicklable=False)
if not firstone:
colf.write('\n,\n')
firstone=False
colf.write(bstr)
del bstr
if overallcount % 100 == 0:
        print >> sys.stderr, "Repairing collisions, did: " + str(overallcount) + \
" fixed: " + str(fixcount)
f=getnextfprint(fp)
overallcount += 1
fp.close()
rf.close()
colf.write('\n]\n')
colf.close()
print >> sys.stderr, "Done repairing collisions, did: " + str(overallcount) + \
" fixed: " + str(fixcount)
|
import cv2
TYPEMAP = {
'THRESH_BINARY': cv2.THRESH_BINARY,
'THRESH_BINARY_INV': cv2.THRESH_BINARY_INV,
'THRESH_TRUNC': cv2.THRESH_TRUNC,
'THRESH_TOZERO': cv2.THRESH_TOZERO,
'THRESH_TOZERO_INV': cv2.THRESH_TOZERO_INV,
}
def threshold(src, mode, thresh_type, thresh, maxval):
if mode == 'simple':
return cv2.threshold(src, thresh, maxval, TYPEMAP[thresh_type])
elif mode == 'otsu':
return cv2.threshold(src, 0, maxval, TYPEMAP[thresh_type] + cv2.THRESH_OTSU)
else:
raise KeyError('Wrong function name')
def adaptive_threshold(src, mode, thresh_type, maxval, bsize, c):
if mode == 'mean':
return cv2.adaptiveThreshold(src, maxval, cv2.ADAPTIVE_THRESH_MEAN_C, TYPEMAP[thresh_type], bsize, c)
elif mode == 'gaussian':
return cv2.adaptiveThreshold(src, maxval, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, TYPEMAP[thresh_type], bsize, c)
else:
raise KeyError('Wrong function name')
def apply(src, method, mode, thresh_type, *args, **kwargs):
if method == 'global':
return threshold(src, mode, thresh_type, *args, **kwargs)
elif method == 'adaptive':
return adaptive_threshold(src, mode, thresh_type, *args, **kwargs)
else:
raise KeyError('Wrong function name')
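if __name__ == '__main__':
    # Minimal self-check (illustrative, not part of the original module): binarize a
    # synthetic grayscale gradient through both entry points of apply().
    import numpy as np
    img = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
    ret, simple = apply(img, 'global', 'simple', 'THRESH_BINARY', 127, 255)
    adaptive = apply(img, 'adaptive', 'mean', 'THRESH_BINARY', 255, 11, 2)
    print(ret, simple.shape, adaptive.shape)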
|
import re
def arithmetic_arranger(problems, solve=False):
if (len(problems) > 5):
return "Error: Too many problems."
firstline = ""
secondline = ""
dashes = ""
answer = ""
arranged_problems = ""
for problem in problems:
        if (re.search(r"[^\s0-9.+-]", problem)):
if (re.search("[/]", problem) or re.search("[*]", problem)):
return "Error: Operator must be '+' or '-'."
return "Error: Numbers must only contain digits."
firstnum = problem.split(" ")[0]
operator = problem.split(" ")[1]
secondnum = problem.split(" ")[2]
if (len(firstnum) >= 5 or len(secondnum) >= 5):
return "Error: Numbers cannot be more than four digits."
sum = ""
if (operator == "+"):
sum = str(int(firstnum) + int(secondnum))
elif (operator == "-"):
sum = str(int(firstnum) - int(secondnum))
length = (max(len(firstnum), len(secondnum))) + 2
first = str(firstnum).rjust(length)
second = operator + str(secondnum).rjust(length - 1)
dash = ""
total = str(sum).rjust(length)
for num in range(length):
dash += "-"
if problem != problems[-1]:
firstline += first + " "
secondline += second + " "
dashes += dash + " "
answer += total + " "
else:
firstline += first
secondline += second
dashes += dash
answer += total
if solve == True:
arranged_problems = firstline + "\n" + secondline + "\n" + dashes + "\n" + answer
else:
arranged_problems = firstline + "\n" + secondline + "\n" + dashes
return arranged_problems
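if __name__ == "__main__":
    # Illustrative usage (not part of the original exercise): each problem is laid out
    # vertically and right-aligned; passing solve=True appends the answers line.
    print(arithmetic_arranger(["32 + 698", "3801 - 2", "45 + 43"], True))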
|
# noinspection PyUnresolvedReferences
from qgis.core import QgsAbstractFeatureSource, QgsFeatureIterator, QgsLogger
from .iterator import DjangoFeatureIterator
class DjangoFeatureSource(QgsAbstractFeatureSource):
def __init__(self, provider, model, qgs_fields, dj_fields, dj_geo_field, crs, is_valid):
QgsLogger.debug('DjangoFeatureSource.__init__ model = {}'.format(model), 10)
super(DjangoFeatureSource, self).__init__()
self.provider = provider
self.model = model
self.qgs_fields = qgs_fields
self.dj_fields = dj_fields
self.dj_geo_field = dj_geo_field
self.crs = crs
self.is_valid = is_valid
self.iterator = None
def getFeatures(self, request):
QgsLogger.debug('DjangoFeatureSource.getFeatures() request = {}'.format(request), 10)
# Returning QgsFeatureIterator(DjangoFeatureIterator(self, request)) without keeping reference to at least
# QgsFeatureIterator or DjangoFeatureIterator does not work anymore. Asked on QGIS list:
# https://lists.osgeo.org/pipermail/qgis-developer/2020-August/062076.html
        # It seems to work if we keep a reference to just the latest created iterator (instead of a list of iterators)
# return QgsFeatureIterator(DjangoFeatureIterator(self, request))
self.iterator = QgsFeatureIterator(DjangoFeatureIterator(self.provider, self, request, self.is_valid))
return self.iterator
|
feuille = [
["rouge", [1, 3]],
["blanc", [0, 2, 4]],
["rouge", [1, 5]],
["rouge", [0, 4, 6]],
["rouge", [1, 3, 5, 7]],
["rouge", [2, 4, 8]],
["blanc", [3, 7]],
["rouge", [6, 4, 8]],
["blanc", [5, 7]]
]
def remplissage(feuille, i, courante):
    """Flood fill: recolour pixel i and every pixel reachable through same-coloured neighbours with `courante`."""
    [remplacer, voisins] = feuille[i]
feuille[i] = [courante, voisins]
pixels_accessibles = voisins
pixels_deja_vus = [False]*len(feuille)
while (len(pixels_accessibles) != 0):
p = pixels_accessibles[0]
pixels_deja_vus[p] = True
pixels_accessibles = pixels_accessibles[1:]
[c, v] = feuille[p]
if (c == remplacer):
feuille[p] = [courante, v]
for voisin in v:
if (not pixels_deja_vus[voisin]):
pixels_accessibles = [voisin] + pixels_accessibles
return(feuille)
print(remplissage(feuille, 7, "bleu"))
|
def ficha(jog='desconhecido', gol=0):
print(f'O jogador {jog} fez {gol} gol(s) no campeonato. ')
# Main program
n = str(input("Nome do jogador: "))
g = str(input("Numero de Gols: "))
if g.isnumeric():
g = int(g)
else:
g = 0
if n.strip() == '':
ficha(gol=g)
else:
ficha(n,g)
|
#!coding: utf-8
import os
import shutil
import textwrap
from ..util.compat import u, has_pep3147, get_current_bytecode_suffixes
from ..script import Script, ScriptDirectory
from .. import util
from . import engines
from . import provision
def _get_staging_directory():
if provision.FOLLOWER_IDENT:
return "scratch_%s" % provision.FOLLOWER_IDENT
else:
return 'scratch'
def staging_env(create=True, template="generic", sourceless=False):
from alembic import command, script
cfg = _testing_config()
if create:
path = os.path.join(_get_staging_directory(), 'scripts')
if os.path.exists(path):
shutil.rmtree(path)
command.init(cfg, path, template=template)
if sourceless:
try:
# do an import so that a .pyc/.pyo is generated.
util.load_python_file(path, 'env.py')
except AttributeError:
# we don't have the migration context set up yet
# so running the .env py throws this exception.
# theoretically we could be using py_compiler here to
# generate .pyc/.pyo without importing but not really
# worth it.
pass
assert sourceless in (
"pep3147_envonly", "simple", "pep3147_everything"), sourceless
make_sourceless(
os.path.join(path, "env.py"),
"pep3147" if "pep3147" in sourceless else "simple"
)
sc = script.ScriptDirectory.from_config(cfg)
return sc
def clear_staging_env():
shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
path = os.path.join(dir_, "script.py.mako")
with open(path, 'w') as f:
f.write(txt)
def env_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
txt = """
from alembic import context
config = context.config
""" + txt
path = os.path.join(dir_, "env.py")
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
with open(path, 'w') as f:
f.write(txt)
def _sqlite_file_db(tempname="foo.db"):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/%s" % (dir_, tempname)
return engines.testing_engine(url=url)
def _sqlite_testing_config(sourceless=False):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false"))
def _multi_dir_testing_config(sourceless=False, extra_version_location=''):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false",
extra_version_location))
def _no_sql_testing_config(dialect="postgresql", directives=""):
"""use a postgresql url with no host so that
connections guaranteed to fail"""
dir_ = os.path.join(_get_staging_directory(), 'scripts')
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s://
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, dialect, directives))
def _write_config_file(text):
cfg = _testing_config()
with open(cfg.config_file_name, 'w') as f:
f.write(text)
return cfg
def _testing_config():
from alembic.config import Config
if not os.access(_get_staging_directory(), os.F_OK):
os.mkdir(_get_staging_directory())
return Config(os.path.join(_get_staging_directory(), 'test_alembic.ini'))
def write_script(
scriptdir, rev_id, content, encoding='ascii', sourceless=False):
old = scriptdir.revision_map.get_revision(rev_id)
path = old.path
content = textwrap.dedent(content)
if encoding:
content = content.encode(encoding)
with open(path, 'wb') as fp:
fp.write(content)
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
script = Script._from_path(scriptdir, path)
old = scriptdir.revision_map.get_revision(script.revision)
if old.down_revision != script.down_revision:
raise Exception("Can't change down_revision "
"on a refresh operation.")
scriptdir.revision_map.add_revision(script, _replace=True)
if sourceless:
make_sourceless(
path,
"pep3147" if sourceless == "pep3147_everything" else "simple"
)
def make_sourceless(path, style):
import py_compile
py_compile.compile(path)
if style == "simple" and has_pep3147():
pyc_path = util.pyc_file_from_path(path)
suffix = get_current_bytecode_suffixes()[0]
filepath, ext = os.path.splitext(path)
simple_pyc_path = filepath + suffix
shutil.move(pyc_path, simple_pyc_path)
pyc_path = simple_pyc_path
elif style == "pep3147" and not has_pep3147():
raise NotImplementedError()
else:
assert style in ("pep3147", "simple")
pyc_path = util.pyc_file_from_path(path)
assert os.access(pyc_path, os.F_OK)
os.unlink(path)
def three_rev_fixture(cfg):
a = util.rev_id()
b = util.rev_id()
c = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(a, "revision a", refresh=True)
write_script(script, a, """\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
op.execute("CREATE STEP 1")
def downgrade():
op.execute("DROP STEP 1")
""" % a)
script.generate_revision(b, "revision b", refresh=True)
write_script(script, b, u("""# coding: utf-8
"Rev B, méil, %3"
revision = '{}'
down_revision = '{}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 2")
def downgrade():
op.execute("DROP STEP 2")
""").format(b, a), encoding="utf-8")
script.generate_revision(c, "revision c", refresh=True)
write_script(script, c, """\
"Rev C"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 3")
def downgrade():
op.execute("DROP STEP 3")
""" % (c, b))
return a, b, c
def multi_heads_fixture(cfg, a, b, c):
"""Create a multiple head fixture from the three-revs fixture"""
d = util.rev_id()
e = util.rev_id()
f = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(
d, "revision d from b", head=b, splice=True, refresh=True)
write_script(script, d, """\
"Rev D"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 4")
def downgrade():
op.execute("DROP STEP 4")
""" % (d, b))
script.generate_revision(
e, "revision e from d", head=d, splice=True, refresh=True)
write_script(script, e, """\
"Rev E"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 5")
def downgrade():
op.execute("DROP STEP 5")
""" % (e, d))
script.generate_revision(
f, "revision f from b", head=b, splice=True, refresh=True)
write_script(script, f, """\
"Rev F"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 6")
def downgrade():
op.execute("DROP STEP 6")
""" % (f, b))
return d, e, f
def _multidb_testing_config(engines):
"""alembic.ini fixture to work exactly with the 'multidb' template"""
dir_ = os.path.join(_get_staging_directory(), 'scripts')
databases = ", ".join(
engines.keys()
)
engines = "\n\n".join(
"[%s]\n"
"sqlalchemy.url = %s" % (key, value.url)
for key, value in engines.items()
)
return _write_config_file("""
[alembic]
script_location = %s
sourceless = false
databases = %s
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, databases, engines)
)
|
"""Available Commands:
.dc, .config"""
import asyncio
from telethon import functions
from mafiabot.utils import admin_cmd, sudo_cmd, edit_or_reply
from userbot.cmdhelp import CmdHelp
@bot.on(admin_cmd(pattern=r"dc")) # pylint:disable=E0602
@bot.on(sudo_cmd(pattern=r"dc", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetNearestDcRequest()) # pylint:disable=E0602
await edit_or_reply(event, result.stringify())
@bot.on(admin_cmd(pattern=r"config")) # pylint:disable=E0602
@bot.on(sudo_cmd(pattern=r"config", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetConfigRequest()) # pylint:disable=E0602
result = result.stringify()
logger.info(result) # pylint:disable=E0602
await event.edit("""Telethon UserBot powered by @MafiaBot_Support""")
CmdHelp("bot").add_command(
"dc", None, "Gets the DataCenter Number"
).add_command(
"config", None, "😒"
).add()
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-11 02:47
from hanlp.common.dataset import SortingSamplerBuilder
from hanlp.components.tokenizers.transformer import TransformerTaggingTokenizer
from hanlp.datasets.tokenization.sighan2005 import SIGHAN2005_PKU_TRAIN_ALL, SIGHAN2005_PKU_TEST
from tests import cdroot
cdroot()
tokenizer = TransformerTaggingTokenizer()
save_dir = 'data/model/cws/sighan2005_pku_bert_base_96.70'
tokenizer.fit(
SIGHAN2005_PKU_TRAIN_ALL,
SIGHAN2005_PKU_TEST, # Conventionally, no devset is used. See Tian et al. (2020).
save_dir,
'bert-base-chinese',
max_seq_len=300,
char_level=True,
hard_constraint=True,
sampler_builder=SortingSamplerBuilder(batch_size=32),
epochs=3,
adam_epsilon=1e-6,
warmup_steps=0.1,
weight_decay=0.01,
word_dropout=0.1,
seed=1609836303,
)
tokenizer.evaluate(SIGHAN2005_PKU_TEST, save_dir)
print(f'Model saved in {save_dir}')
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import os
import numpy as np
from jittor import compile_extern
from jittor.test.test_log import find_log_with_re
if jt.has_cuda:
from jittor.compile_extern import cublas_ops, cudnn_ops
else:
cublas_ops = cudnn_ops = None
def conv_oihw(x, w, stride=1, padding=0, dilation=1):
assert type(stride)==int and type(padding)==int
N,H,W,C = x.shape
# Kh,Kw,C2,c = w.shape
c,C2,Kh,Kw = w.shape
oh, ow = (H-Kh*dilation+dilation-1+padding*2)//stride+1, (W-Kw*dilation+dilation-1+padding*2)//stride+1
assert C2==C or C2==1, (C2, C)
x = x.reindex([N,oh,ow,c,C2,Kh,Kw], [
'i0', # Nid = Nid
f'i1*{stride}+i5*{dilation}-{padding}', # Hid = ohid*stride+Khid
f'i2*{stride}+i6*{dilation}-{padding}', # Wid = owid*stride+Kwid
'i3' if C2==1 and C>1 else 'i4', # depthwise or normal
])
y = (x*w).sum([4,5,6]) # Kh, Kw, C
return y
def conv(x, w, stride, padding):
out_planes, in_planes, kernel_size, _ = w.shape
Kw = kernel_size
Kh = kernel_size
_C = in_planes
Kc = out_planes
N,C,H,W = x.shape
assert C==_C
xx = x.reindex([N,Kc,C,(H+padding*2-kernel_size)//stride+1,(W+padding*2-kernel_size)//stride+1,Kh,Kw], [
'i0', # Nid
'i2', # Cid
f'i3*{stride}-{padding}+i5', # Hid+Khid
f'i4*{stride}-{padding}+i6', # Wid+KWid
])
ww = w.broadcast(xx.shape, [0,3,4])
yy = xx*ww
y = yy.sum([2,5,6]) # Kc, Kh, Kw
return y
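# Shape sketch for the reindex-based conv above (illustrative): with x of shape
# [N, C, H, W] and w of shape [Kc, C, k, k], the output y has shape
# [N, Kc, (H + 2*padding - k)//stride + 1, (W + 2*padding - k)//stride + 1];
# e.g. x=[10, 3, 100, 100], w=[5, 3, 3, 3], stride=2, padding=0 gives y=[10, 5, 49, 49].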
@unittest.skipIf(cudnn_ops==None, "Not use cudnn, Skip")
class TestCudnnConvOp(unittest.TestCase):
def test(self):
def check(xshape, wshape, stride=1, padding=0, dilation=1):
with jt.log_capture_scope(use_cuda=1, enable_tuner=1,
log_v=0, log_vprefix="op.cc=100"
) as raw_log:
x = jt.random(xshape)
w = jt.random(wshape)
y = conv_oihw(x, w, stride, padding, dilation)
y.sync()
with jt.flag_scope(use_cuda=0, enable_tuner=1):
cy = conv_oihw(x, w, stride, padding, dilation)
cy.sync()
logs = find_log_with_re(raw_log, "(Jit op key (not )?found: cudnn_conv.*)")
assert len(logs)==1 and "oihw" in logs[0][0], logs
assert np.allclose(y.data, cy.data), np.abs(y.data-cy.data).max()
check([10,100,100,3], [5,3,3,3], stride=2, padding=0, dilation=1)
check([10,40,50,4], [5,4,5,5], stride=1, padding=1, dilation=1)
check([10,40,50,4], [5,4,4,4], stride=3, padding=1, dilation=1)
def test_backward_nhwc(self):
# TODO: cudnn backward do not support nhwc
return
def check(xshape, wshape, stride=1, padding=0, dilation=1):
with jt.log_capture_scope(use_cuda=1, enable_tuner=1,
log_v=0, log_vprefix="op.cc=100"
) as raw_log:
x = jt.random(xshape)
w = jt.random(wshape)
y = conv_oihw(x, w, stride, padding, dilation)
mask = jt.random(y.shape)
loss = mask * y
dx, dw = jt.grad(loss, [x, w])
jt.sync([y, loss, dx, dw])
with jt.flag_scope(use_cuda=0, enable_tuner=0):
cy = conv_oihw(x, w, stride, padding, dilation)
closs = mask * cy
cdx, cdw = jt.grad(closs, [x, w])
jt.sync([cy, closs, cdx, cdw])
logs = find_log_with_re(raw_log, "(Jit op key (not )?found: cudnn_conv.*)")
assert len(logs)==3 and "oihw" in logs[0][0], logs
assert np.allclose(y.data, cy.data)
assert np.allclose(dx.data, cdx.data)
assert np.allclose(dw.data, cdw.data)
check([10,100,100,3], [5,3,3,3], stride=2, padding=0, dilation=1)
check([10,40,50,4], [5,4,5,5], stride=1, padding=1, dilation=1)
check([10,40,50,4], [5,4,4,4], stride=3, padding=1, dilation=1)
def test_backward(self):
def check(xshape, wshape, stride=1, padding=0, dilation=1):
with jt.log_capture_scope(use_cuda=1, enable_tuner=1,
log_v=1, log_vprefix="op.cc=100,exe=1000"
) as raw_log:
x = jt.random(xshape)
w = jt.random(wshape)
y = conv(x, w, stride, padding)
mask = jt.random(y.shape)
loss = mask * y
dx, dw = jt.grad(loss, [x, w])
jt.sync([y, loss, dx, dw])
# fails when enable_tuner=1, something wrong with mkl_conv_backward_x maybe.
with jt.flag_scope(use_cuda=0, enable_tuner=0):
cy = conv(x, w, stride, padding)
closs = mask * cy
cdx, cdw = jt.grad(closs, [x, w])
jt.sync([cy, closs, cdx, cdw])
logs = find_log_with_re(raw_log, "(Jit op key (not )?found: cudnn_conv.*)")
assert len(logs)==3 and "oihw" in logs[0][0], logs
assert np.allclose(y.data, cy.data)
assert np.allclose(dx.data, cdx.data, 1e-2)
assert np.allclose(dw.data, cdw.data, 1e-2)
check([10,3,100,100], [5,3,3,3], stride=2, padding=0, dilation=1)
check([10,4,40,50], [5,4,5,5], stride=1, padding=1, dilation=1)
check([10,4,40,50], [5,4,4,4], stride=3, padding=1, dilation=1)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 19:26:59 2021
@author: ALEX BACK
LONGEST COLLATZ SEQUENCE
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains
10 terms. Although it has not been proved yet (Collatz Problem), it is thought
that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
"""
# IMPORTING
from datetime import datetime as date
# FUNCTIONS
def nextCollatz(n):
    if n % 2 == 0:
        n //= 2  # integer division keeps the terms as ints
else :
n = 3 * n + 1
return n
def listCollatzSequence(n):
sequence =[n]
while sequence[-1] > 1 : sequence.append(nextCollatz(sequence[-1]))
return sequence
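# Quick check against the worked example in the header (illustrative):
# listCollatzSequence(13) -> [13, 40, 20, 10, 5, 16, 8, 4, 2, 1], i.e. 10 terms.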
# INPUTS
maior = 1
best_start = 1
start = date.now()
# PROCESSING
for i in range(1, 1000000):
    actualSeq = listCollatzSequence(i)
    if len(actualSeq) > maior:
        maior = len(actualSeq)
        best_start = i
# OUTPUT
print(best_start, maior, date.now() - start)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppSilanApigraythreeQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenAppSilanApigraythreeQueryResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayOpenAppSilanApigraythreeQueryResponse, self).parse_response_content(response_content)
|
import os
import pathlib
from dotenv import load_dotenv
from backend.utils.pUtils import PUtils
_current_file_path = pathlib.Path(__file__).parent.absolute()
def parse_dotenv():
base_dir = PUtils.bp(_current_file_path, '..', '..')
env = PUtils.bp(base_dir, '.env')
env_example = PUtils.bp(base_dir, '.env.example')
load_dotenv(env if PUtils.is_file_exists(env) else env_example)
_SECRET_KEY = os.getenv("SECRET_KEY")
if not _SECRET_KEY:
raise Exception('Secret key is not provided')
_DEBUG = os.getenv('DEBUG', 'True') == 'True'
_DB_NAME = os.getenv('DB_NAME')
_DB_USER = os.getenv('DB_USER')
_DB_PASSWORD = os.getenv('DB_PASSWORD')
_DB_HOST = os.getenv('DB_HOST')
_DB_PORT = os.getenv('DB_PORT')
if not _DB_NAME or not _DB_USER or not _DB_PASSWORD or not _DB_HOST or not _DB_PORT:
raise Exception('Database connection is not provided')
_DB_PORT = int(_DB_PORT)
_SECURE_ADMIN_URL = os.getenv('SECURE_ADMIN')
SUPER_USER = {
'first_name': os.getenv('SUPERUSER_FIRST_NAME'),
'last_name': os.getenv('SUPERUSER_LAST_NAME'),
'snils': os.getenv('SUPERUSER_SNILS'),
'gender': os.getenv('SUPERUSER_GENDER'),
'email': os.getenv('SUPERUSER_EMAIL'),
'password': os.getenv('SUPERUSER_PASSWORD')
}
return _SECRET_KEY, _DEBUG, _DB_NAME, _DB_USER, _DB_PASSWORD, _DB_HOST, _DB_PORT, \
_SECURE_ADMIN_URL, SUPER_USER
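# Typical consumption of parse_dotenv() in a settings module (illustrative):
#
#     SECRET_KEY, DEBUG, DB_NAME, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, \
#         SECURE_ADMIN_URL, SUPER_USER = parse_dotenv()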
|
from .dbHelper import Sessionmaker, initSessionMaker
|
"""
@Author: NguyenKhacThanh
"""
from flask import request
from flask_restplus import Resource, Namespace
from wipm.services import regression as serv_regression
from ._requests import regression as req_regression
from ._responses import regression as res_regression
from ._responses import BASE_RES
NS = Namespace("regression", description="Regression algorithm")
@NS.route("/huber", methods=["POST", "PUT"])
class HuberCreate(Resource):
"""Huber Model resource class
"""
@NS.expect(req_regression.CREATE_HUBER_MODEL_PARAMS, validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
def post(self):
"""Create huber regression model
"""
params = request.get_json()
id_model = serv_regression.create_huber_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
def put(self):
"""Train huber regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/lasso", methods=["POST", "PUT"])
class LassoCreate(Resource):
    """Lasso Model resource class
"""
@NS.expect(req_regression.CREATE_LASSO_MODEL_PARAMS, validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
def post(self):
"""Create lasso regression model
"""
params = request.get_json()
id_model = serv_regression.create_lasso_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
def put(self):
"""Train lasso regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/linear_regression", methods=["POST", "PUT"])
class LinearRegressionCreate(Resource):
"""Linear Regression Model resource class
"""
@NS.expect(req_regression.CREATE_LINEAR_REGRESSION_MODEL_PARAMS, validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, code=200, description="SUCCESS")
def post(self):
"""Create linear regression model
"""
params = request.get_json()
id_model = serv_regression.create_linear_regression_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
def put(self):
"""Train linear regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/decision_tree", methods=["POST", "PUT"])
class DecisionTreeCreate(Resource):
"""Decision Tree Regression Model resource class
"""
@NS.expect(req_regression.CREATE_DECISION_TREE_MODEL_PARAMS, validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
def post(self):
"""Create decision tree regression model
"""
params = request.get_json()
id_model = serv_regression.create_decision_tree_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
def put(self):
"""Train decision tree regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/random_forest", methods=["POST", "PUT"])
class RandomForestCreate(Resource):
"""Random Forest Model resource class
"""
@NS.expect(req_regression.CREATE_RANDOM_FOREST_MODEL_PARAMS,
validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
def post(self):
"""Create random forest regression model
"""
params = request.get_json()
id_model = serv_regression.create_random_forest_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
    def put(self):
        """Train random forest regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/xgboost", methods=["POST", "PUT"])
class XGBoostCreate(Resource):
"""XGBoost Model resource class
"""
@NS.expect(req_regression.CREATE_XGBOOST_MODEL_PARAMS, validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
    def post(self):
        """Create xgboost regression model
"""
params = request.get_json()
id_model = serv_regression.create_xgboost_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_MODEL_BASE_RES, description="SUCCESS")
def put(self):
"""Train xgboost regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error = serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error
}
@NS.route("/neural_network", methods=["POST", "PUT"])
class NeuralNetworkCreate(Resource):
"""Neural Network Model resource class
"""
@NS.expect(req_regression.CREATE_NEURAL_NETWORK_MODEL_PARAMS,
validate=True)
@NS.marshal_with(res_regression.CREATE_MODEL_RES, description="SUCCESS")
def post(self):
"""Create neural network regression model
"""
params = request.get_json()
id_model = serv_regression.create_neural_network_model(**params)
return {"id": id_model, "message": "SUCCESS"}
@NS.expect(req_regression.TRAIN_MODEL_PARAMS)
@NS.marshal_with(res_regression.TRAIN_NEURAL_NETWORK_MODEL_RES,
description="SUCCESS")
def put(self):
"""Train neural network regression model
"""
params = request.get_json()
mean_squared_error, mean_absolute_error, loss_curve = \
serv_regression.train_estimator(**params)
return {
"mean_squared_error": mean_squared_error,
"mean_absolute_error": mean_absolute_error,
"loss_curve": loss_curve
}
@NS.route("/<string:id_model>", methods=["DELETE"])
class DeleteModel(Resource):
"""Delete model
"""
@NS.marshal_with(BASE_RES, description="SUCCESS")
def delete(self, id_model):
"""Delete model api
"""
serv_regression.delete_estimator(id_model)
return {"message": "Delete success"}
|
'''This module tests our quadratic discriminant classifier.'''
from src.classification.qda import QDA
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import datasets
def test_prior():
'''Check we are computing priors for QDA/LDA correctly.'''
output = np.array([5, 4, 3, 2, 1, 3, 4, 5, 1, 4])
assert QDA.prior(output, 3) == 2/len(output)
assert QDA.prior(output, 5) == 2/len(output)
assert QDA.prior(output, 4) == 3/len(output)
assert QDA.prior(output, 2) == 1/len(output)
def test_class_cov():
'''Check our computation of a covariance matrix.'''
n = 10
d = 4
trials = 5
# Compare with numpy_cov
for _ in range(trials):
features = np.random.rand(n, d)
center = np.mean(features, axis = 0)
centered_features = features - center
aklearn_cov = (n - 1)*QDA.class_covariance(centered_features)
numpy_cov = (n - 1)*np.cov(centered_features, rowvar = False)
assert np.allclose(numpy_cov, aklearn_cov)
def test_with_sklearn():
iris = datasets.load_iris()
X = iris.data
y = iris.target
model = QDA(X, y, split_proportion=1)
qda = QuadraticDiscriminantAnalysis()
skmodel = qda.fit(model.train_features, model.train_output)
skpredictions = skmodel.predict(model.train_features)
akpredictions = model.train_predictions
assert np.allclose(skpredictions, akpredictions)
# Using small n causes collinearity/errors.
n = 50
d = 4
num_classes = 3
trials = 20
for _ in range(trials):
X = 5*np.random.rand(n, d)
y = np.random.randint(0, num_classes, n)
model = QDA(X, y, split_proportion=1)
qda = QuadraticDiscriminantAnalysis()
skmodel = qda.fit(model.train_features, model.train_output)
skpredictions = skmodel.predict(model.train_features)
akpredictions = model.train_predictions
proportion_agree = np.sum(skpredictions == akpredictions) / n
assert proportion_agree == 1
|
symbols = []
exports = [{'type': 'function', 'name': 'InstallNTDSProvider', 'address': '0x7ffb19f04b90'}, {'type': 'function', 'name': 'NSPStartup', 'address': '0x7ffb19f05200'}, {'type': 'function', 'name': 'RemoveNTDSProvider', 'address': '0x7ffb19f06860'}]
|
# Time:  O(1), amortized (each node is pushed and popped at most once)
# Space: O(h), h is height of binary tree
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class BSTIterator(object):
# @param root, a binary search tree's root node
def __init__(self, root):
self.stack = []
self.cur = root
# @return a boolean, whether we have a next smallest number
def hasNext(self):
return self.stack or self.cur
# @return an integer, the next smallest number
def next(self):
while self.cur:
self.stack.append(self.cur)
self.cur = self.cur.left
self.cur = self.stack.pop()
node = self.cur
self.cur = self.cur.right
return node.val
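if __name__ == "__main__":
    # Illustrative usage (not part of the original snippet): iterate the BST
    #     2
    #    / \
    #   1   3
    # in ascending order; prints 1, 2, 3.
    root = TreeNode(2)
    root.left, root.right = TreeNode(1), TreeNode(3)
    it = BSTIterator(root)
    while it.hasNext():
        print(it.next())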
|
from .development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ":memory:",
}
}
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Neuron.Neuron import Neuron
## Imports
from random import randint
class Perceptron(Neuron):
def __init__(self, input_range, Validator):
self.input_range = input_range
        # build an independent [weight, weight] pair per input instead of aliasing one shared list
        super().__init__([[randint(0, 10) for _ in range(2)] for _ in range(self.input_range)], randint(0, 10), Validator)
|
class Song(object):
def __init__(self,lyrics):
        self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print(line)
happy_bday=Song(["Happy birthday to you","I don't want to get sued","So,I'll stop right there"])
bulls_on_parade=Song(["They rally around the family","With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict
import pickle
import json
import os
import redis
from retriever.entitylinking.entity_linker import EntityLinker
from retriever.schema_retriever.client import DenseSchemaRetrieverClient
from retriever import utils
from retriever.configs import config_utils
class KBRetriever(object):
def __init__(self, config):
if not isinstance(config, Dict):
config = config_utils.get_config(config)
self.config = config
self.entity_linker = EntityLinker(config=config)
self.schema_retriever_client = DenseSchemaRetrieverClient(config)
print("---------------------")
print("Used KBRetriever configuration:\n", self.config)
print("---------------------")
kb_store = config['kb_store_host']
self.entity_meta_info_redis = redis.Redis(host=kb_store, port=config['entity_meta_port'], db=0)
self.in_anchor_redis = redis.Redis(host=kb_store, port=config['in_relation_port'], db=0)
self.out_anchor_redis = redis.Redis(host=kb_store, port=config['out_relation_port'], db=0)
# Test Redis is up
self.test_redis_instances()
self.schema_meta_info = self.load_pickle_file(os.path.join(config["base_data_dir"], config["schema_meta_info_fn"]))
def test_redis_instances(self):
try:
val = self.entity_meta_info_redis.get("")
val = self.in_anchor_redis.get("")
val = self.out_anchor_redis.get("")
except (ConnectionError, redis.exceptions.ConnectionError) as e:
print(f'>>> Redis instance not up')
raise EnvironmentError(f'Redis instances not up. {str(e)}')
@staticmethod
def load_pickle_file(pkl_fn):
with open(pkl_fn, mode="rb") as fp:
obj = pickle.load(fp)
return obj
def get_entity_meta_info(self, entity_id):
response = self.entity_meta_info_redis.get(entity_id)
if response:
return json.loads(response)
else:
return {}
def pack_one_node(self, nid, node_type, id, score=0.0, offset=None):
if node_type == "entity":
meta_info = self.get_entity_meta_info(id)
else:
meta_info = self.schema_meta_info.get(id, {})
friendly_name = meta_info.get("en_label", "NIL")
cls = meta_info.get("prominent_type", [])
if len(cls) > 0:
cls = cls[0]
else:
cls = ""
classes = set(meta_info.get("types", {id}))
if cls != "":
classes = [cls] + list(classes - {cls})
else:
classes = list(classes)
return {
"nid": nid,
"node_type": node_type,
"id": id,
"class": cls,
"score": score,
"offset": offset,
"classes": classes,
"friendly_name": friendly_name,
"question_node": 0,
"function": 'none'
}
def pack_dense_embedding_query_graph(self, entity_list, classes, relations, node_id=0):
nodes = []
edges = []
for entity in entity_list:
node = self.pack_one_node(node_id, "entity", entity.ent_id, entity.score, entity.offset)
node_id += 1
nodes.append(node)
for cls in classes:
node = self.pack_one_node(node_id, "class", cls[0], cls[1])
nodes.append(node)
node_id += 1
for rel in relations:
edge = self.pack_edge(-1, -1, rel[0], rel[1])
edges.append(edge)
return nodes, edges
def pack_edge(self, start, end, relation, score):
return {
"start": start,
"end": end,
"relation": relation,
"score": score,
"friendly_name": self.get_relation_name(relation)
}
def get_relation_name(self, relation):
if relation in self.schema_meta_info and self.schema_meta_info[relation]["en_label"]:
return self.schema_meta_info[relation]["en_label"]
relation_name = relation.split('.')[-1].split("_")
return " ".join(["{}{}".format(word[0].upper(), word[1:]) for word in relation_name])
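    # Illustrative: a relation such as "people.person.place_of_birth" with no English
    # label in schema_meta_info falls back to the derived name "Place Of Birth".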
def is_entity(self, s):
if s.startswith('m.') or s.startswith('g.'):
            return True
        return False
def get_in_relations(self, entity: str):
response = self.in_anchor_redis.get(entity)
in_relations = []
if response:
in_relations = json.loads(response)['in_relations']
return in_relations
def get_out_relations(self, entity: str):
response = self.out_anchor_redis.get(entity)
out_relations = []
if response:
out_relations = json.loads(response)['out_relations']
return out_relations
def gen_anchor_relations(self, entity: str):
in_relations = self.get_in_relations(entity)
out_relations = self.get_out_relations(entity)
return in_relations, out_relations
def predict(self, sentence, world=None):
if world is None:
world = self.config['world']
sentence = sentence.lower()
el_output = self.entity_linker.predict(sentence, topk=self.config['topk'])
literal_nodes, node_id = utils.gen_all_literal_nodes(el_output, world=world)
entity_list = utils.get_prior_el_topk(el_output)
types, relations = self.schema_retriever_client.predict(sentence, world=world)
nodes, edges = self.pack_dense_embedding_query_graph(entity_list, types, relations, node_id=node_id)
in_out_relations = {}
for ent in entity_list:
in_relation, out_relation = self.gen_anchor_relations(ent.ent_id)
in_out_relations[ent.ent_id] = {
"in_relation": list(in_relation),
"out_relation": list(out_relation)
}
output = {}
output["bert_tokens"] = el_output["tokens"]
output["graph_query"] = {
"nodes": literal_nodes + nodes,
"edges": edges
}
output["anchor_relations"] = in_out_relations
output["entity_meta_info"] = {}
for ent in entity_list:
meta_info = self.get_entity_meta_info(ent.ent_id)
en_label = meta_info.get("en_label", "NIL")
if len(meta_info.get("prominent_type", [])) > 0:
ent_type = meta_info["prominent_type"][0]
else:
ent_type = ""
ent_desc = meta_info.get("en_desc", "")
output["entity_meta_info"][ent.ent_id] = {
"ent_name": en_label,
"ent_type": ent_type,
"ent_desc": ent_desc
}
return output
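# --- Minimal usage sketch (illustrative, not part of the original module) ---
# Assumes a config (dict or a file accepted by config_utils.get_config) providing the
# keys read above: 'kb_store_host', 'entity_meta_port', 'in_relation_port',
# 'out_relation_port', 'base_data_dir', 'schema_meta_info_fn', 'topk' and 'world'.
# The config path below is hypothetical.
if __name__ == "__main__":
    retriever = KBRetriever("configs/kb_retriever.yaml")
    result = retriever.predict("who directed Titanic?")
    print(len(result["graph_query"]["nodes"]), "nodes,",
          len(result["graph_query"]["edges"]), "edges retrieved")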
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Image recognition utilities for Airtest."""
import os
import sys
import time
import types
from six import PY3
from copy import deepcopy
from airtest import aircv
from airtest.aircv import cv2
from airtest.core.helper import G, logwrap
from airtest.core.settings import Settings as ST # noqa
from airtest.core.error import TargetNotFoundError, InvalidMatchingMethodError
from airtest.utils.transform import TargetPos
from airtest.aircv.template_matching import TemplateMatching
from airtest.aircv.multiscale_template_matching import MultiScaleTemplateMatching,MultiScaleTemplateMatchingPre
from airtest.aircv.keypoint_matching import KAZEMatching, BRISKMatching, AKAZEMatching, ORBMatching
from airtest.aircv.keypoint_matching_contrib import SIFTMatching, SURFMatching, BRIEFMatching
MATCHING_METHODS = {
"tpl": TemplateMatching,
"mstpl": MultiScaleTemplateMatchingPre,
"gmstpl": MultiScaleTemplateMatching,
"kaze": KAZEMatching,
"brisk": BRISKMatching,
"akaze": AKAZEMatching,
"orb": ORBMatching,
"sift": SIFTMatching,
"surf": SURFMatching,
"brief": BRIEFMatching,
}
@logwrap
def loop_find(query, timeout=ST.FIND_TIMEOUT, threshold=None, interval=0.5, intervalfunc=None):
    """
    Search for the image template in the screen until timeout.
    Args:
        query: image template to be found in the screenshot
        timeout: how long to keep looking for the image template, in seconds
        threshold: matching threshold; if set, it overrides the threshold of the query template
        interval: sleep interval between attempts to find the image template
        intervalfunc: function executed after each unsuccessful attempt to find the image template
    Raises:
        TargetNotFoundError: when the image template is not found within the timeout
    Returns:
        position in the screenshot where the image template was found
    """
G.LOGGING.info("Try finding: %s", query)
start_time = time.time()
while True:
screen = G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)
if screen is None:
G.LOGGING.warning("Screen is None, may be locked")
else:
if threshold:
query.threshold = threshold
match_pos = query.match_in(screen)
if match_pos:
try_log_screen(screen)
return match_pos
if intervalfunc is not None:
intervalfunc()
        # raise if timed out, otherwise sleep and try the next iteration:
if (time.time() - start_time) > timeout:
try_log_screen(screen)
raise TargetNotFoundError('Picture %s not found in screen' % query)
else:
time.sleep(interval)
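# --- Usage sketch for loop_find (illustrative; requires a connected device) ---
# The template file name below is hypothetical; the returned position can then be
# passed to a touch/click call from airtest.core.api:
#   pos = loop_find(Template("btn_ok.png"), timeout=10)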
@logwrap
def try_log_screen(screen=None, quality=None, max_size=None):
"""
Save screenshot to file
Args:
screen: screenshot to be saved
quality: The image quality, default is ST.SNAPSHOT_QUALITY
        max_size: the maximum size of the picture, e.g. 1200
Returns:
{"screen": filename, "resolution": aircv.get_resolution(screen)}
"""
if not ST.LOG_DIR or not ST.SAVE_IMAGE:
return
if not quality:
quality = ST.SNAPSHOT_QUALITY
if not max_size:
max_size = ST.IMAGE_MAXSIZE
if screen is None:
screen = G.DEVICE.snapshot(quality=quality)
filename = "%(time)d.jpg" % {'time': time.time() * 1000}
filepath = os.path.join(ST.LOG_DIR, filename)
if screen is not None:
aircv.imwrite(filepath, screen, quality, max_size=max_size)
return {"screen": filename, "resolution": aircv.get_resolution(screen)}
return None
class Template(object):
    """
    Picture used as the target of touch/swipe/wait/exists, with extra info for cv matching.
    filename: picture filename
    target_pos: which position in the picture to return
    record_pos: position on the screen when recording
    resolution: screen resolution when recording
    rgb: whether to verify the match result using all three RGB channels.
    scale_max: maximum scale range for multi-scale template matching.
    scale_step: search step for multi-scale template matching.
    """
def __init__(self, filename, threshold=None, target_pos=TargetPos.MID, record_pos=None, resolution=(), rgb=False, scale_max=800, scale_step=0.005):
self.filename = filename
self._filepath = None
self.threshold = threshold or ST.THRESHOLD
self.target_pos = target_pos
self.record_pos = record_pos
self.resolution = resolution
self.rgb = rgb
self.scale_max = scale_max
self.scale_step = scale_step
@property
def filepath(self):
if self._filepath:
return self._filepath
for dirname in G.BASEDIR:
filepath = os.path.join(dirname, self.filename)
if os.path.isfile(filepath):
self._filepath = filepath
return self._filepath
return self.filename
def __repr__(self):
filepath = self.filepath if PY3 else self.filepath.encode(sys.getfilesystemencoding())
return "Template(%s)" % filepath
def match_in(self, screen):
match_result = self._cv_match(screen)
G.LOGGING.debug("match result: %s", match_result)
if not match_result:
return None
focus_pos = TargetPos().getXY(match_result, self.target_pos)
return focus_pos
def match_all_in(self, screen):
image = self._imread()
image = self._resize_image(image, screen, ST.RESIZE_METHOD)
return self._find_all_template(image, screen)
@logwrap
def _cv_match(self, screen):
        # in case the image file does not exist in the current directory:
ori_image = self._imread()
image = self._resize_image(ori_image, screen, ST.RESIZE_METHOD)
ret = None
for method in ST.CVSTRATEGY:
# get function definition and execute:
func = MATCHING_METHODS.get(method, None)
if func is None:
raise InvalidMatchingMethodError("Undefined method in CVSTRATEGY: '%s', try 'kaze'/'brisk'/'akaze'/'orb'/'surf'/'sift'/'brief' instead." % method)
else:
if method in ["mstpl", "gmstpl"]:
ret = self._try_match(func, ori_image, screen, threshold=self.threshold, rgb=self.rgb, record_pos=self.record_pos,
resolution=self.resolution, scale_max=self.scale_max, scale_step=self.scale_step)
else:
ret = self._try_match(func, image, screen, threshold=self.threshold, rgb=self.rgb)
if ret:
break
return ret
@staticmethod
def _try_match(func, *args, **kwargs):
G.LOGGING.debug("try match with %s" % func.__name__)
try:
ret = func(*args, **kwargs).find_best_result()
except aircv.NoModuleError as err:
G.LOGGING.warning("'surf'/'sift'/'brief' is in opencv-contrib module. You can use 'tpl'/'kaze'/'brisk'/'akaze'/'orb' in CVSTRATEGY, or reinstall opencv with the contrib module.")
return None
except aircv.BaseError as err:
G.LOGGING.debug(repr(err))
return None
else:
return ret
def _imread(self):
return aircv.imread(self.filepath)
def _find_all_template(self, image, screen):
return TemplateMatching(image, screen, threshold=self.threshold, rgb=self.rgb).find_all_results()
def _find_keypoint_result_in_predict_area(self, func, image, screen):
if not self.record_pos:
return None
# calc predict area in screen
image_wh, screen_resolution = aircv.get_resolution(image), aircv.get_resolution(screen)
xmin, ymin, xmax, ymax = Predictor.get_predict_area(self.record_pos, image_wh, self.resolution, screen_resolution)
# crop predict image from screen
predict_area = aircv.crop_image(screen, (xmin, ymin, xmax, ymax))
if not predict_area.any():
return None
# keypoint matching in predicted area:
ret_in_area = func(image, predict_area, threshold=self.threshold, rgb=self.rgb)
# calc cv ret if found
if not ret_in_area:
return None
ret = deepcopy(ret_in_area)
if "rectangle" in ret:
for idx, item in enumerate(ret["rectangle"]):
ret["rectangle"][idx] = (item[0] + xmin, item[1] + ymin)
ret["result"] = (ret_in_area["result"][0] + xmin, ret_in_area["result"][1] + ymin)
return ret
    def _resize_image(self, image, screen, resize_method):
        """Adapt the template image to the current screen resolution before template matching."""
        # no recording resolution stored, skip the adaptation
if not self.resolution:
return image
screen_resolution = aircv.get_resolution(screen)
        # if the resolutions are identical, no adaptation of im_search is needed:
if tuple(self.resolution) == tuple(screen_resolution) or resize_method is None:
return image
if isinstance(resize_method, types.MethodType):
resize_method = resize_method.__func__
        # resolutions differ, adapt the image (cocos_min_strategy is used by default):
h, w = image.shape[:2]
w_re, h_re = resize_method(w, h, self.resolution, screen_resolution)
        # make sure w_re and h_re are > 0, i.e. at least 1 pixel:
w_re, h_re = max(1, w_re), max(1, h_re)
        # debug output of the resize information.
G.LOGGING.debug("resize: (%s, %s)->(%s, %s), resolution: %s=>%s" % (
w, h, w_re, h_re, self.resolution, screen_resolution))
        # resize the image:
image = cv2.resize(image, (w_re, h_re))
return image
class Predictor(object):
"""
this class predicts the press_point and the area to search im_search.
"""
DEVIATION = 100
@staticmethod
    def count_record_pos(pos, resolution):
        """Compute the offset of the point from the screen center as a fraction of the resolution."""
_w, _h = resolution
        # scale both axes by the width, based on experiments with the G18 device
delta_x = (pos[0] - _w * 0.5) / _w
delta_y = (pos[1] - _h * 0.5) / _w
delta_x = round(delta_x, 3)
delta_y = round(delta_y, 3)
return delta_x, delta_y
@classmethod
    def get_predict_point(cls, record_pos, screen_resolution):
        """Predict the click position after scaling."""
delta_x, delta_y = record_pos
_w, _h = screen_resolution
target_x = delta_x * _w + _w * 0.5
target_y = delta_y * _w + _h * 0.5
return target_x, target_y
@classmethod
def get_predict_area(cls, record_pos, image_wh, image_resolution=(), screen_resolution=()):
"""Get predicted area in screen."""
x, y = cls.get_predict_point(record_pos, screen_resolution)
# The prediction area should depend on the image size:
if image_resolution:
predict_x_radius = int(image_wh[0] * screen_resolution[0] / (2 * image_resolution[0])) + cls.DEVIATION
predict_y_radius = int(image_wh[1] * screen_resolution[1] / (2 * image_resolution[1])) + cls.DEVIATION
else:
predict_x_radius, predict_y_radius = int(image_wh[0] / 2) + cls.DEVIATION, int(image_wh[1] / 2) + cls.DEVIATION
area = (x - predict_x_radius, y - predict_y_radius, x + predict_x_radius, y + predict_y_radius)
return area
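# --- Illustration of the record_pos convention (not part of airtest) ---
# count_record_pos() stores the offset from the screen center as a fraction of the
# *width*; get_predict_point() inverts it for another resolution, so a position
# recorded on one device maps to a proportionally placed position on another.
if __name__ == "__main__":
    recorded = Predictor.count_record_pos((810, 1440), (1080, 1920))
    print(recorded)                                             # (0.25, 0.444)
    print(Predictor.get_predict_point(recorded, (720, 1280)))   # (540.0, 959.68)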
|
# encoding: utf-8
import os
import sys
import tkinter as tk
from tkinter import ttk, font, messagebox, Image, filedialog
import Compi2RepoAux.team21.Analisis_Ascendente.ascendente as parser
import webbrowser as wb
# from PIL import Image,ImageTk
class Ctxt(tk.Text): # Custom Text Widget with Highlight Pattern - - - - -
# Credits to the owner of this custom class - - - - - - - - - - - - -
def __init__(self, *args, **kwargs):
tk.Text.__init__(self, *args, **kwargs, bg='#323331', fg="#FDFEFD")
# tk.Text.configure('name','nuevo_0')
def highlight_pattern(self, pattern, tag, start="1.0", end="end", regexp=False):
start = self.index(start)
end = self.index(end)
self.mark_set("matchStart", start)
self.mark_set("matchEnd", start)
self.mark_set("searchLimit", end)
count = tk.IntVar()
while True:
index = self.search(
pattern, "matchEnd", "searchLimit", count=count, regexp=regexp
)
if index == "":
break
self.mark_set("matchStart", index)
self.mark_set("matchEnd", "%s+%sc" % (index, count.get()))
self.tag_remove("id", "matchStart", "matchEnd")
self.tag_remove("norm", "matchStart", "matchEnd")
self.tag_remove("cadena", "matchStart", "matchEnd")
self.tag_remove("green", "matchStart", "matchEnd")
self.tag_remove("blue", "matchStart", "matchEnd")
self.tag_add(tag, "matchStart", "matchEnd")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
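# Usage sketch for Ctxt.highlight_pattern (illustrative, not part of the app):
# tags must be configured with tag_config() first, and patterns use Tcl regexp
# syntax when regexp=True, e.g.:
#   editor = Ctxt(root)
#   editor.tag_config("blue", foreground="#09ebc9")
#   editor.insert("1.0", "SELECT nombre FROM alumnos;")
#   editor.highlight_pattern(r"\mSELECT\M", "blue", regexp=True)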
class TextLineNumbers(tk.Canvas):
def __init__(self, *args, **kwargs):
tk.Canvas.__init__(self, *args, **kwargs, highlightthickness=2)
self.textwidget = None
def attach(self, text_widget):
self.textwidget = text_widget
def redraw(self, *args):
'''redraw line numbers'''
self.delete("all")
i = self.textwidget.index("@0,0")
while True:
dline = self.textwidget.dlineinfo(i)
if dline is None:
break
y = dline[1]
linenum = str(i).split(".")[0]
self.create_text(2, y, anchor="nw", text=linenum, fill="#FDFEFD")
i = self.textwidget.index("%s+1line" % i)
class CreateToolTip(object):
"""
create a tooltip for a given widget
"""
def __init__(self, widget, text='widget info'):
        self.waittime = 500     # milliseconds
self.wraplength = 180 # pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(self.tw, text=self.text, justify='left',
background="#ffffff", relief='solid', borderwidth=1,
wraplength=self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw = None
if tw:
tw.destroy()
# testing ...
class Application(ttk.Frame):
def __init__(self, ventana, iconos):
super().__init__(ventana)
self.lista = [
"mode",
"smallint",
"int",
"integer",
"bigint",
"decimal",
"numeric",
"real",
"double",
"money",
"character",
"varying",
"varchar",
"char",
"text",
"timestamp",
"date",
"time",
"interval",
"boolean",
"create",
"type",
"as",
"enum",
"between",
"in",
"like",
"ilike",
"similar",
"is",
"null",
"not",
"and",
"or",
"show",
"databases",
"use",
"database",
"alter",
"rename",
"to",
"owner",
"table",
"drop",
"delete",
"if",
"exists",
"default",
"constraint",
"unique",
"check",
"primary",
"foreign",
"key",
"references",
"add",
"column",
"set",
"from",
"only",
"using",
"where",
"returning",
"inherits",
"insert",
"into",
"values",
"update",
"select",
"distinct",
"group",
"order",
"by",
"having",
"count",
"sum",
"avg",
"max",
"min",
"inner",
"left",
"right",
"full",
"outer",
"join",
"asc",
"desc"
]
self.contadorN = 1
self.Copiado = ""
ventana.title("Query Tool")
        # width x height
ventana.geometry("1200x700")
# width, height
ventana.resizable(False, False)
ventana.iconbitmap("./Images/icono.ico")
ventana.configure(background="RoyalBlue1")
        # change the window icon
menu = tk.Menu(ventana)
new_item = tk.Menu(menu, tearoff=0)
new_item.add_command(label="Cerrar Todo", command=self.f_CerrarTodo)
new_item.add_command(label="Exit", command=self.f_exit)
menu.add_cascade(label='Archivo', menu=new_item)
new_item2 = tk.Menu(menu, tearoff=0)
new_item2.add_command(label="Nuevo", command=self.f_nuevaPestania)
new_item2.add_command(label="Abrir", command=self.f_abrir)
new_item2.add_command(label="Guardar", command=self.f_guardar)
new_item2.add_command(label="Guardar Como", command=self.f_guardarcomo)
menu.add_cascade(label='Project', menu=new_item2)
new_item3 = tk.Menu(menu, tearoff=0)
new_item3.add_command(
label="Copiar", accelerator="Ctrl+c", command=lambda: self.tab_control.event_generate('<Control-c>'))
new_item3.add_command(label="Cortar", accelerator="Ctrl+x",
command=lambda: self.tab_control.event_generate('<Control-x>'))
new_item3.add_command(label="Pegar", accelerator="Ctrl+v",
command=lambda: self.tab_control.event_generate('<Control-v>'))
menu.add_cascade(label='Edit', menu=new_item3)
new_item4 = tk.Menu(menu, tearoff=0)
new_item4.add_command(label="Correr", command=self.f_correr)
menu.add_cascade(label='Programa', menu=new_item4)
new_item5 = tk.Menu(menu, tearoff=0)
new_item5.add_command(label="Acerca")
menu.add_cascade(label='Ayuda', menu=new_item5)
        # toolbar ----------------------------------------------------------------
BarraH = tk.Frame()
BarraH.config(bg="DodgerBlue4", relief=tk.RAISED, height="100", bd=2)
imgBoton1 = tk.PhotoImage(file=iconos[0])
bot1 = tk.Button(BarraH, image=imgBoton1,
height=50, width=50, command=self.f_CerrarTodo)
bot1.pack(side=tk.LEFT, padx=3, pady=3)
button1_ttp = CreateToolTip(bot1, 'CERRAR TODO')
imgBoton2 = tk.PhotoImage(file=iconos[1])
bot2 = tk.Button(BarraH, image=imgBoton2,
height=50, width=50, command=self.f_nuevaPestania)
bot2.pack(side=tk.LEFT, padx=3, pady=3)
button2_ttp = CreateToolTip(bot2, 'NUEVO')
imgBoton3 = tk.PhotoImage(file=iconos[2])
bot3 = tk.Button(BarraH, image=imgBoton3,
height=50, width=50, command=self.f_abrir)
bot3.pack(side=tk.LEFT, padx=3, pady=3)
button3_ttp = CreateToolTip(bot3, 'ABRIR')
imgBoton5 = tk.PhotoImage(file=iconos[4])
bot5 = tk.Button(BarraH, image=imgBoton5,
height=50, width=50, command=self.f_guardarcomo)
bot5.pack(side=tk.LEFT, padx=3, pady=3)
button5_ttp = CreateToolTip(bot5, 'GUARDAR COMO')
imgBoton6 = tk.PhotoImage(file=iconos[5])
bot6 = tk.Button(BarraH, image=imgBoton6,
height=50, width=50, command=self.f_correr)
bot6.pack(side=tk.LEFT, padx=3, pady=3)
button6_ttp = CreateToolTip(bot6, 'RUN')
imgBoton7 = tk.PhotoImage(file=iconos[6])
bot7 = tk.Button(BarraH, image=imgBoton7,
height=50, width=50, command=self.f_abrirSintactico)
bot7.pack(side=tk.LEFT, padx=3, pady=3)
button7_ttp = CreateToolTip(bot7, 'REPORTE SINTACTICO')
bot8 = tk.Button(BarraH, image=imgBoton7,
height=50, width=50, command=self.f_abrirLexico)
bot8.pack(side=tk.LEFT, padx=3, pady=3)
button8_ttp = CreateToolTip(bot8, 'REPORTE LEXICO')
bot9 = tk.Button(BarraH, image=imgBoton7,
height=50, width=50, command=self.f_abrirtablaSemanticos)
bot9.pack(side=tk.LEFT, padx=3, pady=3)
button9_ttp = CreateToolTip(bot9, 'REPORTE SEMANTICO')
imgBoton8 = tk.PhotoImage(file=iconos[7])
bot10 = tk.Button(BarraH, image=imgBoton8,
height=50, width=50, command=self.f_abrirAST)
bot10.pack(side=tk.LEFT, padx=3, pady=3)
button10_ttp = CreateToolTip(bot10, 'REPORTE AST')
imgBoton9 = tk.PhotoImage(file=iconos[8])
bot11 = tk.Button(BarraH, image=imgBoton9,
height=50, width=50, command=self.f_abrirBNFascendente)
bot11.pack(side=tk.LEFT, padx=3, pady=3)
button11_ttp = CreateToolTip(bot11, 'REPORTE BNF ASCENDENTE')
bot12 = tk.Button(BarraH, image=imgBoton9,
height=50, width=50, command=self.f_abrirBNFdescendente)
bot12.pack(side=tk.LEFT, padx=3, pady=3)
button12_ttp = CreateToolTip(bot12, 'REPORTE BNF DESCENDENTE')
imgBoton10 = tk.PhotoImage(file=iconos[9])
bot13 = tk.Button(BarraH, image=imgBoton10,
height=50, width=50, command=self.f_abrirtablaSimbolos)
bot13.pack(side=tk.LEFT, padx=3, pady=3)
button13_ttp = CreateToolTip(bot13, 'REPORTE TABLA DE SIMBOLOS')
        # TABS ----------------------------------------------------------------------------
PanelPestania = tk.Frame()
PanelPestania.config(
bg="SteelBlue1", relief=tk.RAISED, height="400", bd=2)
self.tab_control = ttk.Notebook(PanelPestania)
self.tab_control.config(height="300")
self.f_nuevaPestania()
        # CONSOLE -------------------------------------------------------------------------------
Consola = tk.Frame()
Consola.config(bg="SteelBlue1", relief=tk.RAISED, height="700", bd=2)
S = tk.Scrollbar(Consola)
self.T = tk.Text(Consola, height=70, width=4,
bg="black", fg="chartreuse2")
S.pack(side=tk.RIGHT, fill=tk.Y)
self.T.pack(side=tk.TOP, fill=tk.X)
S.config(command=self.T.yview)
self.T.config(yscrollcommand=S.set)
quote = """>>>\n"""
self.T.insert(tk.END, quote)
        # context menu ----------------------------------------------
menuContext = tk.Menu(self.tab_control, tearoff=0)
menuContext.add_command(label="Cerrar", command=self.f_cerrarPestania)
        # events -----------------------------------------------------
        def f_key(event):
            if(event.keycode == 13):
                # on Enter, append a new prompt to the console
                self.T.insert(tk.END, """>>>""")
def f_mostrarContext(event):
menuContext.post(event.x_root, event.y_root)
self.T.bind("<Key>", f_key)
ventana.bind("<Button-3>", f_mostrarContext)
        # final layout and main loop -------------------------------------------------------
BarraH.pack(side=tk.TOP, fill=tk.X)
PanelPestania.pack(side=tk.TOP, fill=tk.X, pady=10, padx=10)
Consola.pack(side=tk.TOP, fill=tk.X, pady=10, padx=10)
ventana.config(menu=menu)
ventana.mainloop()
def f_cerrarPestania(self):
# if(tab_control.tab(tab_control.select(), "text")) == "Nuevo":
respuesta = messagebox.askyesno(
title="", message="¿Desea cerrar esta pestania sin guardar?")
if(respuesta):
self.tab_control.forget(self.tab_control.select())
else:
if self.tab_control.tab(self.tab_control.select(), "text")[0] != '/':
self.f_guardarcomo()
self.tab_control.forget(self.tab_control.select())
else:
                # save
self.f_guardar()
self.tab_control.forget(self.tab_control.select())
                print("needs to be saved")
def f_nuevaPestania(self):
tab1 = ttk.Frame(self.tab_control, name="f_"+str(self.contadorN))
self.tab_control.add(tab1, text='nuevo_'+str(self.contadorN))
self.tab_control.pack(expand=1, fill='both')
S1 = tk.Scrollbar(tab1)
numberLines = TextLineNumbers(tab1, width=40, bg='#313335')
# T1 = tk.Text(tab1, bg="white")
T1 = Ctxt(tab1)
numberLines.attach(T1)
S1.pack(side=tk.RIGHT, fill=tk.Y)
numberLines.pack(side=tk.LEFT, fill=tk.Y, padx=(5, 0))
T1.pack(side=tk.TOP, fill='both')
S1.config(command=T1.yview)
T1.config(yscrollcommand=S1.set)
T1.tag_config("green", foreground="#0bde20")
T1.tag_config("blue", foreground="#09ebc9")
T1.tag_config("norm", foreground="white")
T1.tag_config("id", foreground="#f5ed00")
T1.tag_config("cadena", foreground="#f28900")
def onScrollPress(event):
S1.bind("<B1-Motion>", numberLines.redraw)
def onScrollRelease(event):
            S1.unbind("<B1-Motion>")
def onPressDelay(event):
self.after(2, numberLines.redraw)
T1.highlight_pattern(
"(\w|\s|\n|\r|\_|\;|\=|\+|\-|\*|\'|\(|\)|\,)", "norm", regexp=True)
T1.highlight_pattern(
"([_a-zA-Z][a-zA-Z_0-9_]*)", "id", regexp=True)
for patt in self.lista:
T1.highlight_pattern("\m"+str(patt)+"\M", "blue", regexp=True)
T1.highlight_pattern(
"\m"+str(patt).upper()+"\M", "blue", regexp=True)
T1.highlight_pattern(
"((\'.*?\')|(\".*?\"))", "cadena", regexp=True)
T1.highlight_pattern(
"((/\*(.|\n)*?\*/)|(--.*\n))", "green", regexp=True)
T1.bind("<Key>", onPressDelay)
T1.bind("<Button-1>", numberLines.redraw)
S1.bind("<Button-1>", onScrollPress)
T1.bind("<MouseWheel>", onPressDelay)
self.contadorN = self.contadorN+1
idtab = self.tab_control.index("end")-1
self.tab_control.select(idtab)
return self.contadorN-1
# print(str(self.tab_control.index(tk.END)))
def f_guardarcomo(self):
filename = filedialog.asksaveasfilename(
initialdir="./", title="Guardar Como")
tabActual = self.tab_control.tab(tk.CURRENT)['text']
if self.tab_control.tab(self.tab_control.select(), "text")[0] != '/':
Contenido = self.tab_control.children[tabActual.replace(
"nuevo", "f")].winfo_children()[2].get("1.0", tk.END)
else:
            # save
Contenido = self.tab_control.children[tabActual].winfo_children()[
2].get("1.0", tk.END)
        # write the new file with the given filename and content
self.WriteFile(filename, Contenido)
x = filename.split("/")
# dictionary[new_key] = dictionary.pop(old_key)
messagebox.showinfo(title="Guardar Como",
message="ARCHIVO GUARDADO CON ÉXITO")
def f_guardar(self):
tabActual = self.tab_control.tab(tk.CURRENT)['text']
#
if tabActual[0] != '/':
self.f_guardarcomo()
else:
filename = "./pruebas/" + tabActual
Contenido = self.tab_control.children[tabActual].winfo_children()[
2].get("1.0", tk.END)
            # write the new file with the given filename and content
self.WriteFile(filename, Contenido)
messagebox.showinfo(title="Guardar",
message="ARCHIVO GUARDADO CON ÉXITO")
def f_abrir(self):
try:
with filedialog.askopenfile(initialdir="./", title="Abrir Archivo") as f:
Contenido = f.read()
x = (f.name).split("/")
name = "/"+x[len(x)-1]
tabs = self.f_nuevaPestania()
idtab = self.tab_control.index("end")-1
self.tab_control.select(idtab)
print(tabs)
print(
self.tab_control.children["f_"+str(tabs)].winfo_children()[2])
                # insert text
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].insert(
tk.END, Contenido)
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern(
"(\w|\s|\n|\r|\_|\;|\=|\+|\-|\*|\'|\(|\)|\,)", "norm", regexp=True)
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern(
"([_a-zA-Z][a-zA-Z_0-9_]*)", "id", regexp=True)
for patt in self.lista:
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern("\m"+str(patt)+"\M", "blue", regexp=True)
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern(
"\m"+str(patt).upper()+"\M", "blue", regexp=True)
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern(
"((\'.*?\')|(\".*?\"))", "cadena", regexp=True)
self.tab_control.children["f_"+str(tabs)].winfo_children()[2].highlight_pattern(
"((/\*(.|\n)*?\*/)|(--.*\n))", "green", regexp=True)
except:
            print("nothing to do")
    def WriteFile(self, filename, Content):
        # use a context manager so the file is closed even if the write fails
        with open(filename, "w") as file:
            file.write(Content)
def f_CerrarTodo(self):
print(self.tab_control.index("end"))
while self.tab_control.index("end") > 0:
self.tab_control.select(0)
self.f_cerrarPestania()
def f_exit(self):
if self.tab_control.index("end") > 0:
self.f_CerrarTodo()
sys.exit()
else:
sys.exit()
    def f_abrirSintactico(self):
        try:
            wb.open_new(r'ErroresSintacticos.html')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirLexico(self):
        try:
            wb.open_new(r'ErroresLexicos.html')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirBNFascendente(self):
        try:
            wb.open_new(r'reporteGramatica.gv.pdf')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirBNFdescendente(self):
        try:
            wb.open_new(r'')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirAST(self):
        try:
            wb.open_new(r'AST.svg')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirtablaSimbolos(self):
        try:
            wb.open_new(r'Simbolos.html')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
    def f_abrirtablaSemanticos(self):
        try:
            wb.open_new(r'ErroresSemanticos.html')
        except:
            tk.messagebox.showwarning(title="This file does not exist", message="Please run the program to generate the files")
def f_parsear(self, texto):
self.T.delete(1.0,tk.END)
salida= parser.ejecutarAnalisis(texto)
for output in salida:
self.T.insert(tk.END,"\n>>>"+ output)
def f_correr(self):
lista = []
tabActual = self.tab_control.tab(tk.CURRENT)['text']
if tabActual[0] != '/':
texto = self.tab_control.children[tabActual.replace(
"nuevo", "f")].winfo_children()[2].get("1.0", tk.END)
start = 0
return self.f_parsear(texto)
else:
            texto = self.tab_control.children[tabActual].winfo_children()[2].get("1.0", tk.END)
start = 0
return self.f_parsear(texto)
def main():
ventana = tk.Tk()
    # INITIALIZE PATH VARIABLES
app_carpeta = os.getcwd()
img_carpeta = app_carpeta + os.sep + "Images" + os.sep
    # DECLARE AND VERIFY THE APPLICATION ICONS:
iconos = (img_carpeta + "cerrar.png",
img_carpeta + "nuevo.png",
img_carpeta + "abrir.png",
img_carpeta + "guardar.png",
img_carpeta + "guardar_como.png",
img_carpeta + "play.png",
img_carpeta + "reporte.png",
img_carpeta + "arbol.png",
img_carpeta + "bnf.png",
img_carpeta + "tabla.png"
)
app = Application(ventana, iconos)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: penta_fractal_turtle.py
# Project: Sonstige_Uebungen
# Created Date: Thursday 28.02.2019, 12:07
# Author: Apop85
# -----
# Last Modified: Friday 01.03.2019, 12:50
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further information
# -----
# Description: An attempt at drawing a pentagram fractal
###
from turtle import *
def penta_cycle(x):
if x < 10:
return
for i in range(5):
forward(x)
right(144)
penta_cycle(x/2)
def move_to_start():
up()
back(250)
right(90)
back(50)
left(90)
down()
speed()
clear()
try:
speed(0)
move_to_start()
penta_cycle(500)
input()
except:
print('Script aborted')
|
#!/usr/bin/env python3
"""An example configuration file
"""
import sys
import os
# Assuming the cell order in the metadata tables is the same as in the gene-level matrices
# The output knn matrices follow such order as well
ka_smooth = 200
knn = 200
date = 200826
# # Configs
name = 'mop_2mods_atacrna_{}_ka{}_knn{}'.format(date, ka_smooth, knn)
outdir = '/cndd2/fangming/projects/miniatlas/results'
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
save_knn = True # new required arguments (7/27/2020)
output_knn_within = outdir + "/knn_within_{}_{{}}.npz".format(name)
output_knn_across = outdir + "/knn_across_{}_{{}}_{{}}.npz".format(name)
# end of new required arguments (7/27/2020)
# required for downsamp (8/7/2020)
output_cells = outdir + "/cells_{{}}_{}.npy".format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_neurons'
# fixed dataset configs
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
mods_selected = [
# 'snmcseq_gene',
'snatac_gene',
'smarter_cells',
# 'smarter_nuclei',
# '10x_cells_v2',
# '10x_cells_v3',
# '10x_nuclei_v3',
# '10x_nuclei_v3_macosko',
]
# features_selected = ['smarter_cells']
# features_selected = ['snmcseq_gene']
features_selected = ['snatac_gene']
# check features
for features_modality in features_selected:
assert (features_modality in mods_selected)
# within modality
ps = {'mc': 0.9,
'atac': 0.1,
'rna': 0.7,
}
drop_npcs = {
'mc': 0,
'atac': 0,
'rna': 0,
}
ka_smooth = ka_smooth # default: 5
# across modality
cross_mod_distance_measure = 'correlation' # cca
knn = knn
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30 # default: 30
resolutions = [0.1, 1, 2, 4]
# umap
umap_neighbors = 60
min_dist = 0.5
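# --- Note on the double-brace templates above (illustrative) ---
# Paths such as output_knn_within and output_knn_across contain '{{}}' so that the
# first .format(name) call leaves a literal '{}' behind for a later .format() with
# the modality name(s), e.g.:
#   output_knn_within.format('snatac_gene')
#   -> outdir + '/knn_within_' + name + '_snatac_gene.npz'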
|
#!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2021-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
This script verifies that the services required to monitor and interact with
console logs are up and ready in the k8s cluster.
"""
import os
import logging
import subprocess
import sys
import yaml
from kubernetes import client, config
DEFAULT_LOG_LEVEL = os.environ.get("LOG_LEVEL", logging.INFO)
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
# set up logging to file
logFilePath = '/tmp/' + sys.argv[0].split('/')[-1] + '.log'
file_handler = logging.FileHandler(filename=logFilePath)
file_handler.setLevel(os.environ.get("FILE_LOG_LEVEL", DEFAULT_LOG_LEVEL))
logger.addHandler(file_handler)
# set up logging to console
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(os.environ.get("CONSOLE_LOG_LEVEL", DEFAULT_LOG_LEVEL))
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Service names expected to be found
EXPECTED_SERVICES = {
"cray-console-operator",
"cray-console-node",
"cray-console-data"
}
# filter out the postgres pods
POSTGRES_FILTER = "postgres"
class ConsoleException(Exception):
pass
def check_services_running():
# Configs can be set in Configuration class directly or using helper utility
config.load_kube_config()
foundPods = dict()
v1 = client.CoreV1Api()
ret = v1.list_pod_for_all_namespaces(watch=False)
for i in ret.items:
podName = i.metadata.name.lower()
for expected in EXPECTED_SERVICES:
if expected in podName and not POSTGRES_FILTER in podName:
# record that we found a pod for the expected service
logger.debug(f"Checking {i.metadata.name} : {i.status.phase}")
foundPods[expected] = i.metadata.name
# need to look at that state of each container - the i.status.phase lies...
ok = True
for c in i.status.container_statuses:
# Note: when a container is in back-off state, it may either be in
# 'waiting' or 'terminated' state - consider either an error and
# gather what information we can.
if c.ready != True:
if c.state.terminated != None:
logger.error(f"Pod: {i.metadata.name} Container Terminated: {c.name}, " +
f"Exit Code: {c.state.terminated.exit_code}, " +
f"Reason: {c.state.terminated.reason}, " +
f"Message: {c.state.terminated.message}")
ok = False
if c.state.waiting != None:
logger.error(f"Pod: {i.metadata.name} Container: {c.name}, " +
f"{c.state.waiting.reason}: {c.state.waiting.message}")
ok = False
if not ok:
raise ConsoleException
# check that all expected services have been found
if not len(foundPods) == len(EXPECTED_SERVICES):
logger.error(f"The console pods in the services namespace did not match what was expected.")
logger.error(f"Expected services: {EXPECTED_SERVICES}")
logger.error(f"Found pods: {foundPods}")
raise ConsoleException
def main():
    try:
        logger.info("Beginning verification that all console services are running")
        # verify that all expected console services are present and healthy
        check_services_running()
        logger.info("Verification of console services succeeded")
        return 0
except ConsoleException:
return 1
except Exception as exc:
logger.error(f"Unexpected error verifying console services.", exc_info=exc)
return 1
if __name__ == "__main__":
sys.exit(main())
|
"""Base command for grow."""
from grow.deployments.destinations import local as local_destination
import click
import os
import pkg_resources
version = pkg_resources.get_distribution('grow').version
HELP_TEXT = ('Grow is a declarative file-based website generator. Read docs at '
'https://grow.dev. This is version {}.'.format(version))
# pylint: disable=unused-argument
@click.group(help=HELP_TEXT)
@click.version_option(prog_name='grow', version=version)
@click.option('--auth', help='Information used to sign in to services that'
' require authentication. --auth should be an email address.',
envvar='GROW_AUTH')
@click.option('--clear-auth', default=False, is_flag=True,
help='Clears stored auth information.')
@click.option('--auth-key-file', help='Path to a private key file used for'
' services that require authentication.', envvar='GROW_KEY_FILE')
@click.option(
'--interactive-auth', default=False, is_flag=True,
envvar='INTERACTIVE_AUTH',
help='Whether to automatically open an authorization page in your'
' default web browser for any steps that require authentication.'
' If you are running Grow on a machine with access to a web browser,'
' you may use --interactive-auth to automatically open the web'
' browser. By default, this option is turned off, requiring you to'
' manually copy and paste an authorization code.')
@click.option('--profile',
default=False, is_flag=True,
help='Show report of pod operation timing for performance analysis.')
def grow(auth, clear_auth, auth_key_file, interactive_auth, profile):
"""Grow CLI command."""
if interactive_auth not in (None, False):
os.environ['INTERACTIVE_AUTH'] = str(interactive_auth)
if auth is not None:
os.environ['AUTH_EMAIL_ADDRESS'] = str(auth)
if auth_key_file is not None:
os.environ['AUTH_KEY_FILE'] = str(auth_key_file)
if clear_auth:
os.environ['CLEAR_AUTH'] = '1'
@grow.resultcallback()
def process_subcommands(pod, profile, **_):
"""Handle flags that need to process after the sub command."""
if not pod:
return
if profile:
destination = local_destination.LocalDestination(
local_destination.Config())
destination.pod = pod
destination.export_profile_report()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from juriscraper.pacer import InternetArchive
from tests import TESTS_ROOT_EXAMPLES_PACER
from tests.local.PacerParseTestCase import PacerParseTestCase
class PacerParseInternetArchiveReportTest(PacerParseTestCase):
"""Tests for the IA XML docket parser"""
def setUp(self):
self.maxDiff = 200000
def test_parsing_ia_xml_files(self):
path_root = os.path.join(
TESTS_ROOT_EXAMPLES_PACER, "dockets_internet_archive"
)
self.parse_files(path_root, "*.xml", InternetArchive)
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from flask import Blueprint, render_template
# pylint: disable=invalid-name
viewsbp = Blueprint('viewsbp', __name__, url_prefix='/')
# Endpoint that fetches QRadar proxy settings if they are set, before making
# an HTTP request manually using these proxy settings to X-Force Exchange and
# displaying the results to the user injected into the HTML template.
# If no proxy settings are set an error is shown explaining that no proxy
# settings have been set.
@viewsbp.route('/index')
def index():
if 'QRADAR_HTTPS_PROXY' in os.environ:
# Manually retrieve QRadar proxy values
qradar_https_proxy = os.environ.get('QRADAR_HTTPS_PROXY')
qradar_http_proxy = os.environ.get('QRADAR_HTTP_PROXY')
qradar_no_proxy = os.environ.get('QRADAR_NO_PROXY')
# Make HTTP request using proxy values to IBM X-Force Exchange to get
# the download count for the QRadar Assistant app
# NOTE: This manual proxy set up is not the recommended way to do this,
# QRadar sets the python http_proxy and https_proxy values
# automatically, meaning that it is handled automatically and this
# manual setup is not needed - it is only to demo how to manually
# use the proxy values
proxies = {
'http': qradar_http_proxy,
'https': qradar_https_proxy,
}
response = requests.get(
'https://api.xforce.ibmcloud.com/hub/extensions/ed8aee4440f98f9c8bedaff4c5c644de',
proxies=proxies)
assistant_info = response.json()
download_count = assistant_info["extensions"][0]["downloads"]
return render_template('index.html',
proxy_set=True,
qradar_https_proxy=qradar_https_proxy,
qradar_http_proxy=qradar_http_proxy,
qradar_no_proxy=qradar_no_proxy,
download_count=download_count)
return render_template('index.html', proxy_set=False)
|
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple
from collections import OrderedDict
from coreapp.models import Asm, Assembly
from coreapp import util
from coreapp.sandbox import Sandbox
from django.conf import settings
import json
import logging
import os
from pathlib import Path
import subprocess
from dataclasses import dataclass
from platform import uname
logger = logging.getLogger(__name__)
PATH: str
if settings.USE_SANDBOX_JAIL:
PATH = "/bin:/usr/bin"
else:
PATH = os.environ["PATH"]
WINE: str
if "microsoft" in uname().release.lower() and not settings.USE_SANDBOX_JAIL:
logger.info("WSL detected & nsjail disabled: wine not required.")
WINE = ""
else:
WINE = "wine"
def load_compilers() -> Dict[str, Dict[str, str]]:
ret = {}
config_json = "config.json"
compilers_base = settings.BASE_DIR / "compilers"
compiler_dirs = next(os.walk(compilers_base))
for compiler_id in compiler_dirs[1]:
config_path = Path(compilers_base / compiler_id / config_json)
if config_path.exists():
with open(config_path) as f:
try:
config = json.load(f)
                except json.JSONDecodeError:
logger.error(f"Error: Unable to parse {config_json} for {compiler_id}")
continue
if "cc" in config and "platform" in config:
# allow binaries to exist outside of repo
binaries_path = Path(CompilerWrapper.base_path() / compiler_id)
logger.debug(f"Valid config found for {compiler_id}. Checking {binaries_path}...")
# consider compiler binaries present if *any* non-config.json file is found
binaries = (x for x in binaries_path.glob("*") if x.name != config_json)
if next(binaries, None) != None:
logger.debug(f"Enabling {compiler_id}.")
ret[compiler_id] = config
else:
logger.debug(f"No binaries for {compiler_id}, ignoring.")
else:
logger.warning(f"Error: {compiler_id} {config_json} is missing 'cc' and/or 'platform' field(s), skipping.")
return ret
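# --- Illustrative compiler config (assumed shape, inferred from the checks above) ---
# Each compiler directory under compilers/ needs a config.json with at least "cc" and
# "platform"; the "cc" command is run through the sandbox shell with $INPUT, $OUTPUT,
# $COMPILER_DIR and $COMPILER_FLAGS in the environment (see CompilerWrapper.compile_code
# below). Example with hypothetical values:
# {
#     "platform": "n64",
#     "cc": "\"$COMPILER_DIR/cc\" $COMPILER_FLAGS -o \"$OUTPUT\" \"$INPUT\""
# }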
@dataclass
class Platform:
name: str
description: str
arch: str
asm_prelude: str
assemble_cmd: Optional[str] = None
objdump_cmd: Optional[str] = None
nm_cmd: Optional[str] = None
@dataclass
class CompilationResult:
elf_object: bytes
errors: str
def load_platforms() -> Dict[str, Platform]:
return {
"n64": Platform(
"Nintendo 64",
"MIPS (big-endian)",
"mips",
assemble_cmd='mips-linux-gnu-as -march=vr4300 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.macro dlabel label
.global \label
\label:
.endm
.set noat
.set noreorder
.set gp=64
"""
),
"ps1": Platform(
"PlayStation",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=r3000 -mabi=32 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"ps2": Platform(
"PlayStation 2",
"MIPS (little-endian)",
"mipsel",
assemble_cmd='mips-linux-gnu-as -march=mips64 -mabi=64 -o "$OUTPUT" "$INPUT"',
objdump_cmd="mips-linux-gnu-objdump",
nm_cmd="mips-linux-gnu-nm",
asm_prelude="""
.macro .late_rodata
.section .rodata
.endm
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set noat
.set noreorder
"""
),
"gc_wii": Platform(
"GameCube / Wii",
"PPC",
"ppc",
assemble_cmd='powerpc-eabi-as -mgekko -o "$OUTPUT" "$INPUT"',
objdump_cmd="powerpc-eabi-objdump",
nm_cmd="powerpc-eabi-nm",
asm_prelude="""
.macro glabel label
.global \label
.type \label, @function
\label:
.endm
.set r0, 0
.set r1, 1
.set r2, 2
.set r3, 3
.set r4, 4
.set r5, 5
.set r6, 6
.set r7, 7
.set r8, 8
.set r9, 9
.set r10, 10
.set r11, 11
.set r12, 12
.set r13, 13
.set r14, 14
.set r15, 15
.set r16, 16
.set r17, 17
.set r18, 18
.set r19, 19
.set r20, 20
.set r21, 21
.set r22, 22
.set r23, 23
.set r24, 24
.set r25, 25
.set r26, 26
.set r27, 27
.set r28, 28
.set r29, 29
.set r30, 30
.set r31, 31
.set f0, 0
.set f1, 1
.set f2, 2
.set f3, 3
.set f4, 4
.set f5, 5
.set f6, 6
.set f7, 7
.set f8, 8
.set f9, 9
.set f10, 10
.set f11, 11
.set f12, 12
.set f13, 13
.set f14, 14
.set f15, 15
.set f16, 16
.set f17, 17
.set f18, 18
.set f19, 19
.set f20, 20
.set f21, 21
.set f22, 22
.set f23, 23
.set f24, 24
.set f25, 25
.set f26, 26
.set f27, 27
.set f28, 28
.set f29, 29
.set f30, 30
.set f31, 31
.set qr0, 0
.set qr1, 1
.set qr2, 2
.set qr3, 3
.set qr4, 4
.set qr5, 5
.set qr6, 6
.set qr7, 7
"""
),
}
def get_assemble_cmd(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].assemble_cmd
return None
def get_nm_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].nm_cmd
return None
def get_objdump_command(platform: str) -> Optional[str]:
if platform in _platforms:
return _platforms[platform].objdump_cmd
return None
def _check_assembly_cache(*args: str) -> Tuple[Optional[Assembly], str]:
hash = util.gen_hash(args)
return Assembly.objects.filter(hash=hash).first(), hash
class CompilerWrapper:
@staticmethod
def base_path() -> Path:
return settings.COMPILER_BASE_PATH
@staticmethod
def platform_from_compiler(compiler: str) -> Optional[str]:
cfg = _compilers.get(compiler)
return cfg.get("platform") if cfg else None
@staticmethod
def arch_from_platform(platform: str) -> Optional[str]:
plt = _platforms.get(platform)
return plt.arch if plt else None
@staticmethod
def available_compiler_ids() -> List[str]:
return sorted(_compilers.keys())
@staticmethod
def available_compilers() -> Dict[str, Dict[str, Optional[str]]]:
return {k: {"platform": CompilerWrapper.platform_from_compiler(k)} for k in CompilerWrapper.available_compiler_ids()}
@staticmethod
def available_platforms() -> OrderedDict[str, Dict[str, str]]:
a_set: Set[str] = set()
ret = OrderedDict()
for id in CompilerWrapper.available_compiler_ids():
a_set.add(_compilers[id]["platform"])
for a in sorted(a_set):
ret[a] = {
"name": _platforms[a].name,
"description": _platforms[a].description,
"arch": _platforms[a].arch,
}
return ret
@staticmethod
def filter_compiler_flags(compiler: str, compiler_flags: str) -> str:
cfg = _compilers[compiler]
# Remove irrelevant flags that are part of the base compiler configs or
# don't affect matching, but clutter the compiler settings field.
# TODO: use cfg for this?
skip_flags_with_args = {
"-woff",
"-B",
"-I",
"-D",
"-U",
"-G",
}
skip_flags = {
"-ffreestanding",
"-non_shared",
"-Xcpluscomm",
"-Xfullwarn",
"-fullwarn",
"-Wab,-r4300_mul",
"-c",
"-w",
}
skip_next = False
flags = []
for flag in compiler_flags.split():
if skip_next:
skip_next = False
continue
if flag in skip_flags:
continue
if flag in skip_flags_with_args:
skip_next = True
continue
if any(flag.startswith(f) for f in skip_flags_with_args):
continue
flags.append(flag)
return " ".join(flags)
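    # Example (illustrative; 'ido5.3' is a hypothetical compiler id that would have to
    # exist in _compilers for the lookup above to succeed):
    #   CompilerWrapper.filter_compiler_flags("ido5.3", "-O2 -mips2 -woff 819 -I include -c")
    #   -> "-O2 -mips2"   (-woff and -I consume their argument, -c is dropped)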
@staticmethod
@lru_cache(maxsize=settings.COMPILATION_CACHE_SIZE) # type: ignore
def compile_code(compiler: str, compiler_flags: str, code: str, context: str) -> CompilationResult:
if compiler not in _compilers:
logger.debug(f"Compiler {compiler} not found")
return CompilationResult(b'', "ERROR: Compiler not found")
code = code.replace("\r\n", "\n")
context = context.replace("\r\n", "\n")
with Sandbox() as sandbox:
code_path = sandbox.path / "code.c"
object_path = sandbox.path / "object.o"
with code_path.open("w") as f:
f.write('#line 1 "ctx.c"\n')
f.write(context)
f.write('\n')
f.write('#line 1 "src.c"\n')
f.write(code)
f.write('\n')
compiler_path = CompilerWrapper.base_path() / compiler
# Run compiler
try:
compile_proc = sandbox.run_subprocess(
_compilers[compiler]["cc"],
mounts=[compiler_path],
shell=True,
env={
"PATH": PATH,
"WINE": WINE,
"INPUT": sandbox.rewrite_path(code_path),
"OUTPUT": sandbox.rewrite_path(object_path),
"COMPILER_DIR": sandbox.rewrite_path(compiler_path),
"COMPILER_FLAGS": sandbox.quote_options(compiler_flags),
"MWCIncludes": "/tmp",
})
except subprocess.CalledProcessError as e:
# Compilation failed
                logger.debug("Compilation failed: " + e.stderr)
return CompilationResult(b'', e.stderr)
if not object_path.exists():
logger.error("Compiler did not create an object file")
return CompilationResult(b'', "ERROR: Compiler did not create an object file")
return CompilationResult(object_path.read_bytes(), compile_proc.stderr)
@staticmethod
def assemble_asm(platform: str, asm: Asm, to_regenerate: Optional[Assembly] = None) -> Tuple[Optional[Assembly], Optional[str]]:
if platform not in _platforms:
logger.error(f"Platform {platform} not found")
return (None, f"Platform {platform} not found")
assemble_cmd = get_assemble_cmd(platform)
if not assemble_cmd:
logger.error(f"Assemble command for platform {platform} not found")
return (None, f"Assemble command for platform {platform} not found")
# Use the cache if we're not manually re-running an Assembly
if not to_regenerate:
cached_assembly, hash = _check_assembly_cache(platform, asm.hash)
if cached_assembly:
logger.debug(f"Assembly cache hit! hash: {hash}")
return (cached_assembly, None)
platform_cfg = _platforms[platform]
with Sandbox() as sandbox:
asm_path = sandbox.path / "asm.s"
asm_path.write_text(platform_cfg.asm_prelude + asm.data)
object_path = sandbox.path / "object.o"
# Run assembler
try:
assemble_proc = sandbox.run_subprocess(
platform_cfg.assemble_cmd,
mounts=[],
shell=True,
env={
"PATH": PATH,
"INPUT": sandbox.rewrite_path(asm_path),
"OUTPUT": sandbox.rewrite_path(object_path),
})
except subprocess.CalledProcessError as e:
                # Assembler invocation failed
                logger.exception("Error running assembler")
return (None, e.stderr)
# Assembly failed
if assemble_proc.returncode != 0:
return (None, assemble_proc.stderr)
if not object_path.exists():
logger.error("Assembler did not create an object file")
return (None, "Assembler did not create an object file")
if to_regenerate:
assembly = to_regenerate
assembly.elf_object = object_path.read_bytes()
else:
assembly = Assembly(
hash=hash,
arch=platform_cfg.arch,
source_asm=asm,
elf_object=object_path.read_bytes(),
)
assembly.save()
return (assembly, None)
_compilers = load_compilers()
logger.info(f"Found {len(_compilers)} compiler(s): {', '.join(_compilers.keys())}")
_platforms = load_platforms()
logger.info(f"Available platform(s): {', '.join(CompilerWrapper.available_platforms().keys())}")
|
import unittest
import mock
from ...authentication.base import AuthenticationBase
from ...exceptions import Auth0Error
class TestBase(unittest.TestCase):
@mock.patch('requests.post')
def test_post(self, mock_post):
ab = AuthenticationBase()
mock_post.return_value.status_code = 200
mock_post.return_value.text = '{"x": "y"}'
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
mock_post.assert_called_with(url='the-url', data='{"a": "b"}',
headers={'c': 'd'})
self.assertEqual(data, {'x': 'y'})
@mock.patch('requests.post')
def test_post_error(self, mock_post):
ab = AuthenticationBase()
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"error": "e0",' \
'"error_description": "desc"}'
with self.assertRaises(Auth0Error) as context:
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'e0')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_code_property(self, mock_post):
ab = AuthenticationBase()
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"code": "e0",' \
'"error_description": "desc"}'
with self.assertRaises(Auth0Error) as context:
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'e0')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_no_error_code(self, mock_post):
ab = AuthenticationBase()
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = '{"error_description": "desc"}'
with self.assertRaises(Auth0Error) as context:
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, 'desc')
@mock.patch('requests.post')
def test_post_error_with_text_response(self, mock_post):
ab = AuthenticationBase()
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = 'there has been a terrible error'
with self.assertRaises(Auth0Error) as context:
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message,
'there has been a terrible error')
@mock.patch('requests.post')
def test_post_error_with_no_response_text(self, mock_post):
ab = AuthenticationBase()
for error_status in [400, 500, None]:
mock_post.return_value.status_code = error_status
mock_post.return_value.text = None
with self.assertRaises(Auth0Error) as context:
data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})
self.assertEqual(context.exception.status_code, error_status)
self.assertEqual(context.exception.error_code, 'a0.sdk.internal.unknown')
self.assertEqual(context.exception.message, '')
|
#!/usr/bin/env python3
# Hacked together by / Copyright 2021 Ross Wightman
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""Vision Transformer (ViT)
ViT: `"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"
<https://arxiv.org/abs/2010.11929>`_
DeiT: `"Training data-efficient image transformers & distillation through attention"
<https://arxiv.org/abs/2012.12877>`_
References:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from collections import OrderedDict
from typing import Callable, Optional, Union
import cv2
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import numpy as np
from loguru import logger
from megengine.utils.tuple_function import _pair as to_2tuple
from basecls.layers import DropPath, activation, init_vit_weights, norm2d, trunc_normal_
from basecls.utils import recursive_update, registers
__all__ = ["PatchEmbed", "Attention", "FFN", "EncoderBlock", "ViT"]
class PatchEmbed(M.Module):
"""Image to Patch Embedding
Args:
img_size: Image size. Default: ``224``
patch_size: Patch token size. Default: ``16``
in_chans: Number of input image channels. Default: ``3``
embed_dim: Number of linear projection output channels. Default: ``768``
flatten: Flatten embedding. Default: ``True``
norm_name: Normalization layer. Default: ``None``
"""
def __init__(
self,
img_size: int = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
flatten: bool = True,
norm_name: str = None,
**kwargs,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.proj = M.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm2d(norm_name, embed_dim) if norm_name else None
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], (
f"Input image size ({H}*{W}) doesn't match model "
f"({self.img_size[0]}*{self.img_size[1]})."
)
x = self.proj(x)
if self.flatten:
x = F.flatten(x, 2).transpose(0, 2, 1)
if self.norm:
x = self.norm(x)
return x
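    # Shape sketch (illustrative): with the defaults (img_size=224, patch_size=16,
    # embed_dim=768) the grid is 14x14 = 196 patches, so forward() maps
    # (B, 3, 224, 224) -> (B, 196, 768) when flatten=True.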
class Attention(M.Module):
"""Self-Attention block.
Args:
        dim: Number of input channels.
num_heads: Number of attention heads. Default: ``8``
qkv_bias: If True, add a learnable bias to query, key, value. Default: ``False``
qk_scale: Override default qk scale of ``head_dim ** -0.5`` if set.
attn_drop: Dropout ratio of attention weight. Default: ``0.0``
proj_drop: Dropout ratio of output. Default: ``0.0``
"""
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qk_scale: float = None,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = M.Linear(dim, dim * 3, bias=qkv_bias)
self.softmax = M.Softmax(axis=-1)
self.attn_drop = M.Dropout(attn_drop)
self.proj = M.Linear(dim, dim)
self.proj_drop = M.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.transpose(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = F.matmul(q, k.transpose(0, 1, 3, 2)) * self.scale
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = F.matmul(attn, v).transpose(0, 2, 1, 3).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
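# Attention shape sketch (illustrative, assuming dim=768, num_heads=12 and a
# sequence length of N tokens): self.qkv(x) has shape (B, N, 2304); after the
# reshape and transpose, q, k and v are each (B, 12, N, 64); the attention map
# is (B, 12, N, N); and the output is merged back to (B, N, 768) before
# self.proj is applied.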
class FFN(M.Module):
"""FFN for ViT
Args:
in_features: Number of input features.
        hidden_features: Number of hidden features. Default: ``None``
out_features: Number of output features. Default: ``None``
drop: Dropout ratio. Default: ``0.0``
act_name: activation function. Default: ``"gelu"``
"""
def __init__(
self,
in_features: int,
hidden_features: int = None,
out_features: int = None,
drop: float = 0.0,
act_name: str = "gelu",
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = M.Linear(in_features, hidden_features)
self.act = activation(act_name)
self.fc2 = M.Linear(hidden_features, out_features)
self.drop = M.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class EncoderBlock(M.Module):
"""Transformer Encoder block.
Args:
dim: Number of input channels.
num_heads: Number of attention heads.
ffn_ratio: Ratio of ffn hidden dim to embedding dim. Default: ``4.0``
qkv_bias: If True, add a learnable bias to query, key, value. Default: ``False``
qk_scale: Override default qk scale of ``head_dim ** -0.5`` if set.
drop: Dropout ratio of non-attention weight. Default: ``0.0``
attn_drop: Dropout ratio of attention weight. Default: ``0.0``
drop_path: Stochastic depth rate. Default: ``0.0``
norm_name: Normalization layer. Default: ``"LN"``
act_name: Activation layer. Default: ``"gelu"``
"""
def __init__(
self,
dim: int,
num_heads: int,
ffn_ratio: float = 4.0,
qkv_bias: bool = False,
qk_scale: float = None,
attn_drop: float = 0.0,
drop: float = 0.0,
drop_path: float = 0.0,
norm_name: str = "LN",
act_name: str = "gelu",
**kwargs,
):
super().__init__()
self.norm1 = norm2d(norm_name, dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else None
self.norm2 = norm2d(norm_name, dim)
ffn_hidden_dim = int(dim * ffn_ratio)
self.ffn = FFN(
in_features=dim, hidden_features=ffn_hidden_dim, drop=drop, act_name=act_name
)
def forward(self, x):
if self.drop_path:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.ffn(self.norm2(x)))
else:
x = x + self.attn(self.norm1(x))
x = x + self.ffn(self.norm2(x))
return x
@registers.models.register()
class ViT(M.Module):
"""ViT model.
Args:
img_size: Input image size. Default: ``224``
patch_size: Patch token size. Default: ``16``
in_chans: Number of input image channels. Default: ``3``
embed_dim: Number of linear projection output channels. Default: ``768``
depth: Depth of Transformer Encoder layer. Default: ``12``
num_heads: Number of attention heads. Default: ``12``
ffn_ratio: Ratio of ffn hidden dim to embedding dim. Default: ``4.0``
qkv_bias: If True, add a learnable bias to query, key, value. Default: ``True``
qk_scale: Override default qk scale of head_dim ** -0.5 if set. Default: ``None``
representation_size: Size of representation layer (pre-logits). Default: ``None``
distilled: Includes a distillation token and head. Default: ``False``
drop_rate: Dropout rate. Default: ``0.0``
attn_drop_rate: Attention dropout rate. Default: ``0.0``
drop_path_rate: Stochastic depth rate. Default: ``0.0``
embed_layer: Patch embedding layer. Default: :py:class:`PatchEmbed`
norm_name: Normalization layer. Default: ``"LN"``
act_name: Activation function. Default: ``"gelu"``
num_classes: Number of classes. Default: ``1000``
"""
def __init__(
self,
img_size: int = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
ffn_ratio: float = 4.0,
qkv_bias: bool = True,
qk_scale: float = None,
representation_size: int = None,
distilled: bool = False,
drop_rate: float = 0.0,
attn_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
embed_layer: M.Module = PatchEmbed,
norm_name: str = "LN",
act_name: str = "gelu",
num_classes: int = 1000,
**kwargs,
):
super().__init__()
# Patch Embedding
self.embed_dim = embed_dim
self.patch_embed = embed_layer(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
# CLS & DST Tokens
self.cls_token = mge.Parameter(F.zeros([1, 1, embed_dim]))
self.dist_token = mge.Parameter(F.zeros([1, 1, embed_dim])) if distilled else None
self.num_tokens = 2 if distilled else 1
# Pos Embedding
self.pos_embed = mge.Parameter(F.zeros([1, num_patches + self.num_tokens, embed_dim]))
self.pos_drop = M.Dropout(drop_rate)
# Blocks
dpr = [
x.item() for x in F.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = M.Sequential(
*[
EncoderBlock(
dim=embed_dim,
num_heads=num_heads,
ffn_ratio=ffn_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_name=norm_name,
act_name=act_name,
)
for i in range(depth)
]
)
self.norm = norm2d(norm_name, embed_dim)
# Representation layer
if representation_size and not distilled:
self.num_features = representation_size
self.pre_logits = M.Sequential(
OrderedDict(
[("fc", M.Linear(embed_dim, representation_size)), ("act", activation("tanh"))]
)
)
else:
self.pre_logits = None
# Classifier head(s)
self.head = M.Linear(self.embed_dim, num_classes) if num_classes > 0 else None
self.head_dist = None
if distilled:
self.head_dist = M.Linear(self.embed_dim, num_classes) if num_classes > 0 else None
# Init
self.init_weights()
def init_weights(self):
trunc_normal_(self.pos_embed, std=0.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(init_vit_weights)
def forward(self, x):
x = self.patch_embed(x)
cls_token = F.broadcast_to(self.cls_token, (x.shape[0], 1, self.cls_token.shape[-1]))
if self.dist_token is None:
x = F.concat((cls_token, x), axis=1)
else:
dist_token = F.broadcast_to(self.dist_token, (x.shape[0], 1, self.dist_token.shape[-1]))
x = F.concat((cls_token, dist_token, x), axis=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
if self.dist_token is None:
x = x[:, 0]
if self.pre_logits:
x = self.pre_logits(x)
else:
x = x[:, 0], x[:, 1]
if self.head_dist is not None:
x_cls, x_dist = x
if self.head:
x_cls = self.head(x_cls)
if self.head_dist:
x_dist = self.head_dist(x_dist)
            if self.training:
                # during training, return both classifier predictions separately
                return x_cls, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x_cls + x_dist) / 2
elif self.head:
x = self.head(x)
return x
def load_state_dict(
self,
state_dict: Union[dict, Callable[[str, mge.Tensor], Optional[np.ndarray]]],
strict=True,
):
if "pos_embed" in state_dict:
old_pos_embed = state_dict["pos_embed"]
old_n_patches = old_pos_embed.shape[1] - self.num_tokens
old_gs = int(math.sqrt(old_n_patches + 0.5))
new_n_patches = self.pos_embed.shape[1] - self.num_tokens
new_gs = int(math.sqrt(new_n_patches + 0.5))
logger.info("Position embedding grid-size from {} to {}", [old_gs] * 2, [new_gs] * 2)
logger.info(
"Resized position embedding: {} to {}", old_pos_embed.shape, self.pos_embed.shape
)
if isinstance(old_pos_embed, mge.Tensor):
old_pos_embed = old_pos_embed.numpy()
pos_emb_tok, old_pos_emb_grid = np.split(old_pos_embed, [self.num_tokens], axis=1)
old_pos_emb_grid = old_pos_emb_grid.reshape(old_gs, old_gs, -1).transpose(2, 0, 1)
new_pos_embed_grid = (
np.stack(
[
cv2.resize(c, (new_gs, new_gs), interpolation=cv2.INTER_CUBIC)
for c in old_pos_emb_grid
]
)
.transpose(1, 2, 0)
.reshape(1, new_gs ** 2, -1)
)
new_pos_embed = np.concatenate([pos_emb_tok, new_pos_embed_grid], axis=1)
if isinstance(old_pos_embed, mge.Tensor):
new_pos_embed = mge.Parameter(new_pos_embed)
state_dict["pos_embed"] = new_pos_embed
super().load_state_dict(state_dict, strict)
def _build_vit(**kwargs):
model_args = dict(depth=12, drop_path_rate=0.1)
recursive_update(model_args, kwargs)
return ViT(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_tiny_patch16_224/vit_tiny_patch16_224.pkl"
)
def vit_tiny_patch16_224(**kwargs):
model_args = dict(patch_size=16, embed_dim=192, num_heads=3)
recursive_update(model_args, kwargs)
return _build_vit(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_tiny_patch16_384/vit_tiny_patch16_384.pkl"
)
def vit_tiny_patch16_384(**kwargs):
model_args = dict(img_size=384)
recursive_update(model_args, kwargs)
return vit_tiny_patch16_224(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_small_patch16_224/vit_small_patch16_224.pkl"
)
def vit_small_patch16_224(**kwargs):
model_args = dict(patch_size=16, embed_dim=384, num_heads=6)
recursive_update(model_args, kwargs)
return _build_vit(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_small_patch16_384/vit_small_patch16_384.pkl"
)
def vit_small_patch16_384(**kwargs):
model_args = dict(img_size=384)
recursive_update(model_args, kwargs)
return vit_small_patch16_224(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_small_patch32_224/vit_small_patch32_224.pkl"
)
def vit_small_patch32_224(**kwargs):
model_args = dict(patch_size=32, embed_dim=384, num_heads=6)
recursive_update(model_args, kwargs)
return _build_vit(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_small_patch32_384/vit_small_patch32_384.pkl"
)
def vit_small_patch32_384(**kwargs):
model_args = dict(img_size=384)
recursive_update(model_args, kwargs)
return vit_small_patch32_224(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_base_patch16_224/vit_base_patch16_224.pkl"
)
def vit_base_patch16_224(**kwargs):
model_args = dict(patch_size=16, embed_dim=768, num_heads=12)
recursive_update(model_args, kwargs)
return _build_vit(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_base_patch16_384/vit_base_patch16_384.pkl"
)
def vit_base_patch16_384(**kwargs):
model_args = dict(img_size=384)
recursive_update(model_args, kwargs)
return vit_base_patch16_224(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_base_patch32_224/vit_base_patch32_224.pkl"
)
def vit_base_patch32_224(**kwargs):
model_args = dict(patch_size=32, embed_dim=768, num_heads=12)
recursive_update(model_args, kwargs)
return _build_vit(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/"
"vit/vit_base_patch32_384/vit_base_patch32_384.pkl"
)
def vit_base_patch32_384(**kwargs):
model_args = dict(img_size=384)
recursive_update(model_args, kwargs)
return vit_base_patch32_224(**model_args)
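# A minimal forward-pass sketch (hypothetical usage, not part of basecls): build
# a ViT-Tiny directly from the class defined above; no pretrained weights are
# loaded here.
if __name__ == "__main__":
    model = ViT(img_size=224, patch_size=16, embed_dim=192, depth=12, num_heads=3)
    model.eval()  # disable dropout / drop-path
    dummy = mge.tensor(np.zeros((1, 3, 224, 224), dtype="float32"))
    logits = model(dummy)
    print(logits.shape)  # expected: (1, 1000)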
|
# Copyright (c) 2012, 2013, 2014 Ilya Otyutskiy <ilya.otyutskiy@icloud.com>
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import fcntl
import logging
import os
import signal
import sys
from types import FrameType, TracebackType
from typing import NoReturn, Optional, Type
def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None:
"""daemonize the current process
This calls fork(), and has the main process exit. When it returns we will be
running in the child process.
"""
    # If the pidfile already exists, read the old pid from it first so that we
    # can restore it if locking fails, because opening the file for writing
    # truncates its contents.
if os.path.isfile(pid_file):
with open(pid_file) as pid_fh:
old_pid = pid_fh.read()
# Create a lockfile so that only one instance of this daemon is running at any time.
try:
lock_fh = open(pid_file, "w")
except OSError:
print("Unable to create the pidfile.")
sys.exit(1)
try:
# Try to get an exclusive lock on the file. This will fail if another process
# has the file locked.
fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
print("Unable to lock on the pidfile.")
# We need to overwrite the pidfile if we got here.
#
# XXX better to avoid overwriting it, surely. this looks racey as the pid file
# could be created between us trying to read it and us trying to lock it.
with open(pid_file, "w") as pid_fh:
pid_fh.write(old_pid)
sys.exit(1)
# Fork, creating a new process for the child.
process_id = os.fork()
if process_id != 0:
# parent process: exit.
# we use os._exit to avoid running the atexit handlers. In particular, that
# means we don't flush the logs. This is important because if we are using
# a MemoryHandler, we could have logs buffered which are now buffered in both
# the main and the child process, so if we let the main process flush the logs,
# we'll get two copies.
os._exit(0)
# This is the child process. Continue.
    # Stop listening for signals that the parent process receives.
    # This is done by starting a new session with setsid(): it puts the process
    # in a new session and process group and detaches its controlling terminal.
    # setpgrp() is an alternative to setsid().
os.setsid()
# point stdin, stdout, stderr at /dev/null
devnull = "/dev/null"
if hasattr(os, "devnull"):
# Python has set os.devnull on this system, use it instead as it might be
# different than /dev/null.
devnull = os.devnull
devnull_fd = os.open(devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2)
os.close(devnull_fd)
# now that we have redirected stderr to /dev/null, any uncaught exceptions will
# get sent to /dev/null, so make sure we log them.
#
# (we don't normally expect reactor.run to raise any exceptions, but this will
# also catch any other uncaught exceptions before we get that far.)
def excepthook(
type_: Type[BaseException],
value: BaseException,
traceback: Optional[TracebackType],
) -> None:
logger.critical("Unhanded exception", exc_info=(type_, value, traceback))
sys.excepthook = excepthook
# Set umask to default to safe file permissions when running as a root daemon. 027
# is an octal number which we are typing as 0o27 for Python3 compatibility.
os.umask(0o27)
# Change to a known directory. If this isn't done, starting a daemon in a
# subdirectory that needs to be deleted results in "directory busy" errors.
os.chdir(chdir)
try:
lock_fh.write("%s" % (os.getpid()))
lock_fh.flush()
except OSError:
logger.error("Unable to write pid to the pidfile.")
print("Unable to write pid to the pidfile.")
sys.exit(1)
# write a log line on SIGTERM.
def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn:
logger.warning("Caught signal %s. Stopping daemon." % signum)
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm)
# Cleanup pid file at exit.
def exit() -> None:
logger.warning("Stopping daemon.")
os.remove(pid_file)
sys.exit(0)
atexit.register(exit)
logger.warning("Starting daemon.")
|
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Input, InputSpec, Add, Subtract, Dot
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.callbacks import ModelCheckpoint
import os
from keras import regularizers
from keras import initializers, constraints, activations
from keras.layers import Layer
import numpy as np
class ModifiedSoftmaxLayer(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
# Example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
#@interfaces.legacy_dense_support
def __init__(self, units,
batch_size = 128,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(ModifiedSoftmaxLayer, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.batch_size = batch_size
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
#self.batch_size = input_shape[-1]
input_features = input_shape[1]
self.feature_size = input_shape[1]
print("input_shape: " , input_shape)
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.factormachine = self.add_weight(shape=(self.batch_size, self.units, input_features),
initializer=self.kernel_initializer,
                                      name='factormachine',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
x = inputs
W = self.kernel
#print("x:", x.shape)
#print("W:", W.shape)
weights_norm = tf.norm(W, axis=0, keepdims=True)
weights = tf.div(W, weights_norm, name="normalize_weights")
logits = tf.matmul(x, weights)
#print("self.factormachine:", self.factormachine.shape)
#Factor machine
#factors = K.dot(self.factormachine, x )
#print("self.batch_size: ", self.batch_size)
#print("self.feature_size: ", self.feature_size)
        # K.batch_dot()  # stray call with no arguments; it would raise a TypeError, so it is disabled
features = []
for ii in range(self.batch_size):
xi = x[ii]
wi = self.factormachine[ii]
feature = tf.multiply(wi, xi)
feature = K.transpose(feature)
#print("feature: ", feature.shape)
features.append(feature)
feature_machine = []
for ii in range(self.batch_size):
#sum_a_keepdims = K.sum(a , axis=-1 , keepdims=True)
#K.sum()
sum = K.sum(features[ii], axis=0, keepdims=False)
#print("sum: ", sum)
diffs = []
#print("K.shape(features[ii]): ", K.shape(features[ii]))
for jj in range(self.feature_size):
diff = tf.subtract(sum, features[ii][jj]) #Subtract()([sum, x]) for x in features[ii]]
diffs.append(diff)
#print("diffs: ", diffs)
dots = []
for jj in range(self.feature_size):
#dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, features[ii])]
dot = tf.multiply(diffs[jj], features[ii][jj]) #K.dot(diffs[jj], features[ii][jj])
dots.append(dot)
sum = K.sum(dots, axis=0, keepdims=False)
feature_machine.append(sum)
# print("dots: ", dots)
# print("dots: ", dots[0].shape)
        output = logits
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')
        output = output + feature_machine
        if self.activation is not None:
            output = self.activation(output)
        return output
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ModifiedSoftmaxLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ErrorLearning(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
# Example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
#@interfaces.legacy_dense_support
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(ErrorLearning, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
x = inputs
W = self.kernel
weights_norm = tf.norm(W, axis=0, keepdims=True)
weights = tf.div(W, weights_norm, name="normalize_weights")
logits = tf.matmul(x, weights)
        output = logits
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')
        if self.activation is not None:
            output = self.activation(output)
        return output
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
        base_config = super(ErrorLearning, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
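# Usage sketch (hypothetical; assumes a fixed batch size equal to ``batch_size``
# and the TF1-style backend implied by ``tf.div``):
#
#     model = Sequential()
#     model.add(Dense(64, activation='relu', input_shape=(16,)))
#     model.add(ModifiedSoftmaxLayer(10, batch_size=128, activation='softmax'))
#     model.compile(optimizer='adam', loss='categorical_crossentropy')
#     model.fit(x_train, y_train, batch_size=128, epochs=5)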
|
"""This module provides useful functions for the MFE package.
Attributes:
VALID_VALUE_PREFIX (:obj:`str`): Prefix which all tuples that
keep valid values for custom user options must use in its name.
This prefix is used to enable the automatic detection of these
groups.
VALID_GROUPS (:obj:`tuple` of :obj:`str`): Supported groups of
metafeatures of pymfe.
GROUP_PREREQUISITES (:obj:`tuple` of :obj:`List` or :obj:`str`):
Tuple for requisites of each group entry in ``VALID_GORUPS``.
This tuple must have one-to-one correspondence with ``VALID_GROUPS``.
Insert None is the correspondent metafeature group has no other
metafeature group as dependency. Each entry of this tuple can be
either a string (single dependency) or a collection of strings
(multiple dependencies), and every string must correspond to some
value in ``VALID_GROUPS``. In any case, if the used select some
metafeature group with some dependency, then this dependencies
will be automatically inserted as metafeature groups in the MFE
model. The output filtering (to remove possibly unwanted collection
of metafeatures) should be left to post-processing steps.
VALID_MFECLASSES (:obj:`tuple` of Classes): Metafeature extractors
predefined classes, where to perform the search of metafeature-ex-
traction methods.
VALID_SUMMARY (:obj:`tuple` of :obj:`str`): Supported summary
functions to combine metafeature values.
VALID_TIMEOPT (:obj:`tuple` of :obj:`str`): valid options for time
measurements while extracting metafeatures.
VALID_RESCALE (:obj:`tuple` of :obj:`str`): valid options for res-
caling numeric data while fitting dataset.
MTF_PREFIX (:obj:`str`): prefix of metafeature-extraction method
names for classes in ``VALID_MFECLASSES``. For example, the metafeature
called ``inst_nr`` is implemented in the method named ``[MTF_PREFIX]_-
inst_nr.`` Prefixation is used to enable the automatic detection of
these methods.
PRECOMPUTE_PREFIX (:obj:`str`): prefix for precomputation method names.
If a method of a class in ``VALID_MFECLASSES`` starts with this prefix,
it is automatically executed to gather values that this class frequen-
tly uses. These values are shared between all feature-extraction rela-
ted methods of all ``VALID_MFECLASSES`` classes to avoid redundant com-
putation.
TIMEOPT_AVG_PREFIX (:obj:`str`): prefix for time options based on the
average of gathered metrics. It means necessarily that; if this cons-
tant value prefixes an option, then this option is supposed to divide
the gathered time elapsed metrics by the cardinality of the features
extracted (``cardinality`` means ``number of``).
TIMEOPT_SUMMARY_SUFFIX (:obj:`str`): suffix for time options which in-
clude summarization time alongside the time necessary for the extracti-
on of the feature. It means that, if this constant value suffixes a ti-
me option, then the time metrics must include the time necessary for
the summarization of each value with cardinality greater than one
(``cardinality`` means ``number of values``).
"""
import typing as t
import inspect
import collections.abc
import warnings
import time
import sys
import re
import numpy as np
import sklearn.preprocessing
import patsy
import pymfe._summary as _summary
import pymfe.general as general
import pymfe.statistical as statistical
import pymfe.info_theory as info_theory
import pymfe.landmarking as landmarking
import pymfe.relative as relative
import pymfe.clustering as clustering
import pymfe.model_based as model_based
import pymfe.complexity as complexity
import pymfe.itemset as itemset
import pymfe.concept as concept
import pymfe.scoring as scoring
VALID_VALUE_PREFIX = "VALID_"
DEFAULT_GROUP = (
"general",
"info-theory",
"statistical",
"model-based",
"landmarking",
) # type: t.Tuple[str, ...]
VALID_GROUPS = (
"landmarking",
"general",
"statistical",
"model-based",
"info-theory",
"relative",
"clustering",
"complexity",
"itemset",
"concept"
) # type: t.Tuple[str, ...]
GROUP_PREREQUISITES = (
None,
None,
None,
None,
None,
"landmarking",
None,
None,
None,
None
) # type: t.Tuple[t.Optional[str], ...]
VALID_MFECLASSES = (
landmarking.MFELandmarking,
general.MFEGeneral,
statistical.MFEStatistical,
model_based.MFEModelBased,
info_theory.MFEInfoTheory,
relative.MFERelativeLandmarking,
clustering.MFEClustering,
complexity.MFEComplexity,
itemset.MFEItemset,
concept.MFEConcept
) # type: t.Tuple
VALID_SUMMARY = (*_summary.SUMMARY_METHODS, ) # type: t.Tuple[str, ...]
VALID_TIMEOPT = (
"avg",
"avg_summ",
"total",
"total_summ",
)
_RESCALE_SCALERS = {
"standard": sklearn.preprocessing.StandardScaler,
"min-max": sklearn.preprocessing.MinMaxScaler,
"robust": sklearn.preprocessing.RobustScaler,
}
VALID_RESCALE = (*_RESCALE_SCALERS, )
TIMEOPT_AVG_PREFIX = "avg"
TIMEOPT_SUMMARY_SUFFIX = "summ"
MTF_PREFIX = "ft_"
PRECOMPUTE_PREFIX = "precompute_"
POSTPROCESS_PREFIX = "postprocess_"
TypeMtdTuple = t.Tuple[str, t.Callable[[], t.Any]]
"""Type annotation which describes the a metafeature method tuple."""
TypeExtMtdTuple = t.Tuple[str, t.Callable[[], t.Any], t.Sequence]
"""Type annotation which extends TypeMtdTuple with extra field (``Args``)"""
_TYPE_NUMERIC = (
int,
float,
np.number,
)
"""Tuple with generic numeric types."""
TypeNumeric = t.TypeVar(
"TypeNumeric",
int,
float,
np.number,
)
"""Typing alias of generic numeric types for static code checking."""
def warning_format(message: str,
category: t.Type[Warning],
filename: str,
lineno: int,
line: str = None) -> str:
"""Change warnings format to a simpler one.
Args:
message (:obj:`str`): warning message to print.
category: not used. Just to maintain consistency with warnings API.
filename: not used. Just to maintain consistency with warnings API.
lineno: not used. Just to maintain consistency with warnings API.
line: not used. Just to maintain consistency with warnings API.
Return:
        str: formatted warning message.
"""
# pylint: disable=W0613
return "Warning: {}\n".format(message)
warnings.formatwarning = warning_format
def _check_values_in_group(value: t.Union[str, t.Iterable[str]],
valid_group: t.Iterable[str],
wildcard: t.Optional[str] = "all"
) -> t.Tuple[t.Tuple[str, ...], t.Tuple[str, ...]]:
"""Checks if a value is in a set or a set of values is a subset of a set.
Args:
        value (:obj:`iterable` of :obj:`str` or :obj:`str`): value(s) to check
            if it is (they are) in the given valid_group of strings.
        valid_group (:obj:`iterable` of :obj:`str`): a valid_group of strings
            representing the valid tokens against which ``value`` is verified.
        wildcard (:obj:`str`, optional): a value which represents ``all
            values``, ignoring capital letters, so, for example, the values
            ``all``, ``ALL`` and any mix of upper and lower case are all
            considered to be the same wildcard token.
Returns:
tuple(tuple, tuple): A pair of tuples containing, respectively, values
that are in the given valid_group and those that are not.
Raises:
TypeError: if ``value`` is not an iterable type or some of its elements
are not a :obj:`str` type.
"""
    if not isinstance(value, collections.abc.Iterable):
raise TypeError("Parameter type is not "
"consistent ({0}).".format(type(value)))
in_group = tuple() # type: t.Tuple[str, ...]
not_in_group = tuple() # type: t.Tuple[str, ...]
if isinstance(value, str):
value = value.lower()
if wildcard and value == wildcard.lower():
in_group = tuple(valid_group)
elif value in valid_group:
in_group = (value, )
else:
not_in_group = (value, )
else:
value_set = set(map(str.lower, value))
if wildcard and wildcard.lower() in value_set:
in_group = tuple(valid_group)
else:
in_group = tuple(value_set.intersection(valid_group))
not_in_group = tuple(value_set.difference(valid_group))
return tuple(in_group), tuple(not_in_group)
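# Illustrative examples (hypothetical inputs; the iterable case is unordered
# because sets are used internally):
#   _check_values_in_group("all", ("a", "b"))      -> (("a", "b"), ())
#   _check_values_in_group(["a", "c"], ("a", "b")) -> (("a",), ("c",))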
def get_prefixed_mtds_from_class(
class_obj: t.Any,
prefix: str,
only_name: bool = False,
prefix_removal: bool = False,
) -> t.Union[t.List[str], t.List[TypeMtdTuple]]:
"""Get all class methods from ``class_obj`` prefixed with ``prefix``.
Args:
class_obj (:obj:`Class`): Class from which the methods should be
extracted.
prefix (:obj:`str`): prefix which method names must have in order
to it be gathered.
only_name (:obj:`bool`, optional): if True, return just the name
of the methods.
        prefix_removal (:obj:`bool`, optional): If True, then the returned
            method names will have the ``prefix`` removed.
    Returns:
        list(tuple): if ``only_name`` is False, then this function returns
            a list of tuples in the form (`mtd_name`, `mtd_address`) of
all class methods from ``class_obj`` prefixed with ``prefix``.
If ``only_name`` is True, this list will contain just the
method names.
"""
class_methods = inspect.getmembers(
class_obj, predicate=inspect.ismethod) # type: t.List[TypeMtdTuple]
# It is assumed that all feature-extraction related methods
# name are all prefixed with "MTF_PREFIX" and all precomputa-
# tion methos, prefixed with "PRECOMPUTE_PREFIX".
feat_mtd_list = [] # type: t.List
for ft_method in class_methods:
mtd_name, remaining_data = ft_method[0], ft_method[1:]
if mtd_name.startswith(prefix):
if prefix_removal:
mtd_name = remove_prefix(value=mtd_name, prefix=prefix)
if only_name:
feat_mtd_list.append(mtd_name)
elif prefix_removal:
feat_mtd_list.append((mtd_name, *remaining_data))
else:
feat_mtd_list.append(ft_method)
return feat_mtd_list
def _get_all_prefixed_mtds(
prefix: str,
groups: t.Tuple[str, ...],
update_groups_by: t.Optional[t.Union[t.FrozenSet[str],
t.Set[str]]] = None,
prefix_removal: bool = False,
custom_class_: t.Any = None,
) -> t.Dict[str, t.Tuple]:
"""Get all methods prefixed with ``prefix`` in predefined feature ``groups``.
The predefined metafeature groups are inside ``VALID_GROUPS`` attribute.
Args:
prefix (:obj:`str`): gather methods prefixed with this value.
groups (:obj:`Tuple` of :obj:`str`): a tuple of feature group names.
It can assume value :obj:`NoneType`, which is interpreted as ``no
filter`` (i.e. all features of all groups will be returned).
        update_groups_by (:obj:`set` of :obj:`str`, optional): values to filter
            ``groups``. If given, this function also returns a new version of
            ``groups`` in which every group that does not contribute with any
            new method to the final output has been removed. This is
            particularly useful for precomputations, as it helps avoiding
            unnecessary precomputation methods from feature groups not related
            to the user selected features.
prefix_removal (:obj:`bool`, optional): if True, then the returned
method names will not have the ``prefix``.
custom_class_ (Class, optional): used for inner testing purposes. If
not None, the given class will be used as reference to extract
the prefixed methods.
    Returns:
        dict: the "methods" key maps to all methods filtered by ``groups``. If
            ``update_groups_by`` is given and non-empty, the dict also has a
            "groups" key mapping to a new version of ``groups``, with all
            elements that do not contribute with any element listed in the set
            ``update_groups_by`` removed.
"""
groups = tuple(set(VALID_GROUPS).intersection(groups))
if not groups and custom_class_ is None:
return {"methods": tuple(), "groups": tuple()}
if custom_class_ is None:
verify_groups = tuple(VALID_GROUPS)
verify_classes = tuple(VALID_MFECLASSES)
else:
verify_groups = ("test_methods", )
verify_classes = (custom_class_, )
methods_by_group = {
ft_type_id: get_prefixed_mtds_from_class(
class_obj=mfe_class,
prefix=prefix,
prefix_removal=prefix_removal)
for ft_type_id, mfe_class in zip(verify_groups, verify_classes)
if ft_type_id in groups or custom_class_ is not None
}
gathered_methods = [] # type: t.List[t.Union[str, TypeMtdTuple]]
new_groups = [] # type: t.List[str]
for group_name in methods_by_group:
group_mtds = methods_by_group[group_name]
gathered_methods += group_mtds
if update_groups_by:
group_mtds_names = {
remove_prefix(mtd_name, prefix=MTF_PREFIX)
if not prefix_removal
else mtd_name
for mtd_name, _ in group_mtds
}
if not update_groups_by.isdisjoint(group_mtds_names):
new_groups.append(group_name)
ret_val = {
"methods": tuple(gathered_methods),
} # type: t.Dict[str, t.Tuple]
if update_groups_by:
ret_val["groups"] = tuple(new_groups)
return ret_val
def _preprocess_iterable_arg(
values: t.Union[str, t.Iterable[str]]) -> t.List[str]:
"""Process ``values`` to a canonical form.
This canonical form consists in removing repeated elements from ``values``,
and cast all elements to lower-case.
Args:
values (:obj:`iterable` of :obj:`str` or :obj:`str`): feature names or
a collection of to be processed into a canonical form.
Returns:
        list: ``values`` as a list with duplicates removed and every string
            lower-cased.
"""
if isinstance(values, str):
values = {values}
return list(map(str.lower, set(values)))
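# Illustrative example (hypothetical input; ordering is arbitrary because a set
# is used internally): _preprocess_iterable_arg(["Mean", "SD", "mean"]) returns
# ["mean", "sd"] up to ordering.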
def _extract_mtd_args(ft_mtd_callable: t.Callable) -> t.Tuple[str, ...]:
"""Extracts arguments from given method.
Args:
ft_mtd_callable (:obj:`callable`): a callable related to a feature
extraction method.
Returns:
        tuple: containing the names of the arguments of ``ft_mtd_callable``.
Raises:
TypeError: if ``ft_mtd_callable`` is not a valid callable.
"""
ft_mtd_signature = inspect.signature(ft_mtd_callable)
mtd_callable_args = tuple(ft_mtd_signature.parameters.keys())
return mtd_callable_args
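# Illustrative example (hypothetical callable):
# _extract_mtd_args(lambda x, y=1: None) returns ('x', 'y').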
def summarize(
features: t.Union[np.ndarray, t.Sequence],
callable_sum: t.Callable,
callable_args: t.Optional[t.Dict[str, t.Any]] = None,
remove_nan: bool = True,
) -> t.Union[t.Sequence, TypeNumeric]:
"""Returns ``feature`` values summarized by ``callable_sum``.
Args:
features (:obj:`Sequence` of numerics): Sequence containing values
to summarize.
callable_sum (:obj:`callable`): callable of the method which im-
plements the desired summary function.
        callable_args (:obj:`dict`, optional): arguments to the summary
            function. The expected dictionary format is the following:
            {`argument_name`: value}. To know the summary function arguments,
            you need to check out the documentation of the method which
            implements it.
        remove_nan (:obj:`bool`, optional): check and remove all elements
            in `features` which are not numeric. Note that :obj:`np.inf`
            is still considered numeric (:obj:`float` type).
    Returns:
        float: value of summarized feature values, if possible. May return
            :obj:`np.nan` if the summary function call raises TypeError,
            ValueError or ZeroDivisionError.
Raises:
AttributeError: if ``callable_sum`` is invalid.
TypeError: if ``features`` is not a sequence.
"""
processed_feat = np.array(features)
if remove_nan:
numeric_vals = list(map(isnumeric, features))
processed_feat = processed_feat[numeric_vals]
processed_feat = processed_feat.astype(np.float32)
if callable_args is None:
callable_args = {}
try:
metafeature = callable_sum(processed_feat, **callable_args)
except (TypeError, ValueError, ZeroDivisionError):
metafeature = np.nan
return metafeature
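# Illustrative examples (hypothetical inputs): summarize([1.0, 2.0, 3.0], np.mean)
# returns 2.0; summarize([1.0, "a", 3.0], np.mean) drops the non-numeric entry
# first (when ``remove_nan`` is True) and also returns 2.0; if ``callable_sum``
# raises TypeError, ValueError or ZeroDivisionError, np.nan is returned instead.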
def get_feat_value(
mtd_name: str,
mtd_args: t.Dict[str, t.Any],
mtd_callable: t.Callable,
suppress_warnings: bool = False) -> t.Union[TypeNumeric, np.ndarray]:
"""Extract features from ``mtd_callable`` with ``mtd_args`` as args.
Args:
mtd_name (:obj:`str`): name of the feature-extraction method
to be invoked.
mtd_args (:obj:`dict`): arguments of method to be invoked. The
expected format of the arguments is {`argument_name`: value}.
In order to know the method arguments available, you need to
check its documentation.
mtd_callable (:obj:`callable`): callable of the feature-extraction
method.
        suppress_warnings (:obj:`bool`, optional): if True, all warnings
            raised before or after invoking the method will be ignored.
            The method (from ``mtd_callable``) itself may still raise
            warnings.
Returns:
numeric or array: return value of the feature-extraction method.
Raises:
AttributeError: if ``mtd_callable`` is not valid.
"""
try:
features = mtd_callable(**mtd_args)
except (TypeError, ValueError, ZeroDivisionError) as type_e:
if not suppress_warnings:
warnings.warn(
"Error extracting {0}: \n{1}.\nWill set it "
"as 'np.nan' for all summary functions.".format(
mtd_name, repr(type_e)), RuntimeWarning)
features = np.nan
return features
def build_mtd_kwargs(mtd_name: str,
mtd_args: t.Iterable[str],
inner_custom_args: t.Optional[t.Dict[str, t.Any]] = None,
user_custom_args: t.Optional[t.Dict[str, t.Any]] = None,
precomp_args: t.Optional[t.Dict[str, t.Any]] = None,
suppress_warnings: bool = False) -> t.Dict[str, t.Any]:
"""Build a ``kwargs`` (:obj:`dict`) for a feature-extraction :obj:`callable`.
Args:
mtd_name (:obj:`str`): name of the method.
mtd_args (:obj:`iterable` of :obj:`str`): iterable containing the name
of all arguments of the callable.
inner_custom_args (:obj:`dict`, optional): custom arguments for inner
usage, for example, to pass ``X``, ``y`` or other user-independent
arguments necessary for the callable. The expected format of this
dict is {`argument_name`: value}.
        user_custom_args (:obj:`dict`, optional): assumes the same model as the
            dict above, but this one keeps user-dependent arguments for the
            method callable, for example, the number of bins of a
            histogram-like metafeature or the degrees of freedom of a standard
            deviation-related metafeature. The name of the arguments must be
            verified in the corresponding method documentation.
        precomp_args (:obj:`dict`, optional): precomputed cached arguments
            which may be used by the feature-extraction method to speed up its
            calculations.
        suppress_warnings (:obj:`bool`, optional): if True, do not show any
            warnings about unknown callable parameters.
Returns:
dict: a ready-to-use ``kwargs`` for the correspondent callable. The
format is {``argument_name``: value}.
"""
if user_custom_args is None:
user_custom_args = {}
if inner_custom_args is None:
inner_custom_args = {}
if precomp_args is None:
precomp_args = {}
combined_args = {
**user_custom_args,
**inner_custom_args,
**precomp_args,
}
callable_args = {
custom_arg: combined_args[custom_arg]
for custom_arg in combined_args if custom_arg in mtd_args
}
if not suppress_warnings:
unknown_arg_set = (unknown_arg
for unknown_arg in user_custom_args.keys()
if unknown_arg not in mtd_args
) # type: t.Generator[str, None, None]
for unknown_arg in unknown_arg_set:
warnings.warn(
'Unknown argument "{0}" for method "{1}".'.format(
unknown_arg, mtd_name), UserWarning)
return callable_args
def check_summary_warnings(value: t.Union[TypeNumeric, t.Sequence, np.ndarray],
name_feature: str, name_summary: str) -> None:
"""Check if there is :obj:`np.nan` within summarized values.
Args:
value (numeric or :obj:`Sequence`): summarized values.
name_feature (:obj:`str`): name of the feature-extraction
method used to generate the values which was summarized.
name_summary (:obj:`str`): name of the summary method
used to produce `value`.
"""
    if not isinstance(value, collections.abc.Iterable):
value = [value]
if any(np.isnan(value)):
warnings.warn(
"Failed to summarize {0} with {1}. "
"(generated NaN).".format(name_feature, name_summary),
RuntimeWarning)
def convert_alias(groups_alias: t.Iterable[t.Iterable],
values: t.Optional[t.Union[t.Iterable[str], str]] = None
) -> t.List[str]:
"""Change the values of the alias to the groups.
"""
if not values:
values = []
elif isinstance(values, str):
values = [values]
else:
values = list(values)
for alias_name, alias_value in groups_alias:
# verifying if the alias is in the set
if alias_name in values:
values.remove(alias_name) # remove from values
values = list(values) + list(alias_value) # add real groups
return values
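# Illustrative example (hypothetical alias):
# convert_alias([("default", ("general", "statistical"))], "default") returns
# ["general", "statistical"].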
def process_generic_set(
values: t.Optional[t.Union[t.Iterable[str], str]],
group_name: str,
wildcard: t.Optional[str] = "all",
groups_alias: t.Iterable[t.Iterable] = None,
allow_none: bool = False,
allow_empty: bool = False,
) -> t.Tuple[str, ...]:
"""Check if given ``values`` are in an internal valid set named ``group_name``.
Args:
        values (:obj:`iterable` of :obj:`str` or :obj:`str`): a group of values
            or a single value to process.
        group_name (:obj:`str`, optional): name of the internal group inside
            which ``values`` should be searched. Please check this module
            Attribute documentation to verify which groups are available as
            valid options. The constant ``VALID_VALUE_PREFIX`` always prefixes
            the group options, and this parameter must be the name of the
            group without its prefix. For example, to select the
            ``VALID_CLASSES`` group as the ``values`` reference, ``group_name``
            must be just ``classes``.
        groups_alias (:obj:`iterable` of :obj:`iterable`): a list of tuples of
            aliases. Each tuple should have the alias name in the first
            position and the real groups it maps to in the second position.
        wildcard (:obj:`str`, optional): special value meaning ``accept any
            value``.
        allow_none (:obj:`bool`, optional): if True, then :obj:`NoneType` is
            accepted as ``values``. Note that, if ``values`` is an iterable,
            it does not mean that :obj:`NoneType` becomes a valid value within
            it, but ``values`` itself can assume the value :obj:`NoneType`.
        allow_empty (:obj:`bool`, optional): if True, then ``values`` can be a
            zero-length iterable.
Return:
tuple: lower-cased tuple with all valid values.
Raises:
TypeError: if ``group_name`` is :obj:`NoneType`.
        ValueError: These are the conditions for raising this exception:
            - Some element in ``values`` is not a valid value (i.e., it is not
              in the valid values selected based on the ``group_name``
              argument).
            - ``values`` is None and ``allow_none`` is False.
            - ``values`` is an empty sequence and ``allow_empty`` is False.
            - ``group_name`` is ``summary`` or ``features``, as both of
              these groups have their own special function to process
              user custom arguments (check ``process_features`` and
              ``process_summary`` for more information).
            - ``group_name`` is not a valid group for ``values`` reference.
"""
if not group_name:
raise TypeError('"group_name" can not be empty or None.')
if values is None:
if allow_none:
return tuple()
raise ValueError('"Values" can not be None. (while checking '
'group "{}").'.format(group_name))
if values is not None and not values:
if allow_empty:
return tuple()
raise ValueError('"Values" can not be empty. (while checking '
'group "{}")'.format(group_name))
if group_name.upper() in ("SUMMARY", "FEATURES"):
raise ValueError('Forbidden "group_name" option ({}). There is a '
"specify processing method for it".format(group_name))
_module_name = sys.modules[__name__]
try:
valid_values = inspect.getattr_static(
_module_name, "{0}{1}".format(VALID_VALUE_PREFIX,
group_name.upper()))
except AttributeError:
raise ValueError('Invalid "group_name" "{}". Check _internal '
"module documentation to verify which ones "
"are available for use.".format(group_name))
if groups_alias:
values = convert_alias(groups_alias, values)
in_valid_set, not_in_valid_set = _check_values_in_group(
value=values,
valid_group=valid_values,
wildcard=wildcard)
if not_in_valid_set:
raise ValueError("Unknown values: {0}. "
"Please select values in {1}.".format(
not_in_valid_set, valid_values))
return in_valid_set
def solve_group_dependencies(
groups: t.Tuple[str, ...],
) -> t.Tuple[t.Tuple[str, ...], t.FrozenSet[str]]:
"""Solve dependencies between groups.
    Those dependencies must be registered in the ``GROUP_PREREQUISITES`` tuple.
"""
inserted_dependencies = set()
cur_dependencies = None # type: t.Optional[t.Union[t.Set[str], str]]
for group in groups:
if group in VALID_GROUPS:
cur_dependencies = GROUP_PREREQUISITES[VALID_GROUPS.index(group)]
if cur_dependencies:
if isinstance(cur_dependencies, str):
cur_dependencies = {cur_dependencies}
inserted_dependencies.update(
set(cur_dependencies).difference(groups))
groups = tuple(set(groups).union(inserted_dependencies))
return groups, frozenset(inserted_dependencies)
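# Illustrative example: since GROUP_PREREQUISITES maps "relative" to
# "landmarking", solve_group_dependencies(("relative",)) returns a groups tuple
# containing both "relative" and "landmarking" (in arbitrary order) together
# with frozenset({"landmarking"}) as the set of inserted dependencies.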
def process_generic_option(
value: t.Optional[str],
group_name: str,
allow_none: bool = False,
allow_empty: bool = False,
) -> t.Optional[str]:
"""Check if given ``value`` is in an internal reference group of values.
This function is essentially a wrapper for the ``process_generic_set``
function, with some differences:
- Only string-typed values are accepted, with the exception that
it can also assume :obj:`NoneType` if ``allow_none`` is True.
        - The return value is not a tuple, but instead a lower-cased
          version of ``value``.
Check ``process_generic_set`` for more precise information about this
process.
Return:
str: lower-cased version of ``value``.
Raises:
TypeError: if value is neither :obj:`NoneType` (and ``allow_none`` is
also True) nor a :obj:`str` type object.
All exceptions from ``process_generic_set`` are also raised, with the
same conditions as described in that function documentation.
"""
if value is not None and not isinstance(value, str):
raise TypeError('"value" (group name {}) must be a string-'
"type object (got {}).".format(group_name,
type(value)))
processed_value = process_generic_set(
values=value,
group_name=group_name,
wildcard=None,
allow_none=allow_none,
allow_empty=allow_empty)
canonical_value = None
if processed_value:
canonical_value = processed_value[0]
if not isinstance(canonical_value, str):
canonical_value = None
return canonical_value
def process_summary(
summary: t.Union[str, t.Iterable[str]],
wildcard: str = "all"
) -> t.Tuple[t.Tuple[str, ...], t.Tuple[TypeExtMtdTuple, ...]]:
"""Generate metadata from ``summary`` MFE instantiation argument.
Args:
summary (:obj:`t.Iterable` of :obj:`str` or a :obj:`str`): a
summary function or a list of these, which are used to
combine different calculations of the same metafeature. Check
``MFE`` Class documentation for more information about this
parameter.
wildcard (:obj:`str`): value to be used as ``select all`` value.
Returns:
        tuple(tuple, tuple): the first field contains all valid lower-cased
            summary function names, while the second field contains internal
            metadata about the methods which implement each summary function.
This last tuple model is:
(
`summary_mtd_name`,
`summary_mtd_callable`,
`summary_mtd_args`,
)
Raises:
TypeError: if `summary` is not :obj:`NoneType`, empty, a valid string
nor an iterable containing valid group identifiers as strings.
"""
if not summary:
return tuple(), tuple()
in_group, not_in_group = _check_values_in_group(
value=summary,
valid_group=VALID_SUMMARY,
wildcard=wildcard)
if not_in_group:
raise ValueError("Unknown summary: {0}. "
"Please select values in {1}.".format(
not_in_group, VALID_SUMMARY))
summary_methods = [] # type: t.List[TypeExtMtdTuple]
available_sum_methods = [] # type: t.List[str]
for summary_func in in_group:
summary_mtd_callable = _summary.SUMMARY_METHODS.get(summary_func)
if not summary_mtd_callable:
warnings.warn("Missing summary function "
"{0} at _summary module.".format(summary_func),
RuntimeWarning)
else:
try:
summary_mtd_args = _extract_mtd_args(summary_mtd_callable)
except ValueError:
summary_mtd_args = tuple()
summary_mtd_pack = (
summary_func,
summary_mtd_callable,
summary_mtd_args,
)
summary_methods.append(summary_mtd_pack)
available_sum_methods.append(summary_func)
return tuple(available_sum_methods), tuple(summary_methods)
def process_features(
features: t.Union[str, t.Iterable[str]],
groups: t.Tuple[str, ...],
wildcard: str = "all",
suppress_warnings: bool = False,
custom_class_: t.Any = None,
) -> t.Tuple[t.Tuple[str, ...],
t.Tuple[TypeExtMtdTuple, ...],
t.Tuple[str, ...]]:
"""Generate metadata from ``features`` MFE instantiation argument.
    This function is expected to be used after the ``process_groups`` function,
    as the ``groups`` parameter is expected to be in a canonical form
    (lower-cased values inside a tuple).
    Args:
        features (:obj:`iterable` of `str` or `str`): iterable containing a
            collection of features or a string describing a single feature.
            Note that only features that are in the given `groups` will be
            returned.
        groups (:obj:`Tuple` of :obj:`str`, optional): collection containing
            one or more group identifiers. Check out the ``MFE`` class
            documentation for more information.
wildcard (:obj:`str`, optional): value to be used as ``select all`` for
``features`` argument.
suppress_warnings (:obj:`bool`, optional): if True, hide all warnings
raised during this method processing.
custom_class_ (Class, optional): used for inner testing purposes. If
not None, the given class will be used as reference to extract
the metafeature extraction methods.
Returns:
        tuple(tuple, tuple, tuple): a tuple with three fields. The first field
            holds all feature names extracted by this method, giving the user
            easy access to the available features in the model. The second
            field is a tuple for internal usage, containing metadata as tuples
            in the following format: (`mtd_name`, `mtd_callable`, `mtd_args`),
            i.e., the first item is a string with the name of a
            feature-extraction related method, the second is a callable object
            for the corresponding method, and the third holds the method
            arguments. The third field contains the (possibly updated) groups.
Raises:
ValueError: if features is :obj:`NoneType` or is empty.
"""
if not features:
raise ValueError('"features" can not be None nor empty.')
if not groups:
if custom_class_ is None:
groups = tuple()
else:
groups = ("custom", )
processed_ft = _preprocess_iterable_arg(features) # type: t.List[str]
reference_values = None
if wildcard not in processed_ft:
reference_values = frozenset(processed_ft)
mtds_metadata = _get_all_prefixed_mtds(
prefix=MTF_PREFIX,
update_groups_by=reference_values,
groups=groups,
custom_class_=custom_class_,
prefix_removal=True,
) # type: t.Dict[str, t.Tuple]
ft_mtds_filtered = mtds_metadata.get(
"methods", tuple()) # type: t.Tuple[TypeMtdTuple, ...]
groups = mtds_metadata.get("groups", groups)
del mtds_metadata
if wildcard in processed_ft:
processed_ft = [mtd_name for mtd_name, _ in ft_mtds_filtered]
available_feat_names = [] # type: t.List[str]
ft_mtd_processed = [] # type: t.List[TypeExtMtdTuple]
for ft_mtd_tuple in ft_mtds_filtered:
ft_mtd_name, ft_mtd_callable = ft_mtd_tuple
if ft_mtd_name in processed_ft:
mtd_callable_args = _extract_mtd_args(ft_mtd_callable)
extended_item = (*ft_mtd_tuple,
mtd_callable_args) # type: TypeExtMtdTuple
ft_mtd_processed.append(extended_item)
available_feat_names.append(ft_mtd_name)
processed_ft.remove(ft_mtd_name)
if not suppress_warnings:
for unknown_ft in processed_ft:
warnings.warn('Unknown feature "{}"'.format(unknown_ft),
UserWarning)
return tuple(available_feat_names), tuple(ft_mtd_processed), groups
def _patch_precomp_groups(
precomp_groups: t.Optional[t.Union[str, t.Iterable[str]]],
groups: t.Optional[t.Tuple[str, ...]] = None,
) -> t.Union[str, t.Iterable[str]]:
"""Enforce precomputation in model-based metafeatures."""
if not precomp_groups:
precomp_groups = set()
# Enforce precomputation from model-based metafeature group
# due to strong dependencies of machine learning models.
if groups and not isinstance(precomp_groups, str):
if "model-based" in groups and "model-based" not in precomp_groups:
precomp_groups = set(precomp_groups).union({"model-based"})
return precomp_groups
def process_precomp_groups(
precomp_groups: t.Optional[t.Union[str, t.Iterable[str]]],
groups: t.Optional[t.Tuple[str, ...]] = None,
wildcard: str = "all",
suppress_warnings: bool = False,
custom_class_: t.Any = None,
**kwargs) -> t.Dict[str, t.Any]:
"""Process ``precomp_groups`` argument while fitting into a MFE model.
This function is expected to be used after ``process_groups`` function,
as ``groups`` parameter is expected to be in a canonical form (lower-cased
values inside a tuple).
Args:
        precomp_groups (:obj:`iterable` of `str` or `str`): a single or a
            sequence of metafeature group names whose precomputation methods
            should be taken. Note that any group not in ``groups`` (see
            argument below) is completely ignored.
        groups (:obj:`Tuple` of :obj:`str`, optional): collection containing
            one or more group identifiers. Check out ``MFE`` class
            documentation for more information.
wildcard (:obj:`str`, optional): value to be used as ``select all``
for ``precompute`` argument.
suppress_warnings (:obj:`bool`, optional): if True, suppress warnings
invoked while processing precomputation option.
        custom_class_ (Class, optional): used for inner testing purposes. If
            not None, the given class will be used as reference to extract
            the precomputation methods.
        **kwargs: used to pass extra custom arguments to precomputation
            methods.
Returns:
        dict: precomputed values computed from ``kwargs`` by the precomputation
            methods of the valid selected metafeature groups.
"""
if groups is None:
groups = tuple()
precomp_groups = _patch_precomp_groups(precomp_groups, groups)
if not precomp_groups and custom_class_ is None:
return {}
processed_precomp_groups = _preprocess_iterable_arg(
precomp_groups) # type: t.Sequence[str]
if wildcard in processed_precomp_groups:
processed_precomp_groups = groups
elif custom_class_ is None:
if not suppress_warnings:
unknown_groups = set(processed_precomp_groups).difference(groups)
for unknown_precomp in unknown_groups:
warnings.warn(
'Unknown precomp_groups "{0}"'.format(unknown_precomp),
UserWarning)
processed_precomp_groups = tuple(
set(processed_precomp_groups).intersection(groups))
mtds_metadata = _get_all_prefixed_mtds(
prefix=PRECOMPUTE_PREFIX,
groups=tuple(processed_precomp_groups),
custom_class_=custom_class_,
) # type: t.Dict[str, t.Tuple]
precomp_mtds_filtered = mtds_metadata.get(
"methods", tuple()) # type: t.Tuple[TypeMtdTuple, ...]
del mtds_metadata
precomp_items = {} # type: t.Dict[str, t.Any]
for precomp_mtd_tuple in precomp_mtds_filtered:
precomp_mtd_name, precomp_mtd_callable = precomp_mtd_tuple
try:
new_precomp_vals = precomp_mtd_callable(**kwargs) # type: ignore
except (AttributeError, TypeError, ValueError) as type_err:
new_precomp_vals = {}
if not suppress_warnings:
warnings.warn("Something went wrong while "
'precomputing "{0}". Will ignore '
"this method. Error message:\n"
"{1}.".format(precomp_mtd_name, repr(type_err)))
if new_precomp_vals:
precomp_items = {
**precomp_items,
**new_precomp_vals,
}
# Update kwargs to avoid recalculations iteratively
kwargs = {
**kwargs,
**new_precomp_vals,
}
return precomp_items
def check_data(X: t.Union[np.ndarray, list],
y: t.Union[np.ndarray, list]
) -> t.Tuple[np.ndarray, np.ndarray]:
"""Checks ``X`` and ``y`` data type and shape and transform it if necessary.
Args:
Check ``mfe.fit`` method for more information.
Raises:
TypeError: if ``X`` or ``y`` is neither a np.ndarray nor a list-
type object.
        ValueError: if ``X`` is empty or if the number of rows of ``X`` and
            ``y`` do not match.
    Returns:
        tuple(np.ndarray, np.ndarray): ``X`` and ``y``, possibly reshaped and
            cast to :obj:`np.ndarray` type.
"""
if not isinstance(X, (np.ndarray, list)):
raise TypeError('"X" is neither "list" nor "np.array".')
if not isinstance(y, (np.ndarray, list)):
raise TypeError('"y" is neither "list" nor "np.array".')
    # We force the numpy array to assume ``dtype=np.object`` because sometimes
    # ``X`` can be a mixed matrix containing ``float``, ``int``, and ``str``
    # values. If we do not do this, ``X`` will be converted to a string matrix,
    # and this erroneous conversion would cause numeric attributes to be cast
    # to strings. Then, when pymfe makes the automatic conversion, they would be
    # processed with one-hot-encoding, and we do not want that to happen.
# See this example:
#
# String
# >>>np.array(['test', 1])
#
# Object
# >>>np.array(['test', 1], dtype=np.object)
if not isinstance(X, np.ndarray):
X = np.array(X, dtype=np.object)
if not isinstance(y, np.ndarray):
y = np.array(y, dtype=np.object)
y = y.flatten()
if len(X.shape) == 1 and X.shape[0]:
X = X.reshape(*X.shape, -1)
if X.shape[0] == 0 or y.shape[0] == 0:
raise ValueError('Neither "X" nor "y" can be empty.')
if X.shape[0] != y.shape[0]:
raise ValueError('"X" number of rows and "y" '
"length shapes do not match.")
return np.copy(X), np.copy(y)
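# Illustrative usage sketch (an addition, not part of the original module): mixed-type
# lists are kept as ``dtype=object`` arrays, and a 1-D ``X`` would be reshaped into a
# single column.
#   >>> X, y = check_data([[1, "a"], [2, "b"]], [0, 1])
#   >>> X.shape, y.shape
#   ((2, 2), (2,))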
def isnumeric(
value: t.Any,
check_subtype: bool = True) -> bool:
"""Checks if ``value`` is a numeric type or a collection of numerics.
The ``Numeric Type`` is assumed to be one of the following:
1. :obj:`int`
2. :obj:`float`
3. :obj:`np.number`
Args:
        value (:obj:`Any`): any object to be checked as numeric or a
            collection of numerics.
        check_subtype (:obj:`bool`, optional): if True, check elements of
            ``value`` if it is an iterable object. Otherwise, only checks
            ``value`` type, ignoring the fact that it can be an iterable
            object.
    Returns:
        bool: True if `value` is a numeric type object or a collection of
            numeric-only elements. False otherwise.
"""
if (check_subtype
and isinstance(value, (collections.Iterable, np.ndarray))
and not isinstance(value, str)):
value = np.array(value)
if value.size == 0:
return False
return all(isinstance(x, _TYPE_NUMERIC) for x in value)
return isinstance(value, _TYPE_NUMERIC)
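# Illustrative usage sketch (an addition, not part of the original module; assumes
# ``_TYPE_NUMERIC`` covers int, float and np.number, as documented above):
#   >>> isnumeric(3.0)
#   True
#   >>> isnumeric([1, 2.5])
#   True
#   >>> isnumeric(["a", 1])
#   False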
def remove_prefix(value: str, prefix: str) -> str:
"""Remove ``prefix`` from ``value``.
Args:
value (:obj:`str`): method name prefixed with value stored in
``prefix``.
Returns:
str: ``value`` without ``prefix``.
Raises:
TypeError: if ``value`` is not a string.
"""
if value.startswith(prefix):
return value[len(prefix):]
return value
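# Illustrative usage sketch (an addition, not part of the original module):
#   >>> remove_prefix("ft_mean", prefix="ft_")
#   'mean'
#   >>> remove_prefix("mean", prefix="ft_")  # no prefix present, value is unchanged
#   'mean'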
def timeit(func: t.Callable, *args) -> t.Tuple[t.Any, float]:
"""Measure how much time is necessary for calling ``func`` with ``args``.
Args:
        func (:obj:`callable`): a callable whose invocation time will be
            measured.
        *args: additional arguments for ``func``.
    Returns:
        tuple[any, float]: the first element is the return value from ``func``.
            The second element is the time taken by the invocation of
            ``func``.
Raises:
This method raises all exceptions from ``func``.
"""
t_start = time.time()
ret_val = func(*args)
time_total = time.time() - t_start
return ret_val, time_total
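# Illustrative usage sketch (an addition, not part of the original module):
#   >>> result, elapsed = timeit(sum, range(1000))
#   >>> result
#   499500
#   >>> # ``elapsed`` holds the wall-clock seconds spent in the call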
def transform_cat(data_categoric: np.ndarray) -> t.Optional[np.ndarray]:
"""Transform categorical data using a model matrix.
    The formula used for this transformation is just the union (+) of all
    categoric attributes using the formula language from the ``patsy`` package
    API, removing the intercept terms: ``~ 0 + A_1 + ... + A_n``, where ``n`` is the
    number of attributes and A_i is the ith categoric attribute, 1 <= i <= n.
"""
if data_categoric.size == 0:
return None
_, num_col = data_categoric.shape
dummy_attr_names = [
"C{}".format(i) for i in range(num_col)
]
named_data = {
# attr_name: data_categoric[:, attr_index]
        # We need to cast to 'str' because the categorical values may not come
        # in as plain Python strings.
attr_name: data_categoric[:, attr_index].astype('str')
for attr_index, attr_name in enumerate(dummy_attr_names)
}
formula = "~ 0 + {}".format(" + ".join(dummy_attr_names))
return np.asarray(patsy.dmatrix(formula, named_data))
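# Illustrative usage sketch (an addition, not part of the original module; assumes
# ``patsy`` is importable, as in the rest of this file): a single categorical column
# with two levels becomes a two-column indicator (one-hot) matrix.
#   >>> cat = np.array([["a"], ["b"], ["a"]], dtype=object)
#   >>> transform_cat(cat).shape
#   (3, 2)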
def _equal_freq_discretization(data: np.ndarray,
num_bins: int,
tol: float = 1e-8) -> np.ndarray:
"""Discretize a 1-D numeric array into an equal-frequency histogram."""
perc_interval = 100.0 / num_bins
perc_range = np.arange(perc_interval, 100, perc_interval)
hist_divs = np.percentile(data, perc_range)
    # Sometimes 'hist_divs' is not appropriate, for example when all values
    # are constant, which produces repeated values in 'hist_divs'.
    # To avoid partitions with the same value, we keep only the bin edges that
    # differ from their predecessor. Unfortunately, this leads to a non-equal
    # frequency discretization.
aux = len(hist_divs)
diffs = np.append(True, np.diff(hist_divs))
hist_divs = hist_divs[diffs > tol]
if aux != len(hist_divs):
warnings.warn("It is not possible make equal discretization")
hist_divs = np.unique(hist_divs)
return np.digitize(x=data, bins=hist_divs, right=True)
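# Illustrative usage sketch (an addition, not part of the original module): with two
# bins, the values below the median fall into bin 0 and the rest into bin 1.
#   >>> _equal_freq_discretization(np.array([1.0, 2.0, 3.0, 4.0]), num_bins=2)
#   array([0, 0, 1, 1])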
def transform_num(data_numeric: np.ndarray,
num_bins: t.Optional[int] = None) -> t.Optional[np.ndarray]:
"""Discretize numeric data with an equal-frequency histogram.
    Each numeric value is overwritten by the index of its corresponding
    histogram bin.
    Args:
        data_numeric (:obj:`np.ndarray`): 2-D numpy array of numeric-only
            data to discretize.
        num_bins (:obj:`int`, optional): number of bins of the equal-frequency
            histogram used to discretize the data. If no value is given, the
            default is the cube root of the number of instances, rounded down.
Returns:
np.ndarray: discretized version of ``data_numeric``.
Raises:
TypeError: if num_bins isn't :obj:`int`.
ValueError: if num_bins is a non-positive value.
"""
if data_numeric.size == 0:
return None
if num_bins is not None:
if not isinstance(num_bins, int):
raise TypeError('"num_bins" must be integer or NoneType.')
if num_bins <= 0:
raise ValueError('"num_bins" must be a positive'
"integer or NoneType.")
num_inst, _ = data_numeric.shape
if not num_bins:
num_bins = int(num_inst**(1/3))
data_numeric = data_numeric.astype(float)
digitalized_data = np.apply_along_axis(
func1d=_equal_freq_discretization,
axis=0,
arr=data_numeric,
num_bins=num_bins)
return digitalized_data
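# Illustrative usage sketch (an addition, not part of the original module): each value
# is replaced by the index of its equal-frequency bin, column by column.
#   >>> data = np.arange(8.0).reshape(8, 1)
#   >>> transform_num(data, num_bins=2).ravel()
#   array([0, 0, 0, 0, 1, 1, 1, 1])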
def rescale_data(data: np.ndarray,
option: str,
args: t.Optional[t.Dict[str, t.Any]] = None) -> np.ndarray:
"""Rescale numeric fitted data accordingly to user select option.
Args:
data (:obj:`np.ndarray`): data to rescale.
option (:obj:`str`): rescaling strategy. Must be one in ``VALID_RESCA-
LE`` attribute.
args (:obj:`dict`, optional): additional arguments for the scaler. All
scaler used are from ``sklearn`` package, so you should consult
their documentation for a complete list of available arguments to
user customization. The used scalers for each available ``option``
are:
- ``min-max``: ``sklearn.preprocessing.MinMaxScaler``
- ``standard``: ``sklearn.preprocessing.StandardScale``
- ``robust``: ``sklearn.preprocessing.RobustScaler``
Returns:
np.ndarray: scaled ``data`` based in ``option`` correspondent strategy.
Raises:
ValueError: if ``option`` is not in ``VALID_RESCALE``.
Any exception caused by arguments from ``args`` into the
scaler model is also raised by this function.
"""
if option not in VALID_RESCALE:
raise ValueError('Unknown option "{0}". Please choose one '
"between {1}".format(option, VALID_RESCALE))
if not args:
args = {}
scaler_model = _RESCALE_SCALERS.get(option, "min-max")(**args)
return scaler_model.fit_transform(data.astype(float))
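# Illustrative usage sketch (an addition, not part of the original module; assumes
# "min-max" is one of the options listed in ``VALID_RESCALE``, as documented above):
#   >>> rescale_data(np.array([[0.0], [5.0], [10.0]]), option="min-max")
#   # -> a single column holding [0.0, 0.5, 1.0]: MinMaxScaler maps the data to [0, 1]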
def check_score(score: str, groups: t.Tuple[str, ...]):
"""Checks if a given score is valid.
Args:
score (:obj:`str`): the score metrics name.
groups (:obj:`Tuple` of :obj:`str`): a tuple of feature group names.
Returns:
None
Raises:
        ValueError: if ``score`` is neither None nor a ``str``.
ValueError: if ``score`` is not valid.
"""
valid_scoring = {
"accuracy": scoring.accuracy,
"balanced-accuracy": scoring.balanced_accuracy,
"f1": scoring.f1,
"kappa": scoring.kappa,
"auc": scoring.auc,
} # type: t.Dict[str, t.Callable[[np.ndarray, np.ndarray], float]]
if score is not None and not isinstance(score, str):
raise ValueError('"score" is not None or str but "{0}" was passed.'
'The valid values are {1}'.format(
score, list(valid_scoring.keys())))
if "landmarking" in groups:
if score is None:
raise ValueError(
                'Landmarking metafeatures need a score metric. '
                'One of the following "score" values is required: '
                '{0}'.format(list(valid_scoring.keys())))
if score not in valid_scoring:
raise ValueError(
                'One of the following "score" values is required: '
                '{0}'.format(list(valid_scoring.keys())))
return valid_scoring[score]
return None
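# Illustrative usage sketch (an addition, not part of the original module; assumes the
# ``scoring`` module exposes the metrics listed in ``valid_scoring``):
#   >>> check_score("accuracy", groups=("landmarking",))  # returns scoring.accuracy
#   >>> check_score(None, groups=("general",))            # returns None
#   >>> check_score(None, groups=("landmarking",))        # raises ValueError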
def check_group_dependencies(groups: t.Iterable[str]) -> t.Set[str]:
"""Get ``groups`` metafeature groups dependencies."""
deps = set() # type: t.Set[str]
for group in groups:
if group in VALID_GROUPS:
cur_group_index = VALID_GROUPS.index(group)
cur_dep = GROUP_PREREQUISITES[cur_group_index]
if cur_dep:
deps.update({cur_dep} if isinstance(cur_dep, str) else cur_dep)
return deps
def select_results_by_classes(
mtf_names: t.Sequence[str],
class_names: t.Union[str, t.Iterable[str]],
include_dependencies: bool = False) -> t.List[int]:
"""Get indexes of metafeatures related to given ``class_names``."""
if isinstance(class_names, str):
class_names = {class_names}
else:
class_names = set(class_names)
if include_dependencies:
class_names.update(check_group_dependencies(groups=class_names))
classes_mtd_names = set() # type: t.Set[str]
for class_name in class_names:
if class_name in VALID_GROUPS:
_aux = get_prefixed_mtds_from_class(
class_obj=VALID_MFECLASSES[VALID_GROUPS.index(class_name)],
prefix=MTF_PREFIX,
only_name=True,
prefix_removal=True)
classes_mtd_names.update(_aux) # type: ignore
re_parse_mtf_name = re.compile(r"([^\.]+)\.?")
selected_indexes = []
for mtf_cur_index, mtf_cur_name in enumerate(mtf_names):
re_match = re_parse_mtf_name.match(mtf_cur_name)
if re_match and re_match.group(1) in classes_mtd_names:
selected_indexes.append(mtf_cur_index)
return selected_indexes
def post_processing(
results: t.Tuple[t.List, ...],
groups: t.Tuple[str, ...],
suppress_warnings: bool = False,
custom_class_: t.Any = None,
**kwargs) -> None:
"""Detect and apply post-processing methods in metafeatures.
This function should be used after the metafeature extraction.
Args:
results (:obj:`Tuple` or :obj:`np.ndarray`): summarized metafeatures.
This argument has three entries (all must be collections):
- Name of metafeatures
- Value of metafeatures
- Time of extraction for each metafeature
groups (:obj:`Tuple` of :obj:`str`): collection containing one or more
group identifiers. Check out ``MFE`` class documentation for more
information.
        suppress_warnings (:obj:`bool`, optional): if True, suppress warnings
            invoked while processing the postprocessing options.
        custom_class_ (Class, optional): used for inner testing purposes. If
            not None, the given class will be used as reference to extract
            the postprocessing methods.
        **kwargs: used to pass extra custom arguments to postprocessing
            methods.
"""
mtds_metadata = _get_all_prefixed_mtds(
prefix=POSTPROCESS_PREFIX,
groups=groups,
custom_class_=custom_class_,
) # type: t.Dict[str, t.Tuple]
postprocess_mtds = mtds_metadata.get(
"methods", tuple()) # type: t.Tuple[TypeMtdTuple, ...]
del mtds_metadata
remove_groups = False
if "groups" not in kwargs:
remove_groups = True
kwargs["groups"] = groups
mtf_names, mtf_vals, mtf_time = results
extra_inner_args = {
"mtf_names": mtf_names,
"mtf_vals": mtf_vals,
"mtf_time": mtf_time,
"class_indexes": [],
}
for postprocess_mtd_name, postprocess_mtd_callable in postprocess_mtds:
extra_inner_args["class_indexes"] = select_results_by_classes(
mtf_names=mtf_names,
class_names=remove_prefix(value=postprocess_mtd_name,
prefix=POSTPROCESS_PREFIX).split("_"))
try:
new_results = postprocess_mtd_callable( # type: ignore
**extra_inner_args,
**kwargs)
if new_results:
if len(new_results) != len(results):
raise ValueError("Postprocessing result has length '{}'. "
"Expecting '{}'.".format(len(new_results),
len(results)))
for res_list_old, res_list_new in zip(results, new_results):
res_list_old += res_list_new
except (AttributeError, TypeError, ValueError) as type_err:
if not suppress_warnings:
warnings.warn("Something went wrong while "
'postprocessing "{0}". Will ignore '
"this method. Error message:\n"
"{1}.".format(postprocess_mtd_name,
repr(type_err)))
if remove_groups:
kwargs.pop("groups")
|
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from shutil import rmtree
from tempfile import mkdtemp
from nipype.testing import (assert_equal, skipif,
assert_almost_equal, example_data)
import numpy as np
from nipype.algorithms import mesh as m
notvtk = True
import platform
if 'darwin' not in platform.system().lower():
try:
from tvtk.api import tvtk
notvtk = False
except ImportError:
pass
@skipif(notvtk)
def test_ident_distances():
tempdir = mkdtemp()
curdir = os.getcwd()
os.chdir(tempdir)
in_surf = example_data('surf01.vtk')
dist_ident = m.ComputeMeshWarp()
dist_ident.inputs.surface1 = in_surf
dist_ident.inputs.surface2 = in_surf
dist_ident.inputs.out_file = os.path.join(tempdir, 'distance.npy')
res = dist_ident.run()
yield assert_equal, res.outputs.distance, 0.0
dist_ident.inputs.weighting = 'area'
res = dist_ident.run()
yield assert_equal, res.outputs.distance, 0.0
os.chdir(curdir)
rmtree(tempdir)
@skipif(notvtk)
def test_trans_distances():
tempdir = mkdtemp()
in_surf = example_data('surf01.vtk')
warped_surf = os.path.join(tempdir, 'warped.vtk')
curdir = os.getcwd()
os.chdir(tempdir)
inc = np.array([0.7, 0.3, -0.2])
r1 = tvtk.PolyDataReader(file_name=in_surf)
vtk1 = r1.output
r1.update()
vtk1.points = np.array(vtk1.points) + inc
writer = tvtk.PolyDataWriter(file_name=warped_surf)
writer.set_input_data(vtk1)
writer.write()
dist = m.ComputeMeshWarp()
dist.inputs.surface1 = in_surf
dist.inputs.surface2 = warped_surf
dist.inputs.out_file = os.path.join(tempdir, 'distance.npy')
res = dist.run()
yield assert_almost_equal, res.outputs.distance, np.linalg.norm(inc), 4
dist.inputs.weighting = 'area'
res = dist.run()
yield assert_almost_equal, res.outputs.distance, np.linalg.norm(inc), 4
os.chdir(curdir)
rmtree(tempdir)
|
#!/bin/python
import sys, numpy, os.path, re
import argparse
from Bio import SeqIO
'''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--library_file', help="File containing the library report generated by the script prepare_libraries.py")
    parser.add_argument('-p', '--path', required=True, help="Path to Trimmomatic")
parser.add_argument('-j', '--job_output', required=True, help="This is where the script will create the job file")
    parser.add_argument('-o', '--output', required=True, help="This is where the job file will point for the output of Trimmomatic")
parser.add_argument('-c', '--commands', default='LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36', help='User defined commands for trimmomatic')
parser.add_argument('--remove_originals', action='store_true', default=False, help='After trimming the libraries it deletes the original untrimmed files. This option is off by default.')
args = parser.parse_args()'''
def trimming (library_file, path, commands, job_output, output, remove_originals):
paired_list = []
single_list = []
pacbio_list = []
backstring = ''
trimmo_exec = ''
if len(commands) == 0:
commands = 'LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36'
for i in os.listdir(path):
if i[-4:] == '.jar' and i.find('trimmomatic') > -1:
trimmo_exec = path + i
if "All_adapters.fa" not in os.listdir(path+"adapters"):
allada = open(path+"adapters/All_adapters.fa", "w")
for i in os.listdir(path+"adapters"):
for line in i:
allada.write(line[:-1])
allada.write("\n")
allada.close()
for i in open(library_file):
chunk = i.split()
if chunk[5] == "1":
paired_list.append([chunk[0], chunk[6], chunk[4]])
elif chunk[5] == "2": continue
elif chunk[5] == "s":
single_list.append(chunk[0])
elif chunk[5] == "pb":
pacbio_list.append(chunk[0])
else: continue
output_string = ''
to_remove = []
for i in paired_list:
if i[0][0] == '.': i[0] = i[0][1:]
if i[1][0] == '.': i[1] = i[1][1:]
output_string = output_string + "java -jar " + trimmo_exec + " PE -phred" + str(i[2]) + " " + i[0] + " " + i[1] + " "+output+"parsed_paired_"+ \
i[0][i[0].rfind("/")+1:] + " " + output+"parsed_unpaired_"+i[0][i[0].rfind("/")+1:] + " " +output+"parsed_paired_"+i[1][i[1].rfind("/")+1:]\
+ " " + output+"parsed_unpaired_"+i[1][i[1].rfind("/")+1:] + " " +"ILLUMINACLIP:" + path+"adapters/All_adapters.fa" + ":2:30:10 "+commands+"\n"
if remove_originals == True:
to_remove.append(i[0])
to_remove.append(i[1])
    for i in single_list:
        if i[0][0] == '.': i[0] = i[0][1:]
        output_string = output_string + "java -jar " + trimmo_exec + " SE -phred" + str(i[1]) + " " + i[0] + " " + \
            output+"parsed_"+i[0][i[0].rfind("/")+1:] + " ILLUMINACLIP:" + path+"adapters/All_adapters.fa" + ":2:30:10 "+commands+"\n"
        if remove_originals == True:
            to_remove.append(i[0])
for i in to_remove:
output_string = output_string + "rm " + i + "\n"
output_file = open(job_output, 'w')
output_file.write(output_string)
output_file.close()
#for i in pacbio_list:
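# Illustrative usage sketch (an addition, not part of the original script; every path
# below is hypothetical): writes a shell script with one Trimmomatic command per library.
#   trimming(library_file="libraries_report.txt",
#            path="/opt/trimmomatic/",   # folder holding the trimmomatic .jar and the "adapters" directory
#            commands="",                # empty string -> default LEADING/TRAILING/SLIDINGWINDOW/MINLEN settings
#            job_output="trim_jobs.sh",
#            output="/data/trimmed/",
#            remove_originals=False)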
|
#!/usr/bin/env python
"""
Sentry-Python - Sentry SDK for Python
=====================================
**Sentry-Python is an SDK for Sentry.** Check out `GitHub
<https://github.com/getsentry/sentry-python>`_ to find out more.
"""
from setuptools import setup, find_packages
setup(
name="sentry-sdk",
version="0.7.8",
author="Sentry Team and Contributors",
author_email="hello@getsentry.com",
url="https://github.com/getsentry/sentry-python",
description="Python client for Sentry (https://getsentry.com)",
long_description=__doc__,
packages=find_packages(exclude=("tests", "tests.*")),
zip_safe=False,
license="BSD",
install_requires=["urllib3", "certifi"],
extras_require={"flask": ["flask>=0.8", "blinker>=1.1"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
import torch.nn.functional as F
from typing import List, Tuple, Any
import numpy as np
__all__ = ["ReLU"]
class ReLU(EquivariantModule):
def __init__(self, in_type: FieldType, inplace: bool = False):
r"""
        Module that applies a pointwise ReLU to every channel independently.
The input representation is preserved by this operation and, therefore, it equals the output
representation.
Only representations supporting pointwise non-linearities are accepted as input field type.
Args:
in_type (FieldType): the input field type
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
"""
assert isinstance(in_type.gspace, GeneralOnR2)
super(ReLU, self).__init__()
for r in in_type.representations:
assert 'pointwise' in r.supported_nonlinearities, \
'Error! Representation "{}" does not support "pointwise" non-linearity'.format(r.name)
self.space = in_type.gspace
self.in_type = in_type
# the representation in input is preserved
self.out_type = in_type
self._inplace = inplace
def forward(self, input: GeometricTensor) -> GeometricTensor:
r"""
Applies ReLU function on the input fields
Args:
input (GeometricTensor): the input feature map
Returns:
the resulting feature map after relu has been applied
"""
assert input.type == self.in_type, "Error! the type of the input does not match the input type of this module"
return GeometricTensor(F.relu(input.tensor, inplace=self._inplace), self.out_type)
def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
return b, self.out_type.size, hi, wi
def check_equivariance(self, atol: float = 1e-6, rtol: float = 1e-5) -> List[Tuple[Any, float]]:
c = self.in_type.size
x = torch.randn(3, c, 10, 10)
x = GeometricTensor(x, self.in_type)
errors = []
for el in self.space.testing_elements:
out1 = self(x).transform_fibers(el)
out2 = self(x.transform_fibers(el))
errs = (out1.tensor - out2.tensor).detach().numpy()
errs = np.abs(errs).reshape(-1)
print(el, errs.max(), errs.mean(), errs.var())
assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), \
'The error found during equivariance check with element "{}" is too high: max = {}, mean = {} var ={}' \
.format(el, errs.max(), errs.mean(), errs.var())
errors.append((el, errs.mean()))
return errors
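# Illustrative usage sketch (an addition, not part of the original module; relies on the
# documented e2cnn API, e.g. ``Rot2dOnR2`` and regular representations, which support
# pointwise non-linearities):
#   >>> gspace = Rot2dOnR2(N=8)
#   >>> feat_type = FieldType(gspace, [gspace.regular_repr] * 2)
#   >>> relu = ReLU(feat_type)
#   >>> x = GeometricTensor(torch.randn(4, feat_type.size, 10, 10), feat_type)
#   >>> relu(x).tensor.shape  # the field type, and hence the channel count, is preserved
#   torch.Size([4, 16, 10, 10])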
|
from screenshot_recorder.screenshot_recorder import VideoWindow, VideoConverter, VideoFrameGrabber
|
"""Constants used by the SmartThings component and platforms."""
from datetime import timedelta
import re
DOMAIN = "smartthings"
APP_OAUTH_CLIENT_NAME = "Home Assistant"
APP_OAUTH_SCOPES = ["r:devices:*"]
APP_NAME_PREFIX = "homeassistant."
CONF_APP_ID = "app_id"
CONF_CLOUDHOOK_URL = "cloudhook_url"
CONF_INSTALLED_APP_ID = "installed_app_id"
CONF_INSTANCE_ID = "instance_id"
CONF_LOCATION_ID = "location_id"
CONF_REFRESH_TOKEN = "refresh_token"
DATA_MANAGER = "manager"
DATA_BROKERS = "brokers"
EVENT_BUTTON = "smartthings.button"
SIGNAL_SMARTTHINGS_UPDATE = "smartthings_update"
SIGNAL_SMARTAPP_PREFIX = "smartthings_smartap_"
SETTINGS_INSTANCE_ID = "hassInstanceId"
SUBSCRIPTION_WARNING_LIMIT = 40
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
# Ordered 'specific to least-specific platform' in order for capabilities
# to be drawn-down and represented by the most appropriate platform.
SUPPORTED_PLATFORMS = [
"climate",
"fan",
"light",
"lock",
"cover",
"switch",
"binary_sensor",
"sensor",
"scene",
]
IGNORED_CAPABILITIES = [
"execute",
"healthCheck",
"ocf",
]
TOKEN_REFRESH_INTERVAL = timedelta(days=14)
VAL_UID = "^(?:([0-9a-fA-F]{32})|([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}))$"
VAL_UID_MATCHER = re.compile(VAL_UID)
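# Illustrative check (an addition, not part of the original module): the matcher accepts
# either a 32-character hex string or a dashed UUID.
#   >>> bool(VAL_UID_MATCHER.match("12345678-1234-1234-1234-123456789abc"))
#   True
#   >>> bool(VAL_UID_MATCHER.match("not-a-uuid"))
#   False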
|
"""
Settings specific to environment behind dev.volontulo.pl.
"""
# pylint: skip-file
from .base import *
# Extra settings go here:
ANGULAR_ROOT = 'https://dev.volontulo.pl'
SYSTEM_DOMAIN = 'dev.volontulo.pl'
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
class mercado (Exchange):
def describe(self):
return self.deep_extend(super(mercado, self).describe(), {
'id': 'mercado',
'name': 'Mercado Bitcoin',
'countries': ['BR'], # Brazil
'rateLimit': 1000,
'version': 'v3',
'has': {
'CORS': True,
'createMarketOrder': False,
'fetchOrder': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27837060-e7c58714-60ea-11e7-9192-f05e86adb83f.jpg',
'api': {
'public': 'https://www.mercadobitcoin.net/api',
'private': 'https://www.mercadobitcoin.net/tapi',
},
'www': 'https://www.mercadobitcoin.com.br',
'doc': [
'https://www.mercadobitcoin.com.br/api-doc',
'https://www.mercadobitcoin.com.br/trade-api',
],
},
'api': {
'public': {
'get': [
'{coin}/orderbook/', # last slash critical
'{coin}/ticker/',
'{coin}/trades/',
'{coin}/trades/{from}/',
'{coin}/trades/{from}/{to}',
'{coin}/day-summary/{year}/{month}/{day}/',
],
},
'private': {
'post': [
'cancel_order',
'get_account_info',
'get_order',
'get_withdrawal',
'list_system_messages',
'list_orders',
'list_orderbook',
'place_buy_order',
'place_sell_order',
'withdraw_coin',
],
},
},
'markets': {
'BTC/BRL': {'id': 'BRLBTC', 'symbol': 'BTC/BRL', 'base': 'BTC', 'quote': 'BRL', 'suffix': 'Bitcoin'},
'LTC/BRL': {'id': 'BRLLTC', 'symbol': 'LTC/BRL', 'base': 'LTC', 'quote': 'BRL', 'suffix': 'Litecoin'},
'BCH/BRL': {'id': 'BRLBCH', 'symbol': 'BCH/BRL', 'base': 'BCH', 'quote': 'BRL', 'suffix': 'BCash'},
'XRP/BRL': {'id': 'BRLXRP', 'symbol': 'XRP/BRL', 'base': 'XRP', 'quote': 'BRL', 'suffix': 'Ripple'},
},
'fees': {
'trading': {
'maker': 0.3 / 100,
'taker': 0.7 / 100,
},
},
})
def fetch_order_book(self, symbol, limit=None, params={}):
market = self.market(symbol)
orderbook = self.publicGetCoinOrderbook(self.extend({
'coin': market['base'],
}, params))
return self.parse_order_book(orderbook)
def fetch_ticker(self, symbol, params={}):
market = self.market(symbol)
response = self.publicGetCoinTicker(self.extend({
'coin': market['base'],
}, params))
ticker = response['ticker']
timestamp = int(ticker['date']) * 1000
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': str(trade['tid']),
'order': None,
'type': None,
'side': trade['type'],
'price': trade['price'],
'amount': trade['amount'],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
method = 'publicGetCoinTrades'
request = {
'coin': market['base'],
}
if since is not None:
method += 'From'
request['from'] = int(since / 1000)
to = self.safe_integer(params, 'to')
if to is not None:
method += 'To'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
response = self.privatePostGetAccountInfo()
balances = response['response_data']['balance']
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
account = self.account()
if lowercase in balances:
account['free'] = float(balances[lowercase]['available'])
account['total'] = float(balances[lowercase]['total'])
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
method = 'privatePostPlace' + self.capitalize(side) + 'Order'
order = {
'coin_pair': self.market_id(symbol),
'quantity': amount,
'limit_price': price,
}
response = getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': str(response['response_data']['order']['order_id']),
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
response = self.privatePostCancelOrder(self.extend({
'coin_pair': market['id'],
'order_id': id,
}, params))
#
# { response_data: {order: { order_id: 2176769,
# coin_pair: "BRLBCH",
# order_type: 2,
# status: 3,
# has_fills: False,
# quantity: "0.10000000",
# limit_price: "1996.15999",
# executed_quantity: "0.00000000",
# executed_price_avg: "0.00000",
# fee: "0.00000000",
# created_timestamp: "1536956488",
# updated_timestamp: "1536956499",
# operations: [] }},
# status_code: 100,
# server_unix_timestamp: "1536956499" }
#
return self.parse_order(response['response_data']['order'], market)
def parse_order_status(self, status):
statuses = {
'2': 'open',
'3': 'canceled',
'4': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "order_id": 4,
# "coin_pair": "BRLBTC",
# "order_type": 1,
# "status": 2,
# "has_fills": True,
# "quantity": "2.00000000",
# "limit_price": "900.00000",
# "executed_quantity": "1.00000000",
# "executed_price_avg": "900.00000",
# "fee": "0.00300000",
# "created_timestamp": "1453838494",
# "updated_timestamp": "1453838494",
# "operations": [
# {
# "operation_id": 1,
# "quantity": "1.00000000",
# "price": "900.00000",
# "fee_rate": "0.30",
# "executed_timestamp": "1453838494",
# },
# ],
# }
#
id = self.safe_string(order, 'order_id')
side = None
if 'order_type' in order:
side = 'buy' if (order['order_type'] == 1) else 'sell'
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'coin_pair')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'created_timestamp')
if timestamp is not None:
timestamp = timestamp * 1000
fee = {
'cost': self.safe_float(order, 'fee'),
'currency': market['quote'],
}
price = self.safe_float(order, 'limit_price')
# price = self.safe_float(order, 'executed_price_avg', price)
average = self.safe_float(order, 'executed_price_avg')
amount = self.safe_float(order, 'quantity')
filled = self.safe_float(order, 'executed_quantity')
remaining = amount - filled
cost = amount * average
lastTradeTimestamp = self.safe_integer(order, 'updated_timestamp')
if lastTradeTimestamp is not None:
lastTradeTimestamp = lastTradeTimestamp * 1000
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None, # todo parse trades(operations)
}
return result
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
response = None
response = self.privatePostGetOrder(self.extend({
'coin_pair': market['id'],
'order_id': int(id),
}, params))
return self.parse_order(response['response_data']['order'])
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'quantity': '{:.10f}'.format(amount),
'address': address,
}
if code == 'BRL':
account_ref = ('account_ref' in list(params.keys()))
if not account_ref:
raise ExchangeError(self.id + ' requires account_ref parameter to withdraw ' + code)
elif code != 'LTC':
tx_fee = ('tx_fee' in list(params.keys()))
if not tx_fee:
raise ExchangeError(self.id + ' requires tx_fee parameter to withdraw ' + code)
if code == 'XRP':
if tag is None:
if not('destination_tag' in list(params.keys())):
raise ExchangeError(self.id + ' requires a tag argument or destination_tag parameter to withdraw ' + code)
else:
request['destination_tag'] = tag
response = self.privatePostWithdrawCoin(self.extend(request, params))
return {
'info': response,
'id': response['response_data']['withdrawal']['id'],
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
query = self.omit(params, self.extract_params(path))
if api == 'public':
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
url += self.version + '/'
nonce = self.nonce()
body = self.urlencode(self.extend({
'tapi_method': path,
'tapi_nonce': nonce,
}, params))
auth = '/tapi/' + self.version + '/' + '?' + body
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'TAPI-ID': self.apiKey,
'TAPI-MAC': self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'error_message' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
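# Illustrative usage sketch (an addition, not part of the generated file; requires
# network access, and API credentials for the private endpoints):
#   >>> exchange = mercado()
#   >>> ticker = exchange.fetch_ticker('BTC/BRL')
#   >>> ticker['last']           # most recent trade price, quoted in BRL
#   >>> book = exchange.fetch_order_book('BTC/BRL')
#   >>> book['bids'][0]          # best bid as a [price, amount] pair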
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Deserializer
import vsts.git.v4_1.models.git_repository_create_options as git_repository_create_options
from vsts.exceptions import VstsServiceError
from ..base.base_manager import BaseManager
from . import models
from .local_git_utils import (
git_init,
git_add_remote,
git_remove_remote,
git_stage_all,
git_commit,
git_push,
does_git_exist,
does_local_git_repository_exist,
does_git_has_credential_manager,
does_git_remote_exist,
construct_git_remote_name,
construct_git_remote_url
)
class RepositoryManager(BaseManager):
""" Manage DevOps repositories
Attributes:
See BaseManager
"""
def __init__(self, organization_name="", project_name="", creds=None):
base_url = 'https://dev.azure.com'
self._config = Configuration(base_url=base_url)
self._client = ServiceClient(creds, self._config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._deserialize = Deserializer(client_models)
super(RepositoryManager, self).__init__(creds, organization_name=organization_name, project_name=project_name)
@staticmethod
def check_git():
return does_git_exist()
@staticmethod
def check_git_local_repository():
return does_local_git_repository_exist()
@staticmethod
def check_git_credential_manager():
return does_git_has_credential_manager()
# Check if the git repository exists first. If it does, check if the git remote exists.
def check_git_remote(self, repository_name, remote_prefix):
if not does_local_git_repository_exist():
return False
remote_name = construct_git_remote_name(
self._organization_name, self._project_name, repository_name, remote_prefix
)
return does_git_remote_exist(remote_name)
def remove_git_remote(self, repository_name, remote_prefix):
remote_name = construct_git_remote_name(
self._organization_name, self._project_name, repository_name, remote_prefix
)
git_remove_remote(remote_name)
def get_azure_devops_repository_branches(self, repository_name):
try:
result = self._git_client.get_branches(repository_name, self._project_name)
except VstsServiceError:
return []
return result
def get_azure_devops_repository(self, repository_name):
try:
result = self._git_client.get_repository(repository_name, self._project_name)
except VstsServiceError:
return None
return result
def create_repository(self, repository_name):
project = self._get_project_by_name(self._project_name)
git_repo_options = git_repository_create_options.GitRepositoryCreateOptions(
name=repository_name,
project=project
)
return self._git_client.create_repository(git_repo_options)
def list_repositories(self):
return self._git_client.get_repositories(self._project_name)
def list_commits(self, repository_name):
project = self._get_project_by_name(self._project_name)
repository = self._get_repository_by_name(project, repository_name)
return self._git_client.get_commits(repository.id, None, project=project.id)
def get_local_git_remote_name(self, repository_name, remote_prefix):
return construct_git_remote_name(self._organization_name, self._project_name, repository_name, remote_prefix)
    # Since the portal url and the remote url are the same, we only need one function to handle portal access and git push
def get_azure_devops_repo_url(self, repository_name):
return construct_git_remote_url(self._organization_name, self._project_name, repository_name)
# The function will initialize a git repo, create git remote, stage all changes and commit the code
# Exceptions: GitOperationException
def setup_local_git_repository(self, repository_name, remote_prefix):
remote_name = construct_git_remote_name(
self._organization_name, self._project_name, repository_name, remote_prefix
)
remote_url = construct_git_remote_url(self._organization_name, self._project_name, repository_name)
if not does_local_git_repository_exist():
git_init()
git_add_remote(remote_name, remote_url)
git_stage_all()
git_commit("Create function app with azure devops build. Remote repository url: {url}".format(url=remote_url))
# The function will push the current context in local git repository to Azure Devops
# Exceptions: GitOperationException
def push_local_to_azure_devops_repository(self, repository_name, remote_prefix, force):
remote_name = construct_git_remote_name(
self._organization_name, self._project_name, repository_name, remote_prefix
)
git_push(remote_name, force)
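# Illustrative usage sketch (an addition, not part of the original module; assumes valid
# msrest credentials and an existing Azure DevOps organization and project):
#   >>> manager = RepositoryManager(organization_name="my-org",
#   ...                             project_name="my-project", creds=creds)
#   >>> manager.create_repository("my-repo")
#   >>> manager.setup_local_git_repository("my-repo", remote_prefix="azuredevops")
#   >>> manager.push_local_to_azure_devops_repository("my-repo", "azuredevops", force=False)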
|
# Telegram spammer by Oleg Sazonov
# Import of the functions required for the spammer to work
from telethon import TelegramClient, connection
from telethon.tl.functions.messages import ImportChatInviteRequest, SendMessageRequest
from telethon.tl.functions.channels import JoinChannelRequest
from telethon.errors import UserAlreadyParticipantError, FloodWaitError, ChatWriteForbiddenError, InviteHashExpiredError, ChannelInvalidError, ChannelPrivateError, ChannelsTooMuchError, InviteHashInvalidError, ChatAdminRequiredError, UsernameInvalidError, ChatRestrictedError, UsernameNotOccupiedError
from telethon.tl.types import InputPeerChannel, InputPeerChat
from asyncio import sleep
from glob import glob
from string import ascii_lowercase
from random import choice
# Open the data files
chats = open("chats", "r")
id = open("id", "r")
text = open("text_of_message", "r")
# Variable initialization (placed at the top so that memory is allocated only once at startup)
hash = []
spam_api_id = []
spam_api_hash = []
user_type = []
user_link = []
counter = 0
acc_count = 0
def randomString(stringLength=10):
"""Сгенерировать случайную строку определенной длины"""
letters = ascii_lowercase
return ''.join(choice(letters) for i in range(stringLength))
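# Illustrative example (an addition, not part of the original script): the helper is used
# below to name throw-away Telethon session files.
#   >>> len(randomString(5))
#   5
#   >>> randomString(3)  # e.g. 'qzk', three random lowercase letters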
def build_data():
    # Build the table of chats and their identifiers here
for line in chats:
if line.find("#", 0) != -1:
continue
if line.find("@") != -1:
user_type.append(line.lstrip("@"))
continue
if line.find("joinchat") == -1:
user_link.append(line.lstrip("https://t.me/"))
continue
tmp = line.lstrip('https://t.me/joinchat/')
hash.append(tmp)
for line in id:
if line.find("#") != -1:
continue
tmp = line.split()
spam_api_id.append(tmp[0])
spam_api_hash.append(tmp[1])
txt = ''
for line in text:
if line.find("#") == 0:
continue
txt += line
txt += '\n'
return txt
def login_as_user(api_id, api_hash, count, session='0'):
    # Function to log in to Telegram as a regular user (NOT a bot)
if session != '0':
client = TelegramClient(session, spam_api_id[count], spam_api_hash[count])
return client
client = TelegramClient(randomString(3), spam_api_id[count], spam_api_hash[count], connection=connection.ConnectionTcpMTProxyRandomizedIntermediate, proxy=(host, port, sec))
return client
async def main(client, session, acc_count, reklama, counter, user_type, user_link):
    # Main mailing function
me = await client.get_me()
print("You are", me.first_name, me.last_name)
for i in range(hash.__len__()):
tmp = hash[i]
tmp = tmp.rstrip()
print("Sending to", tmp)
try:
            # The mailing and error handling happen here
await client(ImportChatInviteRequest(tmp))
except UserAlreadyParticipantError:
print("User already on channel")
        except InviteHashExpiredError:
            print("The chat invite link has expired, check position", i)
            continue
        except InviteHashInvalidError:
            print("Broken link at position", i)
            continue
entity = await client.get_entity("https://t.me/joinchat/" + tmp)
await client(SendMessageRequest(peer=InputPeerChannel(entity.id, entity.access_hash), message=reklama))
await sleep(client.flood_sleep_threshold-10)
for j in range(0, user_type.__len__()):
tmp1 = user_type[j]
print("Sending to ", tmp1)
try:
entity = await client.get_input_entity(tmp1)
        except UsernameInvalidError:
            print("Invalid username:", j)
            continue
try:
await client(JoinChannelRequest(entity))
        except ChannelsTooMuchError:
            print("You need to clean up the channels on account", me.username)
            return
        except ChannelInvalidError:
            print("Invalid channel at position", j)
            continue
        except ChannelPrivateError:
            print("The channel at position", j, "is private")
            continue
        except ChatAdminRequiredError:
            print("We cannot post to channel number", j)
            continue
try:
await client(SendMessageRequest(peer=InputPeerChannel(entity.channel_id, entity.access_hash), message=reklama))
        except ChatWriteForbiddenError:
            print("We cannot write there:", j)
            continue
        except ChatAdminRequiredError:
            print("We cannot write there:", j)
            continue
        except ChatRestrictedError:
            print("We cannot write there:", j)
            continue
await sleep(client.flood_sleep_threshold-10)
for a in range(0, user_link.__len__()):
tmp1 = user_link[a]
print("Sending to:", tmp1)
try:
entity = await client.get_input_entity(tmp1)
        except UsernameInvalidError:
            print("Invalid username:", a)
            continue
        except ValueError:
            print("No such user:", user_link[a])
            continue
try:
await client(JoinChannelRequest(entity))
        except ChannelsTooMuchError:
            print("You need to clean up the channels on account", me.username)
            return
        except ChannelInvalidError:
            print("Invalid channel at position", a)
            continue
        except ChannelPrivateError:
            print("The channel at position", a, "is private")
            continue
        except ChatAdminRequiredError:
            print("We cannot post to channel number", a)
            continue
try:
await client(SendMessageRequest(peer=InputPeerChannel(entity.channel_id, entity.access_hash), message=reklama))
        except ChatWriteForbiddenError:
            print("We cannot write there:", a)
            continue
        except ChatAdminRequiredError:
            print("We cannot write there:", a)
            continue
        except ChatRestrictedError:
            print("We cannot write there:", a)
            continue
await sleep(client.flood_sleep_threshold-10)
return
reklama = build_data()
def launch():
    # Launcher for the main function (needed to run it in asynchronous mode)
session = glob("*.session")
print("Добавляем новые сессии?(1 или 0):")
if int(input()) == 1:
print("Сколько(число):")
i = int(input())
for j in range(0, i):
client = login_as_user(spam_api_id, spam_api_hash, counter)
client.start()
client.disconnect()
with client:
client.loop.run_until_complete(main(client, session, acc_count, reklama, counter, user_type, user_link))
return
client = login_as_user(spam_api_id, spam_api_hash, counter, session=session[0])
with client:
client.loop.run_until_complete(main(client, session, acc_count, reklama, counter, user_type, user_link))
return
if __name__ == "__main__":
    # Entry point
launch()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from webob import exc
from neutron.common import exceptions
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.haproxy import (
plugin_driver
)
from neutron.tests import base
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron.tests.unit import testlib_api
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
def reset_device_driver():
plugin_driver.AgentBasedPluginDriver.device_driver = None
self.addCleanup(reset_device_driver)
self.mock_importer = mock.patch.object(
plugin_driver, 'importutils').start()
self.addCleanup(mock.patch.stopall)
# needed to reload provider configuration
st_db.ServiceTypeManager._instance = None
plugin_driver.AgentBasedPluginDriver.device_driver = 'dummy'
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=('LOADBALANCER:lbaas:neutron.services.'
'loadbalancer.drivers.haproxy.plugin_driver.'
'AgentBasedPluginDriver:default'))
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = plugin_driver.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
self.addCleanup(mock.patch.stopall)
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in xrange(3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 3)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
# set the vip inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
# set the pool inactive need to use plugin directly since
# status is not tenant mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_inactive(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
self.assertRaises(
exceptions.Invalid,
self.callbacks.get_logical_device,
context.get_admin_context(),
pool['pool']['id'])
def test_get_logical_device_active(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# activate objects
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Member, member['member']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id']
)
self.assertEqual(logical_config, expected)
def test_get_logical_device_inactive_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Member,
member['member']['id'],
'INACTIVE')
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
member['member']['status'] = constants.INACTIVE
self.assertEqual([member['member']],
logical_config['members'])
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in expected.iteritems():
self.assertEqual(db_port[k], v)
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_plug_vip_port_mock_with_host(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f',
'admin_state_up': True,
portbindings.HOST_ID: 'host'
}
with mock.patch.object(
self.plugin._core_plugin, 'update_port') as mock_update_port:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.callbacks.plug_vip_port(
ctx, port_id=vip['vip']['port_id'], host='host')
mock_update_port.assert_called_once_with(
ctx, vip['vip']['port_id'],
{'port': testlib_api.SubDictMatch(exp)})
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
def test_pool_deployed(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('PENDING_CREATE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('PENDING_CREATE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE', m['status'])
self.callbacks.pool_deployed(ctx, pool['pool']['id'])
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('ACTIVE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('ACTIVE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('ACTIVE', m['status'])
def test_update_status_pool(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('PENDING_CREATE', p['status'])
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('ACTIVE', p['status'])
def test_update_status_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
hm_id = hm['health_monitor']['id']
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('PENDING_CREATE', h['status'])
self.callbacks.update_status(
ctx, 'health_monitor',
{'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('ACTIVE', h['status'])
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.addCleanup(mock.patch.stopall)
self.api = plugin_driver.LoadBalancerAgentApi('topic')
self.mock_cast = mock.patch.object(self.api, 'cast').start()
self.mock_msg = mock.patch.object(self.api, 'make_msg').start()
def test_init(self):
self.assertEqual(self.api.topic, 'topic')
def _call_test_helper(self, method_name, method_args):
rv = getattr(self.api, method_name)(mock.sentinel.context,
host='host',
**method_args)
self.assertEqual(rv, self.mock_cast.return_value)
self.mock_cast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic.host',
version=None
)
if method_name == 'agent_updated':
method_args = {'payload': method_args}
self.mock_msg.assert_called_once_with(
method_name,
**method_args
)
def test_agent_updated(self):
self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
def test_create_pool(self):
self._call_test_helper('create_pool', {'pool': 'test',
'driver_name': 'dummy'})
def test_update_pool(self):
self._call_test_helper('update_pool', {'old_pool': 'test',
'pool': 'test'})
def test_delete_pool(self):
self._call_test_helper('delete_pool', {'pool': 'test'})
def test_create_vip(self):
self._call_test_helper('create_vip', {'vip': 'test'})
def test_update_vip(self):
self._call_test_helper('update_vip', {'old_vip': 'test',
'vip': 'test'})
def test_delete_vip(self):
self._call_test_helper('delete_vip', {'vip': 'test'})
def test_create_member(self):
self._call_test_helper('create_member', {'member': 'test'})
def test_update_member(self):
self._call_test_helper('update_member', {'old_member': 'test',
'member': 'test'})
def test_delete_member(self):
self._call_test_helper('delete_member', {'member': 'test'})
def test_create_monitor(self):
self._call_test_helper('create_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
def test_update_monitor(self):
self._call_test_helper('update_pool_health_monitor',
{'old_health_monitor': 'test',
'health_monitor': 'test',
'pool_id': 'test'})
def test_delete_monitor(self):
self._call_test_helper('delete_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(plugin_driver, 'LOG')
api_cls = mock.patch.object(plugin_driver,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.mock_get_driver = mock.patch.object(self.plugin_instance,
'_get_driver')
self.mock_get_driver.return_value = (plugin_driver.
AgentBasedPluginDriver(
self.plugin_instance
))
self.addCleanup(mock.patch.stopall)
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.create_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
ctx = context.get_admin_context()
old_vip = vip['vip'].copy()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.update_vip.assert_called_once_with(
mock.ANY,
old_vip,
new_vip,
'host'
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet, no_delete=True) as vip:
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
vip['vip']['status'] = 'PENDING_DELETE'
self.mock_api.delete_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_create_pool(self):
with self.pool() as pool:
self.mock_api.create_pool.assert_called_once_with(
mock.ANY,
pool['pool'],
mock.ANY,
'dummy'
)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, orig_pool, 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, orig_pool, updated, 'host')
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
old_pool = pool['pool'].copy()
old_pool['vip_id'] = vip['vip']['id']
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, old_pool, updated, 'host')
def test_delete_pool(self):
with self.pool(no_delete=True) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
pool['pool']['status'] = 'PENDING_DELETE'
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, pool['pool'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
self.mock_api.create_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.update_member.assert_called_once_with(
mock.ANY, member['member'], updated, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
self.mock_api.create_member.reset_mock()
ctx = context.get_admin_context()
old_member = member['member'].copy()
member['member']['pool_id'] = pool2_id
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, old_member, 'host')
self.mock_api.create_member.assert_called_once_with(
mock.ANY, updated, 'host')
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
no_delete=True) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
member['member']['status'] = 'PENDING_DELETE'
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_create_pool_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
hm['pools'][0]['status'] = 'PENDING_DELETE'
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['id'], pool_id)
self.mock_api.delete_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with contextlib.nested(
self.health_monitor(type='HTTP'),
self.pool()
) as (monitor, pool):
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
# hm now has a ref to the pool with which it is associated
ctx = context.get_admin_context()
hm = self.plugin.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
updated = hm.copy()
updated.update(data['health_monitor'])
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.update_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
updated,
pool['pool']['id'],
'host')
|
import salt.modules.win_certutil as certutil
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class CertUtilTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {certutil: {}}
def test_get_serial(self):
"""
Test getting the serial number from a certificate
"""
expected = "180720d39cd2db3244ba037417241e90"
mock = MagicMock(
return_value=(
"CertInfo\r\n"
"Cert Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"\r\n"
"OtherStuff"
)
)
with patch.dict(certutil.__salt__, {"cmd.run": mock}):
out = certutil.get_cert_serial("/path/to/cert.cer")
mock.assert_called_once_with(
"certutil.exe -silent -verify /path/to/cert.cer"
)
self.assertEqual(expected, out)
def test_get_serials(self):
"""
Test getting all the serial numbers from a store
"""
expected = [
"180720d39cd2db3244ba037417241e90",
"1768ac4e5b72bf1d0df0df118b34b959",
]
mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"OtherStuff\r\n"
"\r\n"
"================ Certificate 1 ================\r\n"
"Serial Number: 1768ac4e5b72bf1d0df0df118b34b959\r\n"
"OtherStuff"
)
)
with patch.dict(certutil.__salt__, {"cmd.run": mock}):
out = certutil.get_stored_cert_serials("TrustedPublisher")
mock.assert_called_once_with("certutil.exe -store TrustedPublisher")
self.assertEqual(expected, out)
def test_add_store(self):
"""
Test adding a certificate to a specific store
"""
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"OtherStuff"
)
)
cache_mock = MagicMock(return_value="/tmp/cert.cer")
with patch.dict(
certutil.__salt__, {"cmd.run": cmd_mock, "cp.cache_file": cache_mock}
):
certutil.add_store("salt://path/to/file", "TrustedPublisher")
cmd_mock.assert_called_once_with(
"certutil.exe -addstore TrustedPublisher /tmp/cert.cer"
)
cache_mock.assert_called_once_with("salt://path/to/file", "base")
def test_del_store(self):
"""
Test removing a certificate to a specific store
"""
with patch("salt.modules.win_certutil.get_cert_serial") as cert_serial_mock:
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"OtherStuff"
)
)
cache_mock = MagicMock(return_value="/tmp/cert.cer")
cert_serial_mock.return_value = "ABCDEF"
with patch.dict(
certutil.__salt__, {"cmd.run": cmd_mock, "cp.cache_file": cache_mock}
):
certutil.del_store("salt://path/to/file", "TrustedPublisher")
cmd_mock.assert_called_once_with(
"certutil.exe -delstore TrustedPublisher ABCDEF"
)
cache_mock.assert_called_once_with("salt://path/to/file", "base")
|
from __future__ import unicode_literals
import copy
import datetime
from django.db import models
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from audit_log.models.fields import LastUserField
from audit_log import settings as local_settings
try:
from django.utils.timezone import now as datetime_now
assert datetime_now
except ImportError:
import datetime
datetime_now = datetime.datetime.now
class LogEntryObjectDescriptor(object):
def __init__(self, model):
self.model = model
def __get__(self, instance, owner):
kwargs = dict((f.attname, getattr(instance, f.attname))
for f in self.model._meta.fields
if hasattr(instance, f.attname))
return self.model(**kwargs)
class AuditLogManager(models.Manager):
def __init__(self, model, attname, instance = None, ):
super(AuditLogManager, self).__init__()
self.model = model
self.instance = instance
self.attname = attname
        #set a hidden attribute on the instance to control whether we should track changes
if instance is not None and not hasattr(instance, '__is_%s_enabled'%attname):
setattr(instance, '__is_%s_enabled'%attname, True)
def enable_tracking(self):
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
setattr(self.instance, '__is_%s_enabled'%self.attname, True)
def disable_tracking(self):
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
setattr(self.instance, '__is_%s_enabled'%self.attname, False)
def is_tracking_enabled(self):
if local_settings.DISABLE_AUDIT_LOG:
return False
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
return getattr(self.instance, '__is_%s_enabled'%self.attname)
def get_queryset(self):
if self.instance is None:
return super(AuditLogManager, self).get_queryset()
f = {self.instance._meta.pk.name : self.instance.pk}
return super(AuditLogManager, self).get_queryset().filter(**f)
class AuditLogDescriptor(object):
def __init__(self, model, manager_class, attname):
self.model = model
self.manager_class = manager_class
self.attname = attname
def __get__(self, instance, owner):
if instance is None:
return self.manager_class(self.model, self.attname)
return self.manager_class(self.model, self.attname, instance)
class AuditLog(object):
manager_class = AuditLogManager
def __init__(self, exclude = []):
self._exclude = exclude
def contribute_to_class(self, cls, name):
self.manager_name = name
models.signals.class_prepared.connect(self.finalize, sender = cls)
def create_log_entry(self, instance, action_type):
manager = getattr(instance, self.manager_name)
attrs = {}
for field in instance._meta.fields:
if field.attname not in self._exclude:
attrs[field.attname] = getattr(instance, field.attname)
manager.create(action_type = action_type, **attrs)
def post_save(self, instance, created, **kwargs):
#ignore if it is disabled
if getattr(instance, self.manager_name).is_tracking_enabled():
self.create_log_entry(instance, created and 'I' or 'U')
def post_delete(self, instance, **kwargs):
#ignore if it is disabled
if getattr(instance, self.manager_name).is_tracking_enabled():
self.create_log_entry(instance, 'D')
def finalize(self, sender, **kwargs):
log_entry_model = self.create_log_entry_model(sender)
models.signals.post_save.connect(self.post_save, sender = sender, weak = False)
models.signals.post_delete.connect(self.post_delete, sender = sender, weak = False)
descriptor = AuditLogDescriptor(log_entry_model, self.manager_class, self.manager_name)
setattr(sender, self.manager_name, descriptor)
def copy_fields(self, model):
"""
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
"""
fields = {'__module__' : model.__module__}
for field in model._meta.fields:
if not field.name in self._exclude:
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
#we replace the AutoField of the original model
#with an IntegerField because a model can
#have only one autofield.
field.__class__ = models.IntegerField
if field.primary_key:
field.serialize = True
#OneToOne fields should really be tracked
#as ForeignKey fields
if isinstance(field, models.OneToOneField):
field.__class__ = models.ForeignKey
if field.primary_key or field.unique:
#unique fields of the original model
#can not be guaranteed to be unique
#in the audit log entry but they
#should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True
if field.remote_field and field.remote_field.related_name:
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.related_name
)
elif field.remote_field:
try:
if field.remote_field.get_accessor_name():
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.get_accessor_name()
)
                except Exception:
pass
fields[field.name] = field
return fields
def get_logging_fields(self, model):
"""
Returns a dictionary mapping of the fields that are used for
        keeping the actual audit log entries.
"""
rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower()
def entry_instance_to_unicode(log_entry):
try:
result = '%s: %s %s at %s'%(model._meta.object_name,
log_entry.object_state,
log_entry.get_action_type_display().lower(),
log_entry.action_date,
)
except AttributeError:
result = '%s %s at %s'%(model._meta.object_name,
log_entry.get_action_type_display().lower(),
log_entry.action_date
)
return result
action_user_field = LastUserField(related_name = rel_name, editable = False)
#check if the manager has been attached to auth user model
if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self')
return {
'action_id' : models.AutoField(primary_key = True),
'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False),
'action_user' : action_user_field,
'action_type' : models.CharField(max_length = 1, editable = False, choices = (
('I', _('Created')),
('U', _('Changed')),
('D', _('Deleted')),
)),
'object_state' : LogEntryObjectDescriptor(model),
'__unicode__' : entry_instance_to_unicode,
}
def get_meta_options(self, model):
"""
Returns a dictionary of Meta options for the
        audit log model.
"""
result = {
'ordering' : ('-action_date',),
'app_label' : model._meta.app_label,
}
from django.db.models.options import DEFAULT_NAMES
if 'default_permissions' in DEFAULT_NAMES:
result.update({'default_permissions': ()})
return result
def create_log_entry_model(self, model):
"""
Creates a log entry model that will be associated with
the model provided.
"""
attrs = self.copy_fields(model)
attrs.update(self.get_logging_fields(model))
attrs.update(Meta = type(str('Meta'), (), self.get_meta_options(model)))
name = str('%sAuditLogEntry'%model._meta.object_name)
return type(name, (models.Model,), attrs)
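# --- Usage sketch (illustrative; not part of the original module) ---
# Declaring an ``AuditLog`` attribute on a model triggers ``contribute_to_class``,
# which builds a parallel ``<Model>AuditLogEntry`` model via ``create_log_entry_model``
# and wires up the post_save/post_delete signals. The model and app label below are
# assumptions for demonstration only; in a real project this would live in an app's
# ``models.py``.
#
#   class ProductExample(models.Model):
#       name = models.CharField(max_length=100)
#       price = models.DecimalField(max_digits=8, decimal_places=2)
#
#       audit_log = AuditLog(exclude=['price'])  # changes to 'price' are not logged
#
#       class Meta:
#           app_label = 'shop'
#
#   # Every save creates a ProductExampleAuditLogEntry with action_type 'I' or 'U',
#   # every delete one with 'D'. Per-instance tracking can be paused with
#   #   instance.audit_log.disable_tracking()
#   # and re-enabled with instance.audit_log.enable_tracking().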
|
#!/usr/bin/env python
"""
This script extracts all strings from a supplied PDF file.
Running strings against PDF files is not always helpful, because interesting values
like URLs and JavaScript can be encoded so they are not human-readable.
This script works around that by first decoding all text inside of the PDF file
so that the strings are human-readable. This also has the benefit of not including
strings that are not displayed to the user.
"""
import sys
import PyPDF2
def get_strings(fpath):
texts = []
pdf = PyPDF2.PdfFileReader(fpath)
for page_num, page in enumerate(pdf.pages):
texts.append(page.extractText())
extracted_text = ("\n" + "*"*80 + "\n").join(texts)
return extracted_text.encode('utf-8', errors="replace")
def main():
if len(sys.argv) < 2:
print "USAGE: %s %s <filename>" % (sys.executable, sys.argv[0])
sys.exit(1)
fpath = sys.argv[1]
print get_strings(fpath)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import Command
import recalboxFiles
from generators.Generator import Generator
import os.path
import glob
class DosBoxGenerator(Generator):
def getResolution(self, config):
return 'default'
# Main entry of the module
# Return command
def generate(self, system, rom, playersControllers, gameResolution):
# Find rom path
gameDir = rom
batFile = gameDir + "/dosbox.bat"
gameConfFile = gameDir + "/dosbox.cfg"
commandArray = [recalboxFiles.recalboxBins[system.config['emulator']],
"-userconf",
"-exit",
"""{}""".format(batFile),
"-c", """set ROOT={}""".format(gameDir)]
if os.path.isfile(gameConfFile):
commandArray.append("-conf")
commandArray.append("""{}""".format(gameConfFile))
else:
commandArray.append("-conf")
commandArray.append("""{}""".format(recalboxFiles.dosboxConfig))
if 'args' in system.config and system.config['args'] is not None:
commandArray.extend(system.config['args'])
return Command.Command(array=commandArray, env={"SDL_VIDEO_GL_DRIVER":"/usr/lib/libGLESv2.so"})
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.icsp import ICSPBaseActions
import eventlet
class GetJobStatus(ICSPBaseActions):
def run(self, job_id, monitor, monitor_interval=120,
connection_details=None):
self.set_connection(connection_details)
self.get_sessionid()
output = {}
endpoint = "/rest/os-deployment-jobs"
if monitor and not job_id:
raise ValueError("Unable to proceed. Monitor \
feature requires a single Job ID")
if job_id:
endpoint = endpoint + "/%s" % (job_id)
jobs = self.icsp_get(endpoint)
# Single Job ID doesn't have Members element
if not job_id:
for job in jobs['members']:
jobid = self.extract_id(job["uri"])
output[jobid] = job['state']
else:
status = jobs['state']
if monitor:
jobid = self.extract_id(jobs["uri"])
while status == "STATUS_ACTIVE":
eventlet.sleep(monitor_interval)
jobs = self.icsp_get(endpoint)
status = jobs['state']
if status == 'STATUS_SUCCESS':
output[jobid] = jobs['state']
else:
raise Exception("%s: %s" % (jobid, status))
else:
jobid = self.extract_id(jobs["uri"])
output[jobid] = jobs['state']
return {"jobs": output}
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0
from troposphere import Tags
from . import AWSObject, AWSProperty
class Application(AWSObject):
resource_type = "AWS::ServiceCatalogAppRegistry::Application"
props = {
"Description": (str, False),
"Name": (str, True),
"Tags": (Tags, False),
}
class Attributes(AWSProperty):
props = {}
class AttributeGroup(AWSObject):
resource_type = "AWS::ServiceCatalogAppRegistry::AttributeGroup"
props = {
"Attributes": (Attributes, True),
"Description": (str, False),
"Name": (str, True),
"Tags": (Tags, False),
}
class AttributeGroupAssociation(AWSObject):
resource_type = "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation"
props = {
"Application": (str, True),
"AttributeGroup": (str, True),
}
class ResourceAssociation(AWSObject):
resource_type = "AWS::ServiceCatalogAppRegistry::ResourceAssociation"
props = {
"Application": (str, True),
"Resource": (str, True),
"ResourceType": (str, True),
}
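# --- Usage sketch (illustrative; not part of the autogenerated resource spec) ---
# Minimal example of composing these resources into a CloudFormation template with
# troposphere's Template API. Logical IDs and property values are assumptions made
# for demonstration only.
if __name__ == "__main__":
    from troposphere import Ref, Template

    template = Template()
    application = template.add_resource(
        Application(
            "ExampleApplication",
            Name="example-application",
            Description="Sample AppRegistry application",
        )
    )
    template.add_resource(
        ResourceAssociation(
            "ExampleResourceAssociation",
            Application=Ref(application),
            Resource="example-stack",
            ResourceType="CFN_STACK",
        )
    )
    print(template.to_json())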
|
import os
from dramatiq.middleware import TimeLimit
def path_to(*paths):
return os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
*paths,
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "a"
# SECURITY WARNING: don"t run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_dramatiq",
"tests.testapp1",
"tests.testapp2",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "tests.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"common.context_processors.settings",
],
},
},
]
WSGI_APPLICATION = "django_dramatiq.wsgi.application"
# Database
# ========
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "db.sqlite3",
}
}
# Queue
# =====
DRAMATIQ_BROKER = {
"BROKER": "dramatiq.brokers.stub.StubBroker",
"OPTIONS": {},
"MIDDLEWARE": [
"dramatiq.middleware.AgeLimit",
TimeLimit(time_limit=36000000),
"dramatiq.middleware.Retries",
"django_dramatiq.middleware.AdminMiddleware",
"django_dramatiq.middleware.DbConnectionsMiddleware",
]
}
# Auth
# ====
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
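# --- Usage sketch (illustrative; not part of the settings module) ---
# With the StubBroker configured in DRAMATIQ_BROKER above, actors are declared in an
# app module (e.g. a hypothetical tests/testapp1/tasks.py) in the usual dramatiq way;
# django_dramatiq sets up the broker from these settings before tasks are imported.
#
#   import dramatiq
#
#   @dramatiq.actor
#   def add(x, y):
#       return x + y
#
#   # enqueue from a view or a test:
#   #   add.send(1, 2)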
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BioLinear(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
delta: float = 0.05,
ranking_param: int = 2,
lebesgue_p: int = 3,
device=None,
dtype=None,
) -> None:
"""
Analogue of a nn.Linear layer implementing the bio-inspired Hebbian learning rule from [1].
Automatically flattens the input, and applies batch normalization (without any learnable parameters,
so that it does not require backprop). Use the method `training_step` to perform a single weights update.
[1]: "Unsupervised learning by competing hidden units", D. Krotov, J. J. Hopfield, 2019,
https://www.pnas.org/content/116/16/7723
Parameters
----------
in_features : int
Input dimension of the Linear layer
out_features : int
Output dimension of the Linear layer
bias : bool
delta : float
Strength of anti-Hebbian learning (from eq. 9 in [1]).
lebesgue_p : float
Parameter for Lebesgue measure, used for defining an inner product (from eq. 2 in [1]).
ranking_param: int
Rank of the current to which anti-hebbian learning is applied. Should be >= 2. This is the `k` from eq. 10 in [1].
device
dtype
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.delta = delta
self.ranking_param = ranking_param
self.lebesgue_p = lebesgue_p
self.weight = nn.Parameter(
torch.randn(out_features, in_features, device=device, dtype=dtype),
requires_grad=False,
)
self.bias = None
if bias:
self.bias = nn.Parameter(
torch.randn(
out_features,
),
requires_grad=False,
)
self.batch_norm = nn.BatchNorm1d(
self.in_features, affine=False, device=device, dtype=dtype
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Compute output of the layer (forward pass).
Parameters
----------
x : torch.Tensor
Input. Expected to be of shape (batch_size, ...), where ... denotes an arbitrary
sequence of dimensions, with product equal to in_features.
"""
self.batch_norm.eval()
x = x.view(x.size(0), -1) # Auto flatten
return F.linear(
self.batch_norm(x),
torch.sign(self.weight) * torch.abs(self.weight) ** (self.lebesgue_p - 1),
self.bias,
)
def delta_weights(self, x: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:
"""
Compute the change of `weights` given by the Krotov learning rule (eq. 3 from [1], with R=1)
for a given batch of `x`. See "A fast implementation" section in [1].
The formula is:
```delta_weights = g(currents) @ x - normalization_mtx (*) weights
currents = (sgn(weights) (*) abs(weights) ** lebesgue_p) @ x.T```
where `normalization_mtx` is a matrix of the same shape of `weights`, with all columns equal to:
```\sum_{batches} [g(currents) (*) currents]```
The symbol `@` denotes matrix multiplication, while `(*)` is element-wise multiplication (Hadamard product).
Finally, the function `g` (eq. 10 in [1]) returns:
```g(currents[i,j]) = 1 if currents[i,j] is the highest in the j-th column (sample),
-Delta if currets[i,j] is the k-th highest value in the j-th column (sample),
0 otherwise```
Parameters
----------
x : torch.Tensor
Input. Expected to be of shape (batch_size, in_features).
weights : torch.Tensor of shape (output_size, input_size)
Model's weights
Returns
-------
delta_weights : torch.Tensor of shape (output_size, input_size)
Change of weights given by the fast implementation of Krotov learning rule. The tensor is normalized
so that its maximum is equal to 1.
"""
batch_size = x.shape[0]
# ---Currents---#
x = torch.t(x) # Shape is (batch_size, input_size) -> (input_size, batch_size)
currents = torch.matmul(
torch.sign(weights) * torch.abs(weights) ** (self.lebesgue_p - 1), x
) # Shape is (output_size, batch_size)
# ---Activations---#
_, ranking_indices = currents.topk(
self.ranking_param, dim=0
) # Shape is (self.ranking_param, batch_size)
# Indices of the top k currents produced by each input sample
post_activation_currents = torch.zeros_like(
currents
) # Shape is (output_size, batch_size)
# Computes g(currents)
# Note that all activations are 0, except the largest current (activation of 1) and the k-th largest (activation of -delta)
batch_indices = torch.arange(batch_size, device=post_activation_currents.device)
post_activation_currents[ranking_indices[0], batch_indices] = 1.0
post_activation_currents[
ranking_indices[self.ranking_param - 1], batch_indices
] = -self.delta
# ---Compute change of weights---#
delta_weights = torch.matmul(
post_activation_currents, torch.t(x)
) # Overlap between post_activation_currents and inputs
second_term = torch.sum(
torch.mul(post_activation_currents, currents), dim=1
) # Overlap between currents and post_activation_currents
# Results are summed over batches, resulting in a shape of (output_size,)
delta_weights = delta_weights - second_term.unsqueeze(1) * weights
# ---Normalize---#
nc = torch.abs(delta_weights).amax() # .amax(1, keepdim=True)
delta_weights.div_(nc + 1e-5)
return delta_weights # Maximum (absolute) change of weight is set to +1.
def training_step(self, x: torch.Tensor, learning_rate: float = 0.1) -> float:
"""Apply the "BioLearn" rule to update the weights, according to:
``self.weights += learning_rate * delta_weights```
Parameters
----------
x : torch.Tensor
Input. Expected to be of shape (batch_size, ...), where ... denotes an arbitrary
sequence of dimensions, with product equal to in_features.
learning_rate : float, optional
Learning rate, by default .1
Returns
-------
convergence : float
Weights should converge so that:
```torch.sum(torch.abs(weights) ** self.lebesgue_p, axis=1)```
is a vector of ones.
As a metric of convergence, the maximum absolute deviation from 1. is returned:
```convergence = torch.max(torch.abs(norm - torch.ones_like(norm))).cpu().numpy()```
"""
self.batch_norm.train()
x = x.view(x.size(0), -1) # Auto flatten
x = self.batch_norm(x)
weights = self.weight
if self.bias is not None:
# print(x.shape, x[:,0].shape)
x = torch.cat(
[
torch.ones(
(x.size(0), 1), dtype=self.bias.dtype, device=self.bias.device
),
x,
],
dim=1,
)
weights = torch.cat([self.bias.unsqueeze(1), self.weight], dim=1)
delta_weights = self.delta_weights(x, weights)
self.weight.add_(learning_rate * delta_weights[:, 1:]) # problem here
self.bias.add_(learning_rate * delta_weights[:, 0].squeeze())
else:
delta_weights = self.delta_weights(x, weights)
self.weight.add_(learning_rate * delta_weights)
# p-norm of weights should converge to 1
weights = weights.view(weights.size(0), -1)
norm = torch.sum(torch.abs(weights) ** self.lebesgue_p, axis=1)
convergence = torch.max(torch.abs(norm - torch.ones_like(norm))).cpu().numpy()
return convergence
def __str__(self) -> str:
"""String representation for the layer."""
return (
"BioLinear(\n"
+ f"in_features={self.in_features}, out_features={self.out_features}, bias={self.bias}, "
+ f"lebesgue_p={self.lebesgue_p}, ranking_param={self.ranking_param}, delta={self.delta}\n)"
)
|
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
#: This mode of transport distribution is based on the report
#: :cite:p:`isl.2015.umschlagpotenzial`.
#: The exact data for transshipment and hinterland share is taken from page 22, Figure 12
#: "Containerumschlag des Hafens Hamburg in TEU / Marktsegment 2013".
#: The modal split of the hinterland is updated based on the figures presented by
#: :cite:t:`hafen.hamburg.2020.modal.split`.
#: After those adaptions, still there were several imbalances.
#: Thus, some traffic was shifted from deep sea vessels to feeders by adding/subtracting some constants.
#: In summary, this is an educated guess based on several sources.
DEFAULT_MODE_OF_TRANSPORT_DISTRIBUTION = {
ModeOfTransport.truck: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.8 / (0.8 + 4.6) + 0.15,
ModeOfTransport.deep_sea_vessel: 4.6 / (0.8 + 4.6) - 0.15
},
ModeOfTransport.train: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.8 / (0.8 + 4.6) + 0.15,
ModeOfTransport.deep_sea_vessel: 4.6 / (0.8 + 4.6) - 0.15
},
ModeOfTransport.barge: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.8 / (0.8 + 4.6),
ModeOfTransport.deep_sea_vessel: 4.6 / (0.8 + 4.6)
},
ModeOfTransport.feeder: {
ModeOfTransport.truck: 0.8 / (0.8 + 1.9) * 0.502,
ModeOfTransport.train: 0.8 / (0.8 + 1.9) * 0.47,
ModeOfTransport.barge: 0.8 / (0.8 + 1.9) * 0.0028,
ModeOfTransport.feeder: 0,
ModeOfTransport.deep_sea_vessel: 1.9 / (0.8 + 1.9)
},
ModeOfTransport.deep_sea_vessel: {
ModeOfTransport.truck: 4.6 / (4.6 + 1.9) * 0.502,
ModeOfTransport.train: 4.6 / (4.6 + 1.9) * 0.47,
ModeOfTransport.barge: 4.6 / (4.6 + 1.9) * 0.0028,
ModeOfTransport.feeder: 1.9 / (4.6 + 1.9),
ModeOfTransport.deep_sea_vessel: 0
}
}
def seed():
repository = ModeOfTransportDistributionRepository()
repository.set_mode_of_transport_distributions(DEFAULT_MODE_OF_TRANSPORT_DISTRIBUTION)
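# --- Sanity-check sketch (illustrative; not part of the seeding module) ---
# Each inner dictionary is meant as a conditional distribution over the next mode of
# transport for a given originating mode, so every row should sum to (approximately) 1.
# This check only prints the sums; whether small deviations are tolerated or
# re-normalized is left to the distribution repository, so no hard assertion is made.
if __name__ == "__main__":
    for origin, distribution in DEFAULT_MODE_OF_TRANSPORT_DISTRIBUTION.items():
        row_sum = sum(distribution.values())
        print(f"{origin}: sum of outgoing probabilities = {row_sum:.4f}")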
|
from django.shortcuts import render, redirect, get_object_or_404
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, View
from django.utils import timezone
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from paypal.standard.forms import PayPalPaymentsForm
from .models import *
from .forms import *
import random
# Create your views here.
@login_required()
def item_list(request):
context = {
'item': Item.objects.all()
}
return render(request, 'homepage.html', context)
@login_required()
def order_summary(request):
try:
order = Order.objects.get(user=request.user,ordered=False)
return render(request,'order_summary.html',{'object':order})
except ObjectDoesNotExist:
        messages.warning(request, "You do not have an active order")
        return redirect('mysalon:home')
@login_required()
def total_order(request):
if request.method == 'POST':
amount = request.POST.get('price')
paypal_dict = {
"business":settings.PAYPAL_RECEIVER_EMAIL,
"amount":amount,
"item_name":'Products',
"invoice":str(random.randint(000,999)),
"currency_code":'USD',
"notify_url":' https://salonspa.herokuapp.com/go/',
"return_url":'https://salonspa.herokuapp.com/payment-done',
"cancel_return":'https://salonspa.herokuapp.com/payment-cancelled',
}
form = PayPalPaymentsForm(initial=paypal_dict)
return render(request,'payment.html',{"form":form})
else:
messages.info(request,'Your payment was not successful, please try again')
return render(request,'payment.html',{"form":form})
@login_required()
def jenga_payment(request):
if request.method == 'POST':
price = request.POST.get('amount')
jenga_dict = {
'token': 'xccuUjuysdsnvtloPWqiT',
'merchantCode':str(random.randint(000,999)),
'merchant': 'MerchantXYZ',
            'outletCode': ''.join(str(random.randint(0, 9)) for _ in range(10)),
            'amount': price,
'ez1_callback':'https/domain/'
}
@csrf_exempt
def paypal_return(request):
return render(request,"paypal_return.html")
class HomeView(ListView):
model = Item
paginated_by = 10
template_name = 'homepage.html'
class ItemDetailView(DetailView):
model = Item
template_name = 'product.html'
@login_required()
def posts(request):
return render(request, 'index.html')
def what_we_do(request):
all_posts = Salonposts.objects.all()
comments = Comments.objects.all()
return render(request, 'posts.html', {'post': all_posts, 'comments': comments})
@login_required()
def add_comments(request, id):
'''
view function that renders one post and has a comment section
'''
if request.method == 'POST':
form = AddCommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
post = Salonposts.objects.get(id=id)
comment.post = post
comment.save()
return redirect('comment', id=id)
else:
form = AddCommentForm()
post = Salonposts.get_one_post(id)
posts = Salonposts.objects.get(id=id)
comment = Comments.objects.filter(post=post.id)
return render(request, 'comment.html', {'form': form, 'post': post, 'comments': comment})
@login_required()
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(
item=item,
user=request.user,
ordered=False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item.quantity += 1
order_item.save()
messages.info(request, "This item quantity was updated.")
return redirect('mysalon:order-summary')
else:
order.items.add(order_item)
messages.info(request, "This item was added to your cart.")
return redirect("mysalon:detail", slug=slug)
else:
ordered_date = timezone.now()
order = Order.objects.create(
user=request.user, ordered_date=ordered_date)
order.items.add(order_item)
messages.info(request, "This item was added to your cart.")
return redirect("mysalon:detail", slug=slug)
@login_required()
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
order.items.remove(order_item)
messages.info(request, "This item was removed from your cart.")
return redirect('mysalon:order-summary')
else:
messages.info(request, "This item was not in your cart")
return redirect('mysalon:detail', slug=slug)
else:
messages.info(request, "You do not have an active order")
return redirect('mysalon:detail', slug=slug)
@login_required()
def remove_single_item_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
messages.info(request, "This item quantity was updated.")
return redirect('mysalon:order-summary')
else:
messages.info(request, "This item was not in your cart")
return redirect('mysalon:detail', slug=slug)
else:
messages.info(request, "You do not have an active order")
return redirect('mysalon:detail', slug=slug)
@login_required()
def user_dashboard(request):
"""
function for displaying dashboard
"""
return render(request, 'dashboard.html')
@login_required()
def registered_users(request):
users = User.objects.all()
context = {
'users': users
}
return render(request, 'users.html', context)
@login_required()
def user_deactivate(request, user_id):
user = User.objects.get(pk=user_id)
user.is_active = False
user.save()
messages.success(request, "User account has been successfully deactivated!")
return redirect('system_users')
@login_required()
def user_activate(request, user_id):
user = User.objects.get(pk=user_id)
user.is_active = True
user.save()
messages.success(request, "User account has been successfully activated!")
return redirect('system_users')
def create_appointment(request):
user = request.user
if request.method == 'POST':
booked = Appointment.objects.all()
for book in booked:
            if book.user == request.user:
messages.info(request,"You have a pending appointment")
return redirect('mysalon:home')
email = request.POST.get('email')
number = request.POST.get('phone')
date = request.POST.get('appointment')
service = request.POST.get('service')
if email and number and date and service:
your_appointment = Appointment(user = request.user,email=email,contact=number,date=date,service=service)
your_appointment.save()
messages.info(request,"Your appointment has been scheduled on")
return redirect('mysalon:home')
else:
messages.info(request,"Input all fields")
return redirect('mysalon:appointment')
else:
messages.info(request,'Invalid Inputs, try again')
return render(request,"appointments.html")
|
"""
functions.py - Miscellaneous functions with no other home
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import division
import decimal
import math
import re
import struct
import sys
import warnings
from collections import OrderedDict
import numpy as np
from . import Qt, debug, reload
from .metaarray import MetaArray
from .Qt import QT_LIB, QtCore, QtGui
from .util.cupy_helper import getCupy
from .util.numba_helper import getNumbaFunctions
# in order of appearance in this file.
# add new functions to this list only if they are to reside in pg namespace.
__all__ = [
'siScale', 'siFormat', 'siParse', 'siEval', 'siApply',
'Color', 'mkColor', 'mkBrush', 'mkPen', 'hsvColor',
'CIELabColor', 'colorCIELab', 'colorDistance',
'colorTuple', 'colorStr', 'intColor', 'glColor',
'makeArrowPath', 'eq',
'affineSliceCoords', 'affineSlice',
'interweaveArrays', 'interpolateArray', 'subArray',
'transformToArray', 'transformCoordinates',
'solve3DTransform', 'solveBilinearTransform',
'clip_scalar', 'clip_array', 'rescaleData', 'applyLookupTable',
'makeRGBA', 'makeARGB',
# 'try_fastpath_argb', 'ndarray_to_qimage',
'makeQImage',
# 'ndarray_from_qimage',
'imageToArray', 'colorToAlpha',
'gaussianFilter', 'downsample', 'arrayToQPath',
# 'ndarray_from_qpolygonf', 'create_qpolygonf', 'arrayToQPolygonF',
'isocurve', 'traceImage', 'isosurface',
'invertQTransform',
'pseudoScatter', 'toposort', 'disconnect', 'SignalBlock']
Colors = {
'b': QtGui.QColor(0,0,255,255),
'g': QtGui.QColor(0,255,0,255),
'r': QtGui.QColor(255,0,0,255),
'c': QtGui.QColor(0,255,255,255),
'm': QtGui.QColor(255,0,255,255),
'y': QtGui.QColor(255,255,0,255),
'k': QtGui.QColor(0,0,0,255),
'w': QtGui.QColor(255,255,255,255),
'd': QtGui.QColor(150,150,150,255),
'l': QtGui.QColor(200,200,200,255),
's': QtGui.QColor(100,100,150,255),
}
SI_PREFIXES = 'yzafpnµm kMGTPEZY'
SI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'
SI_PREFIX_EXPONENTS = dict([(SI_PREFIXES[i], (i-8)*3) for i in range(len(SI_PREFIXES))])
SI_PREFIX_EXPONENTS['u'] = -6
FLOAT_REGEX = re.compile(r'(?P<number>[+-]?((((\d+(\.\d*)?)|(\d*\.\d+))([eE][+-]?\d+)?)|((?i:nan)|(inf))))\s*((?P<siPrefix>[u' + SI_PREFIXES + r']?)(?P<suffix>\w.*))?$')
INT_REGEX = re.compile(r'(?P<number>[+-]?\d+)\s*(?P<siPrefix>[u' + SI_PREFIXES + r']?)(?P<suffix>.*)$')
def siScale(x, minVal=1e-25, allowUnicode=True):
"""
Return the recommended scale factor and SI prefix string for x.
Example::
siScale(0.0001) # returns (1e6, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if not math.isfinite(x):
return(1, '')
except:
raise
if abs(x) < minVal:
m = 0
else:
m = int(clip_scalar(math.floor(math.log(abs(x))/math.log(1000)), -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
m1 = -3*m
p = 10.**m1
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):
"""
Return the number x formatted in engineering notation with SI prefix.
Example::
siFormat(0.0001, suffix='V') # returns "100 μV"
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + "±" + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))
def siParse(s, regex=FLOAT_REGEX, suffix=None):
"""Convert a value written in SI notation to a tuple (number, si_prefix, suffix).
Example::
siParse('100 µV") # returns ('100', 'µ', 'V')
Note that in the above example, the µ symbol is the "micro sign" (UTF-8
0xC2B5), as opposed to the Greek letter mu (UTF-8 0xCEBC).
Parameters
----------
s : str
The string to parse.
regex : re.Pattern, optional
Compiled regular expression object for parsing. The default is a
general-purpose regex for parsing floating point expressions,
potentially containing an SI prefix and a suffix.
suffix : str, optional
Suffix to check for in ``s``. The default (None) indicates there may or
may not be a suffix contained in the string and it is returned if
found. An empty string ``""`` is handled differently: if the string
contains a suffix, it is discarded. This enables interpreting
characters following the numerical value as an SI prefix.
"""
s = s.strip()
if suffix is not None and len(suffix) > 0:
if s[-len(suffix):] != suffix:
raise ValueError("String '%s' does not have the expected suffix '%s'" % (s, suffix))
s = s[:-len(suffix)] + 'X' # add a fake suffix so the regex still picks up the si prefix
# special case: discard any extra characters if suffix is explicitly empty
if suffix == "":
s += 'X'
m = regex.match(s)
if m is None:
raise ValueError('Cannot parse number "%s"' % s)
try:
sip = m.group('siPrefix')
except IndexError:
sip = ''
if suffix is None:
try:
suf = m.group('suffix')
except IndexError:
suf = ''
else:
suf = suffix
return m.group('number'), '' if sip is None else sip, '' if suf is None else suf
def siEval(s, typ=float, regex=FLOAT_REGEX, suffix=None):
"""
Convert a value written in SI notation to its equivalent prefixless value.
Example::
siEval("100 μV") # returns 0.0001
"""
val, siprefix, suffix = siParse(s, regex, suffix=suffix)
v = typ(val)
return siApply(v, siprefix)
def siApply(val, siprefix):
"""
"""
n = SI_PREFIX_EXPONENTS[siprefix] if siprefix != '' else 0
if n > 0:
return val * 10**n
elif n < 0:
# this case makes it possible to use Decimal objects here
return val / 10**-n
else:
return val
class Color(QtGui.QColor):
def __init__(self, *args):
QtGui.QColor.__init__(self, mkColor(*args))
def glColor(self):
"""Return (r,g,b,a) normalized for use in opengl"""
return self.getRgbF()
def __getitem__(self, ind):
return (self.red, self.green, self.blue, self.alpha)[ind]()
def mkColor(*args):
"""
Convenience function for constructing QColor from a variety of argument
types. Accepted arguments are:
================ ================================================
'c' one of: r, g, b, c, m, y, k, w
R, G, B, [A] integers 0-255
(R, G, B, [A]) tuple of integers 0-255
float greyscale, 0.0-1.0
int see :func:`intColor() <pyqtgraph.intColor>`
(int, hues) see :func:`intColor() <pyqtgraph.intColor>`
"#RGB" hexadecimal strings prefixed with '#'
"#RGBA" previously allowed use without prefix is deprecated and
"#RRGGBB" will be removed in 0.13
"#RRGGBBAA"
QColor QColor instance; makes a copy.
================ ================================================
"""
err = 'Not sure how to make a color from "%s"' % str(args)
if len(args) == 1:
if isinstance(args[0], str):
c = args[0]
if len(c) == 1:
try:
return Colors[c]
except KeyError:
raise ValueError('No color named "%s"' % c)
have_alpha = len(c) in [5, 9] and c[0] == '#' # "#RGBA" and "#RRGGBBAA"
if not have_alpha:
# try parsing SVG named colors, including "#RGB" and "#RRGGBB".
# note that QColor.setNamedColor() treats a 9-char hex string as "#AARRGGBB".
qcol = QtGui.QColor()
qcol.setNamedColor(c)
if qcol.isValid():
return qcol
# on failure, fallback to pyqtgraph parsing
# this includes the deprecated case of non-#-prefixed hex strings
if c[0] == '#':
c = c[1:]
else:
warnings.warn(
"Parsing of hex strings that do not start with '#' is"
"deprecated and support will be removed in 0.13",
DeprecationWarning, stacklevel=2
)
if len(c) == 3:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = 255
elif len(c) == 4:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = int(c[3]*2, 16)
elif len(c) == 6:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = 255
elif len(c) == 8:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = int(c[6:8], 16)
else:
raise ValueError(f"Unknown how to convert string {c} to color")
elif isinstance(args[0], QtGui.QColor):
return QtGui.QColor(args[0])
elif np.issubdtype(type(args[0]), np.floating):
r = g = b = int(args[0] * 255)
a = 255
elif hasattr(args[0], '__len__'):
if len(args[0]) == 3:
r, g, b = args[0]
a = 255
elif len(args[0]) == 4:
r, g, b, a = args[0]
elif len(args[0]) == 2:
return intColor(*args[0])
else:
raise TypeError(err)
elif np.issubdtype(type(args[0]), np.integer):
return intColor(args[0])
else:
raise TypeError(err)
elif len(args) == 3:
r, g, b = args
a = 255
elif len(args) == 4:
r, g, b, a = args
else:
raise TypeError(err)
args = [int(a) if np.isfinite(a) else 0 for a in (r, g, b, a)]
return QtGui.QColor(*args)
def mkBrush(*args, **kwds):
"""
| Convenience function for constructing Brush.
| This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`
| Calling mkBrush(None) returns an invisible brush.
"""
if 'color' in kwds:
color = kwds['color']
elif len(args) == 1:
arg = args[0]
if arg is None:
return QtGui.QBrush(QtCore.Qt.BrushStyle.NoBrush)
elif isinstance(arg, QtGui.QBrush):
return QtGui.QBrush(arg)
else:
color = arg
elif len(args) > 1:
color = args
return QtGui.QBrush(mkColor(color))
def mkPen(*args, **kargs):
"""
Convenience function for constructing QPen.
Examples::
mkPen(color)
mkPen(color, width=2)
mkPen(cosmetic=False, width=4.5, color='r')
mkPen({'color': "#FF0", width: 2})
mkPen(None) # (no pen)
In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` """
color = kargs.get('color', None)
width = kargs.get('width', 1)
style = kargs.get('style', None)
dash = kargs.get('dash', None)
cosmetic = kargs.get('cosmetic', True)
hsv = kargs.get('hsv', None)
if len(args) == 1:
arg = args[0]
if isinstance(arg, dict):
return mkPen(**arg)
if isinstance(arg, QtGui.QPen):
return QtGui.QPen(arg) ## return a copy of this pen
elif arg is None:
style = QtCore.Qt.PenStyle.NoPen
else:
color = arg
if len(args) > 1:
color = args
if color is None:
color = mkColor('l')
if hsv is not None:
color = hsvColor(*hsv)
else:
color = mkColor(color)
pen = QtGui.QPen(QtGui.QBrush(color), width)
pen.setCosmetic(cosmetic)
if style is not None:
pen.setStyle(style)
if dash is not None:
pen.setDashPattern(dash)
# for width > 1.0, we are drawing many short segments to emulate a
# single polyline. the default SquareCap style causes artifacts.
# these artifacts can be avoided by using RoundCap.
# this does have a performance penalty, so enable it only
# for thicker line widths where the artifacts are visible.
if width > 4.0:
pen.setCapStyle(QtCore.Qt.PenCapStyle.RoundCap)
return pen
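# Illustrative usage sketch for mkPen(); color arguments follow mkColor():
#
#     mkPen('y', width=3)                                  # thick yellow pen
#     mkPen('#0F0', style=QtCore.Qt.PenStyle.DashLine)     # dashed green pen
#     mkPen({'color': (255, 0, 0, 128), 'width': 2})       # dict form
#     mkPen(None)                                          # no pen drawn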
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
"""Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)"""
return QtGui.QColor.fromHsvF(hue, sat, val, alpha)
# Matrices and math taken from "CIELab Color Space" by Gernot Hoffmann
# http://docs-hoffmann.de/cielab03022003.pdf
MATRIX_XYZ_FROM_RGB = np.array( (
( 0.4124, 0.3576, 0.1805),
( 0.2126, 0.7152, 0.0722),
( 0.0193, 0.1192, 0.9505) ) )
MATRIX_RGB_FROM_XYZ = np.array( (
( 3.2410,-1.5374,-0.4985),
(-0.9692, 1.8760, 0.0416),
( 0.0556,-0.2040, 1.0570) ) )
VECTOR_XYZn = np.array( ( 0.9505, 1.0000, 1.0891) ) # white reference at illuminant D65
def CIELabColor(L, a, b, alpha=1.0):
"""
    Generates a QColor from CIE L*a*b* values.
Parameters
----------
L: float
Lightness value ranging from 0 to 100
a, b: float
(green/red) and (blue/yellow) coordinates, typically -127 to +127.
alpha: float, optional
Opacity, ranging from 0 to 1
Notes
-----
The CIE L*a*b* color space parametrizes color in terms of a luminance `L`
and the `a` and `b` coordinates that locate the hue in terms of
a "green to red" and a "blue to yellow" axis.
    These coordinates seek to parametrize human color perception in such a way
that the Euclidean distance between the coordinates of two colors represents
the visual difference between these colors. In particular, the difference
ΔE = sqrt( (L1-L2)² + (a1-a2)² + (b1-b2)² ) = 2.3
is considered the smallest "just noticeable difference" between colors.
This simple equation represents the CIE76 standard. Later standards CIE94
and CIE2000 refine the difference calculation ΔE, while maintaining the
L*a*b* coordinates.
Alternative (and arguably more accurate) methods exist to quantify color
difference, but the CIELab color space remains a convenient approximation.
Under a known illumination, assumed to be white standard illuminant D65
here, a CIELab color induces a response in the human eye
that is described by the tristimulus value XYZ. Once this is known, an
sRGB color can be calculated to induce the same response.
More information and underlying mathematics can be found in e.g.
"CIELab Color Space" by Gernot Hoffmann, available at
http://docs-hoffmann.de/cielab03022003.pdf .
Also see :func:`colorDistance() <pyqtgraph.colorDistance>`.
"""
# convert to tristimulus XYZ values
vec_XYZ = np.full(3, ( L +16)/116 ) # Y1 = (L+16)/116
vec_XYZ[0] += a / 500 # X1 = (L+16)/116 + a/500
vec_XYZ[2] -= b / 200 # Z1 = (L+16)/116 - b/200
for idx, val in enumerate(vec_XYZ):
if val > 0.20689:
vec_XYZ[idx] = vec_XYZ[idx]**3
else:
vec_XYZ[idx] = (vec_XYZ[idx] - 16/116) / 7.787
vec_XYZ = VECTOR_XYZn * vec_XYZ # apply white reference
# print(f'XYZ: {vec_XYZ}')
# convert XYZ to linear RGB
vec_RGB = MATRIX_RGB_FROM_XYZ @ vec_XYZ
# gamma-encode linear RGB
arr_sRGB = np.zeros(3)
for idx, val in enumerate( vec_RGB[:3] ):
if val > 0.0031308: # (t) RGB value for linear/exponential transition
arr_sRGB[idx] = 1.055 * val**(1/2.4) - 0.055
else:
arr_sRGB[idx] = 12.92 * val # (s)
arr_sRGB = clip_array( arr_sRGB, 0.0, 1.0 ) # avoid QColor errors
return QtGui.QColor.fromRgbF( *arr_sRGB, alpha )
def colorCIELab(qcol):
"""
Describes a QColor by an array of CIE L*a*b* values.
Also see :func:`CIELabColor() <pyqtgraph.CIELabColor>` .
Parameters
----------
qcol: QColor
QColor to be converted
Returns
-------
NumPy array
Color coordinates `[L, a, b]`.
"""
srgb = qcol.getRgbF()[:3] # get sRGB values from QColor
# convert gamma-encoded sRGB to linear:
vec_RGB = np.zeros(3)
for idx, val in enumerate( srgb ):
if val > (12.92 * 0.0031308): # coefficients (s) * (t)
vec_RGB[idx] = ((val+0.055)/1.055)**2.4
else:
vec_RGB[idx] = val / 12.92 # (s) coefficient
    # convert linear RGB to tristimulus XYZ:
vec_XYZ = MATRIX_XYZ_FROM_RGB @ vec_RGB
# normalize with white reference and convert to L*a*b* values
vec_XYZ1 = vec_XYZ / VECTOR_XYZn
for idx, val in enumerate(vec_XYZ1):
if val > 0.008856:
vec_XYZ1[idx] = vec_XYZ1[idx]**(1/3)
else:
vec_XYZ1[idx] = 7.787*vec_XYZ1[idx] + 16/116
vec_Lab = np.array([
116 * vec_XYZ1[1] - 16, # Y1
500 * (vec_XYZ1[0] - vec_XYZ1[1]), # X1 - Y1
200 * (vec_XYZ1[1] - vec_XYZ1[2])] ) # Y1 - Z1
return vec_Lab
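# Illustrative round-trip sketch: CIELabColor() and colorCIELab() are
# approximate inverses (small deviations come from QColor's internal
# quantization of the RGB components):
#
#     col = CIELabColor(50, 20, -30)
#     colorCIELab(col)          # -> approximately array([ 50.,  20., -30.])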
def colorDistance(colors, metric='CIE76'):
"""
Returns the perceptual distances between a sequence of QColors.
See :func:`CIELabColor() <pyqtgraph.CIELabColor>` for more information.
Parameters
----------
colors: list of QColor
Two or more colors to calculate the distances between.
metric: string, optional
        Metric used to determine the difference. Only 'CIE76' is supported at this time,
where a distance of 2.3 is considered a "just noticeable difference".
The default may change as more metrics become available.
Returns
-------
List
The `N-1` sequential distances between `N` colors.
"""
metric = metric.upper()
    if len(colors) < 1: return np.array([], dtype=float)
if metric == 'CIE76':
dist = []
lab1 = None
for col in colors:
lab2 = colorCIELab(col)
if lab1 is None: #initialize on first element
lab1 = lab2
continue
dE = math.sqrt( np.sum( (lab1-lab2)**2 ) )
dist.append(dE)
lab1 = lab2
return np.array(dist)
raise ValueError(f'Metric {metric} is not available.')
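# Illustrative usage sketch: sequential CIE76 distances between three colors.
#
#     cols = [QtGui.QColor('red'), QtGui.QColor('darkred'), QtGui.QColor('blue')]
#     colorDistance(cols)       # -> array of 2 perceptual distances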
def colorTuple(c):
"""Return a tuple (R,G,B,A) from a QColor"""
return c.getRgb()
def colorStr(c):
"""Generate a hex string code from a QColor"""
return ('%02x'*4) % colorTuple(c)
def intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255):
"""
Creates a QColor from a single index. Useful for stepping through a predefined list of colors.
    The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be.
Colors are chosen by cycling across hues while varying the value (brightness).
By default, this selects from a list of 9 hues."""
hues = int(hues)
values = int(values)
ind = int(index) % (hues * values)
indh = ind % hues
indv = ind // hues
if values > 1:
v = minValue + indv * ((maxValue-minValue) // (values-1))
else:
v = maxValue
h = minHue + (indh * (maxHue-minHue)) // hues
return QtGui.QColor.fromHsv(h, sat, v, alpha)
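# Illustrative usage sketch: with the defaults (hues=9, values=1) the palette
# repeats every 9 indexes.
#
#     palette = [intColor(i) for i in range(9)]    # 9 distinct hues
#     intColor(9)                                  # same hue as intColor(0)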
def glColor(*args, **kargs):
"""
Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0
Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.
"""
c = mkColor(*args, **kargs)
return c.getRgbF()
def makeArrowPath(headLen=20, headWidth=None, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
"""
Construct a path outlining an arrow with the given dimensions.
The arrow points in the -x direction with tip positioned at 0,0.
If *headWidth* is supplied, it overrides *tipAngle* (in degrees).
If *tailLen* is None, no tail will be drawn.
"""
if headWidth is None:
headWidth = headLen * math.tan(math.radians(tipAngle * 0.5))
path = QtGui.QPainterPath()
path.moveTo(0,0)
path.lineTo(headLen, -headWidth)
if tailLen is None:
innerY = headLen - headWidth * math.tan(math.radians(baseAngle))
path.lineTo(innerY, 0)
else:
tailWidth *= 0.5
innerY = headLen - (headWidth-tailWidth) * math.tan(math.radians(baseAngle))
path.lineTo(innerY, -tailWidth)
path.lineTo(headLen + tailLen, -tailWidth)
path.lineTo(headLen + tailLen, tailWidth)
path.lineTo(innerY, tailWidth)
path.lineTo(headLen, headWidth)
path.lineTo(0,0)
return path
def eq(a, b):
"""The great missing equivalence function: Guaranteed evaluation to a single bool value.
This function has some important differences from the == operator:
1. Returns True if a IS b, even if a==b still evaluates to False.
2. While a is b will catch the case with np.nan values, special handling is done for distinct
float('nan') instances using math.isnan.
3. Tests for equivalence using ==, but silently ignores some common exceptions that can occur
       (AttributeError, ValueError).
4. When comparing arrays, returns False if the array shapes are not the same.
5. When comparing arrays of the same shape, returns True only if all elements are equal (whereas
the == operator would return a boolean array).
6. Collections (dict, list, etc.) must have the same type to be considered equal. One
consequence is that comparing a dict to an OrderedDict will always return False.
"""
if a is b:
return True
# The above catches np.nan, but not float('nan')
if isinstance(a, float) and isinstance(b, float):
if math.isnan(a) and math.isnan(b):
return True
# Avoid comparing large arrays against scalars; this is expensive and we know it should return False.
aIsArr = isinstance(a, (np.ndarray, MetaArray))
bIsArr = isinstance(b, (np.ndarray, MetaArray))
if (aIsArr or bIsArr) and type(a) != type(b):
return False
    # If both inputs are arrays, we can speed up comparison if shapes / dtypes don't match
# NOTE: arrays of dissimilar type should be considered unequal even if they are numerically
# equal because they may behave differently when computed on.
if aIsArr and bIsArr and (a.shape != b.shape or a.dtype != b.dtype):
return False
# Recursively handle common containers
if isinstance(a, dict) and isinstance(b, dict):
if type(a) != type(b) or len(a) != len(b):
return False
if set(a.keys()) != set(b.keys()):
return False
for k, v in a.items():
if not eq(v, b[k]):
return False
if isinstance(a, OrderedDict) or sys.version_info >= (3, 7):
for a_item, b_item in zip(a.items(), b.items()):
if not eq(a_item, b_item):
return False
return True
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
if type(a) != type(b) or len(a) != len(b):
return False
for v1,v2 in zip(a, b):
if not eq(v1, v2):
return False
return True
# Test for equivalence.
    # If the test raises a recognized exception, then return False
try:
try:
# Sometimes running catch_warnings(module=np) generates AttributeError ???
catcher = warnings.catch_warnings(module=np) # ignore numpy futurewarning (numpy v. 1.10)
catcher.__enter__()
except Exception:
catcher = None
e = a==b
except (ValueError, AttributeError):
return False
except:
print('failed to evaluate equivalence for:')
print(" a:", str(type(a)), str(a))
print(" b:", str(type(b)), str(b))
raise
finally:
if catcher is not None:
catcher.__exit__(None, None, None)
t = type(e)
if t is bool:
return e
elif t is np.bool_:
return bool(e)
elif isinstance(e, np.ndarray) or (hasattr(e, 'implements') and e.implements('MetaArray')):
try: ## disaster: if a is an empty array and b is not, then e.all() is True
if a.shape != b.shape:
return False
except:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise TypeError("== operator returned type %s" % str(type(e)))
def affineSliceCoords(shape, origin, vectors, axes):
"""Return the array of coordinates used to sample data arrays in affineSlice().
"""
# sanity check
if len(shape) != len(vectors):
raise Exception("shape and vectors must have same length.")
if len(origin) != len(axes):
raise Exception("origin and axes must have same length.")
for v in vectors:
if len(v) != len(axes):
raise Exception("each vector must be same length as axes.")
shape = list(map(np.ceil, shape))
## make sure vectors are arrays
if not isinstance(vectors, np.ndarray):
vectors = np.array(vectors)
if not isinstance(origin, np.ndarray):
origin = np.array(origin)
origin.shape = (len(axes),) + (1,)*len(shape)
## Build array of sample locations.
grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes
x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic
x += origin
return x
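# Illustrative shape sketch: for the 2D-slice-of-3D-volume example used in
# affineSlice() below, the coordinate array has shape (len(axes),) + shape.
#
#     coords = affineSliceCoords((20, 20), origin=(40, 0, 0),
#                                vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1, 2, 3))
#     coords.shape              # -> (3, 20, 20)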
def affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):
"""
Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays
such as MRI images for viewing as 1D or 2D data.
    The slicing axes are arbitrary; they do not need to be orthogonal to the original data or even to each other. It is
possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger
datasets. The original data is interpolated onto a new array of coordinates using either interpolateArray if order<2
or scipy.ndimage.map_coordinates otherwise.
For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`
============== ====================================================================================================
**Arguments:**
*data* (ndarray) the original dataset
*shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))
*origin* the location in the original dataset that will become the origin of the sliced data.
*vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same
length as *axes*. If the vectors are not unit length, the result will be scaled relative to the
original data. If the vectors are not orthogonal, the result will be sheared relative to the
original data.
*axes* The axes in the original dataset which correspond to the slice *vectors*
*order* The order of spline interpolation. Default is 1 (linear). See scipy.ndimage.map_coordinates
for more information.
*returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select
values from the original dataset.
*All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*
--------------------------------------------------------------------------------------------------------------------
============== ====================================================================================================
Note the following must be true:
| len(shape) == len(vectors)
| len(origin) == len(axes) == len(vectors[i])
Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes
* data = array with dims (time, x, y, z) = (100, 40, 40, 40)
* The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
* The origin of the slice will be at (x,y,z) = (40, 0, 0)
* We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
The call for this example would look like::
affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))
"""
x = affineSliceCoords(shape, origin, vectors, axes)
## transpose data so slice axes come first
trAx = list(range(data.ndim))
for ax in axes:
trAx.remove(ax)
tr1 = tuple(axes) + tuple(trAx)
data = data.transpose(tr1)
#print "tr1:", tr1
## dims are now [(slice axes), (other axes)]
if order > 1:
try:
import scipy.ndimage
except ImportError:
raise ImportError("Interpolating with order > 1 requires the scipy.ndimage module, but it could not be imported.")
# iterate manually over unused axes since map_coordinates won't do it for us
extraShape = data.shape[len(axes):]
output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
for inds in np.ndindex(*extraShape):
ind = (Ellipsis,) + inds
output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)
else:
# map_coordinates expects the indexes as the first axis, whereas
# interpolateArray expects indexes at the last axis.
tr = tuple(range(1, x.ndim)) + (0,)
output = interpolateArray(data, x.transpose(tr), order=order)
tr = list(range(output.ndim))
trb = []
for i in range(min(axes)):
ind = tr1.index(i) + (len(shape)-len(axes))
tr.remove(ind)
trb.append(ind)
tr2 = tuple(trb+tr)
## Untranspose array before returning
output = output.transpose(tr2)
if returnCoords:
return (output, x)
else:
return output
def interweaveArrays(*args):
"""
Parameters
----------
args : numpy.ndarray
series of 1D numpy arrays of the same length and dtype
Returns
-------
numpy.ndarray
A numpy array with all the input numpy arrays interwoven
Examples
--------
    >>> result = interweaveArrays(np.array([0, 2, 4]), np.array([1, 3, 5]))
>>> result
array([0, 1, 2, 3, 4, 5])
"""
size = sum(x.size for x in args)
result = np.empty((size,), dtype=args[0].dtype)
n = len(args)
for index, array in enumerate(args):
result[index::n] = array
return result
def interpolateArray(data, x, default=0.0, order=1):
"""
N-dimensional interpolation similar to scipy.ndimage.map_coordinates.
This function returns linearly-interpolated values sampled from a regular
grid of data. It differs from `ndimage.map_coordinates` by allowing broadcasting
within the input array.
============== ===========================================================================================
**Arguments:**
*data* Array of any shape containing the values to be interpolated.
*x* Array with (shape[-1] <= data.ndim) containing the locations within *data* to interpolate.
(note: the axes for this argument are transposed relative to the same argument for
`ndimage.map_coordinates`).
*default* Value to return for locations in *x* that are outside the bounds of *data*.
*order* Order of interpolation: 0=nearest, 1=linear.
============== ===========================================================================================
Returns array of shape (x.shape[:-1] + data.shape[x.shape[-1]:])
For example, assume we have the following 2D image data::
>>> data = np.array([[1, 2, 4 ],
[10, 20, 40 ],
[100, 200, 400]])
To compute a single interpolated point from this data::
>>> x = np.array([(0.5, 0.5)])
>>> interpolateArray(data, x)
array([ 8.25])
To compute a 1D list of interpolated locations::
>>> x = np.array([(0.5, 0.5),
(1.0, 1.0),
(1.0, 2.0),
(1.5, 0.0)])
>>> interpolateArray(data, x)
array([ 8.25, 20. , 40. , 55. ])
To compute a 2D array of interpolated locations::
>>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],
[(1.0, 1.0), (1.5, 0.0)]])
>>> interpolateArray(data, x)
array([[ 8.25, 40. ],
[ 20. , 55. ]])
..and so on. The *x* argument may have any shape as long as
```x.shape[-1] <= data.ndim```. In the case that
```x.shape[-1] < data.ndim```, then the remaining axes are simply
broadcasted as usual. For example, we can interpolate one location
from an entire row of the data::
>>> x = np.array([[0.5]])
>>> interpolateArray(data, x)
array([[ 5.5, 11. , 22. ]])
This is useful for interpolating from arrays of colors, vertexes, etc.
"""
if order not in (0, 1):
raise ValueError("interpolateArray requires order=0 or 1 (got %s)" % order)
prof = debug.Profiler()
nd = data.ndim
md = x.shape[-1]
if md > nd:
raise TypeError("x.shape[-1] must be less than or equal to data.ndim")
totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes
if order == 0:
xinds = np.round(x).astype(int) # NOTE: for 0.5 this rounds to the nearest *even* number
for ax in range(md):
mask = (xinds[...,ax] >= 0) & (xinds[...,ax] <= data.shape[ax]-1)
xinds[...,ax][~mask] = 0
# keep track of points that need to be set to default
totalMask &= mask
result = data[tuple([xinds[...,i] for i in range(xinds.shape[-1])])]
elif order == 1:
# First we generate arrays of indexes that are needed to
# extract the data surrounding each point
fields = np.mgrid[(slice(0,order+1),) * md]
xmin = np.floor(x).astype(int)
xmax = xmin + 1
indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])
fieldInds = []
for ax in range(md):
mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1)
# keep track of points that need to be set to default
totalMask &= mask
# ..and keep track of indexes that are out of bounds
# (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out
# of bounds, but the interpolation will work anyway)
mask &= (xmax[...,ax] < data.shape[ax])
axisIndex = indexes[...,ax][fields[ax]]
axisIndex[axisIndex < 0] = 0
axisIndex[axisIndex >= data.shape[ax]] = 0
fieldInds.append(axisIndex)
prof()
# Get data values surrounding each requested point
fieldData = data[tuple(fieldInds)]
prof()
## Interpolate
s = np.empty((md,) + fieldData.shape, dtype=float)
dx = x - xmin
# reshape fields for arithmetic against dx
for ax in range(md):
f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))
sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])
sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))
s[ax] = sax
        s = np.prod(s, axis=0)
result = fieldData * s
for i in range(md):
result = result.sum(axis=0)
prof()
if totalMask.ndim > 0:
result[~totalMask] = default
else:
if totalMask is False:
result[:] = default
prof()
return result
def subArray(data, offset, shape, stride):
"""
Unpack a sub-array from *data* using the specified offset, shape, and stride.
Note that *stride* is specified in array elements, not bytes.
For example, we have a 2x3 array packed in a 1D array as follows::
data = [_, _, 00, 01, 02, _, 10, 11, 12, _]
Then we can unpack the sub-array with this call::
subArray(data, offset=2, shape=(2, 3), stride=(4, 1))
..which returns::
[[00, 01, 02],
[10, 11, 12]]
This function operates only on the first axis of *data*. So changing
the input in the example above to have shape (10, 7) would cause the
output to have shape (2, 3, 7).
"""
data = np.ascontiguousarray(data)[offset:]
shape = tuple(shape)
extraShape = data.shape[1:]
strides = list(data.strides[::-1])
itemsize = strides[-1]
for s in stride[1::-1]:
strides.append(itemsize * s)
strides = tuple(strides[::-1])
return np.ndarray(buffer=data, shape=shape+extraShape, strides=strides, dtype=data.dtype)
def transformToArray(tr):
"""
Given a QTransform, return a 3x3 numpy array.
Given a QMatrix4x4, return a 4x4 numpy array.
Example: map an array of x,y coordinates through a transform::
## coordinates to map are (1,5), (2,6), (3,7), and (4,8)
coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work
## Make an example transform
tr = QtGui.QTransform()
tr.translate(3,4)
tr.scale(2, 0.1)
## convert to array
        m = pg.transformToArray(tr)[:2]   # ignore the perspective portion of the transformation
## map coordinates through transform
mapped = np.dot(m, coords)
"""
#return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])
## The order of elements given by the method names m11..m33 is misleading--
## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in
## a transformation matrix. However, with QTransform these values appear at m31 and m32.
## So the correct interpretation is transposed:
if isinstance(tr, QtGui.QTransform):
return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])
elif isinstance(tr, QtGui.QMatrix4x4):
return np.array(tr.copyDataTo()).reshape(4,4)
else:
raise Exception("Transform argument must be either QTransform or QMatrix4x4.")
def transformCoordinates(tr, coords, transpose=False):
"""
Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.
The shape of coords must be (2,...) or (3,...)
The mapping will _ignore_ any perspective transformations.
For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.
Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To
allow this, use transpose=True.
"""
if transpose:
## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.
coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))
nd = coords.shape[0]
if isinstance(tr, np.ndarray):
m = tr
else:
m = transformToArray(tr)
m = m[:m.shape[0]-1] # remove perspective
## If coords are 3D and tr is 2D, assume no change for Z axis
if m.shape == (2,3) and nd == 3:
m2 = np.zeros((3,4))
m2[:2, :2] = m[:2,:2]
m2[:2, 3] = m[:2,2]
m2[2,2] = 1
m = m2
## if coords are 2D and tr is 3D, ignore Z axis
if m.shape == (3,4) and nd == 2:
m2 = np.empty((2,3))
m2[:,:2] = m[:2,:2]
m2[:,2] = m[:2,3]
m = m2
## reshape tr and coords to prepare for multiplication
m = m.reshape(m.shape + (1,)*(coords.ndim-1))
coords = coords[np.newaxis, ...]
# separate scale/rotate and translation
translate = m[:,-1]
m = m[:, :-1]
## map coordinates and return
# nan or inf points will not plot, but should not generate warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
mapped = (m*coords).sum(axis=1) ## apply scale/rotate
mapped += translate
if transpose:
## move first axis to end.
mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))
return mapped
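# Illustrative usage sketch: map an (N, 2) array of points through a QTransform,
# keeping the coordinate axis last via transpose=True.
#
#     tr = QtGui.QTransform()
#     tr.translate(10, 0)
#     pts = np.array([[0.0, 0.0], [1.0, 2.0]])
#     transformCoordinates(tr, pts, transpose=True)   # -> [[10., 0.], [11., 2.]]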
def solve3DTransform(points1, points2):
"""
Find a 3D transformation matrix that maps points1 onto points2.
Points must be specified as either lists of 4 Vectors or
(4, 3) arrays.
"""
import numpy.linalg
pts = []
for inp in (points1, points2):
if isinstance(inp, np.ndarray):
A = np.empty((4,4), dtype=float)
A[:,:3] = inp[:,:3]
A[:,3] = 1.0
else:
A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])
pts.append(A)
## solve 3 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((4,4))
for i in range(3):
## solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = numpy.linalg.solve(pts[0], pts[1][:,i])
return matrix
def solveBilinearTransform(points1, points2):
"""
Find a bilinear transformation matrix (2x4) that maps points1 onto points2.
Points must be specified as a list of 4 Vector, Point, QPointF, etc.
To use this matrix to map a point [x,y]::
mapped = np.dot(matrix, [x*y, x, y, 1])
"""
import numpy.linalg
## A is 4 rows (points) x 4 columns (xy, x, y, 1)
## B is 4 rows (points) x 2 columns (x, y)
A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
## solve 2 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((2,4))
for i in range(2):
matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix
return matrix
def clip_scalar(val, vmin, vmax):
""" convenience function to avoid using np.clip for scalar values """
return vmin if val < vmin else vmax if val > vmax else val
# umath.clip was slower than umath.maximum(umath.minimum).
# See https://github.com/numpy/numpy/pull/20134 for details.
_win32_clip_workaround_needed = (
sys.platform == 'win32' and
tuple(map(int, np.__version__.split(".")[:2])) < (1, 22)
)
def clip_array(arr, vmin, vmax, out=None):
# replacement for np.clip due to regression in
# performance since numpy 1.17
# https://github.com/numpy/numpy/issues/14281
if vmin is None and vmax is None:
# let np.clip handle the error
return np.clip(arr, vmin, vmax, out=out)
if vmin is None:
return np.core.umath.minimum(arr, vmax, out=out)
elif vmax is None:
return np.core.umath.maximum(arr, vmin, out=out)
elif _win32_clip_workaround_needed:
if out is None:
out = np.empty(arr.shape, dtype=np.find_common_type([arr.dtype], [type(vmax)]))
out = np.core.umath.minimum(arr, vmax, out=out)
return np.core.umath.maximum(out, vmin, out=out)
else:
return np.core.umath.clip(arr, vmin, vmax, out=out)
def _rescaleData_nditer(data_in, scale, offset, work_dtype, out_dtype, clip):
"""Refer to documentation for rescaleData()"""
data_out = np.empty_like(data_in, dtype=out_dtype)
# integer clip operations are faster than float clip operations
# so test to see if we can perform integer clipping
fits_int32 = False
if data_in.dtype.kind in 'ui' and out_dtype.kind in 'ui':
# estimate whether data range after rescale will fit within an int32.
# this means that the input dtype should be an 8-bit or 16-bit integer type.
# casting to an int32 will lose the fractional part, therefore the
# output dtype must be an integer kind.
lim_in = np.iinfo(data_in.dtype)
# convert numpy scalar to python scalar to avoid overflow warnings
lo = offset.item(0) if isinstance(offset, np.number) else offset
dst_bounds = scale * (lim_in.min - lo), scale * (lim_in.max - lo)
if dst_bounds[1] < dst_bounds[0]:
dst_bounds = dst_bounds[1], dst_bounds[0]
lim32 = np.iinfo(np.int32)
fits_int32 = lim32.min < dst_bounds[0] and dst_bounds[1] < lim32.max
it = np.nditer([data_in, data_out],
flags=['external_loop', 'buffered'],
op_flags=[['readonly'], ['writeonly', 'no_broadcast']],
op_dtypes=[None, work_dtype],
casting='unsafe',
buffersize=32768)
with it:
for x, y in it:
y[...] = x
y -= offset
y *= scale
# Clip before converting dtype to avoid overflow
if clip is not None:
if fits_int32:
                    # cast to int32 for a faster integer clip; the clipped result is written back into the float work buffer
np.core.umath.clip(y.astype(np.int32), clip[0], clip[1], out=y)
else:
clip_array(y, clip[0], clip[1], out=y)
return data_out
def rescaleData(data, scale, offset, dtype=None, clip=None):
"""Return data rescaled and optionally cast to a new dtype.
The scaling operation is::
data => (data-offset) * scale
"""
if dtype is None:
out_dtype = data.dtype
else:
out_dtype = np.dtype(dtype)
if out_dtype.kind in 'ui':
lim = np.iinfo(out_dtype)
if clip is None:
# don't let rescale cause integer overflow
clip = lim.min, lim.max
clip = max(clip[0], lim.min), min(clip[1], lim.max)
# make clip limits integer-valued (no need to cast to int)
# this improves performance, especially on Windows
clip = [math.trunc(x) for x in clip]
if np.can_cast(data, np.float32):
work_dtype = np.float32
else:
work_dtype = np.float64
cp = getCupy()
if cp and cp.get_array_module(data) == cp:
# Cupy does not support nditer
# https://github.com/cupy/cupy/issues/5021
data_out = data.astype(work_dtype, copy=True)
data_out -= offset
data_out *= scale
# Clip before converting dtype to avoid overflow
if clip is not None:
clip_array(data_out, clip[0], clip[1], out=data_out)
# don't copy if no change in dtype
return data_out.astype(out_dtype, copy=False)
numba_fn = getNumbaFunctions()
if numba_fn and clip is not None:
# if we got here by makeARGB(), clip will not be None at this point
return numba_fn.rescaleData(data, scale, offset, out_dtype, clip)
return _rescaleData_nditer(data, scale, offset, work_dtype, out_dtype, clip)
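# Illustrative usage sketch (here `img16` stands for any 2D uint16 array):
# map the level range 1000..5000 onto 0..255 and cast to unsigned bytes.
#
#     lo, hi = 1000, 5000
#     img8 = rescaleData(img16, scale=255.0 / (hi - lo), offset=lo, dtype=np.ubyte)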
def applyLookupTable(data, lut):
"""
Uses values in *data* as indexes to select values from *lut*.
The returned data has shape data.shape + lut.shape[1:]
Note: color gradient lookup tables can be generated using GradientWidget.
Parameters
----------
data : ndarray
lut : ndarray
Either cupy or numpy arrays are accepted, though this function has only
consistently behaved correctly on windows with cuda toolkit version >= 11.1.
"""
if data.dtype.kind not in ('i', 'u'):
data = data.astype(int)
cp = getCupy()
if cp and cp.get_array_module(data) == cp:
# cupy.take only supports "wrap" mode
return cp.take(lut, cp.clip(data, 0, lut.shape[0] - 1), axis=0)
else:
return np.take(lut, data, axis=0, mode='clip')
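# Illustrative usage sketch (here `img8` stands for any uint8 image array):
# index each pixel into a 256-entry RGB lookup table.
#
#     lut = np.zeros((256, 3), dtype=np.ubyte)
#     lut[:, 0] = np.arange(256)            # black-to-red ramp
#     rgb = applyLookupTable(img8, lut)     # shape becomes img8.shape + (3,)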
def makeRGBA(*args, **kwds):
"""Equivalent to makeARGB(..., useRGBA=True)"""
kwds['useRGBA'] = True
return makeARGB(*args, **kwds)
def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False, maskNans=True, output=None):
"""
Convert an array of values into an ARGB array suitable for building QImages,
OpenGL textures, etc.
Returns the ARGB array (unsigned byte) and a boolean indicating whether
there is alpha channel data. This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a
lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
    data           numpy array of int/float types.
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
be set to the length of the lookup table, or 255 if no lookup table is provided.
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Lookup tables can be built using ColorMap or GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the *actual* order
is BGRA).
maskNans Enable or disable masking NaNs as transparent.
============== ==================================================================================
"""
cp = getCupy()
xp = cp.get_array_module(data) if cp else np
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, xp.ndarray):
lut = xp.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = xp.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = xp.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = xp.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, xp.ndarray):
levels = xp.array(levels)
levels = levels.astype(xp.float64)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile('check inputs')
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0]
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = xp.ubyte
else:
dtype = xp.min_scalar_type(lut.shape[0]-1)
# awkward, but fastest numpy native nan evaluation
nanMask = None
if maskNans and data.dtype.kind == 'f' and xp.isnan(data.min()):
nanMask = xp.isnan(data)
if data.ndim > 2:
nanMask = xp.any(nanMask, axis=-1)
# Apply levels if given
if levels is not None:
if isinstance(levels, xp.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = xp.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal = xp.nextafter(maxVal, 2*maxVal)
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
newData[...,i] = rescaleData(data[...,i], scale / rng, minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal = xp.nextafter(maxVal, 2*maxVal)
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
data = rescaleData(data, scale/rng, minVal, dtype=dtype)
profile('apply levels')
# apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
if data.dtype != xp.ubyte:
data = xp.clip(data, 0, 255).astype(xp.ubyte)
profile('apply lut')
# this will be the final image array
if output is None:
imgData = xp.empty(data.shape[:2]+(4,), dtype=xp.ubyte)
else:
imgData = output
profile('allocate')
# decide channel order
if useRGBA:
dst_order = [0, 1, 2, 3] # R,G,B,A
elif sys.byteorder == 'little':
dst_order = [2, 1, 0, 3] # B,G,R,A (ARGB32 little endian)
else:
dst_order = [1, 2, 3, 0] # A,R,G,B (ARGB32 big endian)
# copy data into image array
fastpath = try_fastpath_argb(xp, data, imgData, useRGBA)
if fastpath:
pass
elif data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., xp.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., dst_order[i]] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., dst_order[i]] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., dst_order[i]] = data[..., i]
profile('reorder channels')
# add opaque alpha channel if needed
if data.ndim == 3 and data.shape[2] == 4:
alpha = True
else:
alpha = False
if not fastpath: # fastpath has already filled it in
imgData[..., dst_order[3]] = 255
# apply nan mask through alpha channel
if nanMask is not None:
alpha = True
# Workaround for https://github.com/cupy/cupy/issues/4693
if xp == cp:
imgData[nanMask, :, dst_order[3]] = 0
else:
imgData[nanMask, dst_order[3]] = 0
profile('alpha channel')
return imgData, alpha
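# Illustrative usage sketch (here `data2d` stands for any 2D float array):
# rescale to 8 bits and wrap the result in a QImage.
#
#     argb, alpha = makeARGB(data2d, levels=(data2d.min(), data2d.max()))
#     qimg = makeQImage(argb, alpha, transpose=False)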
def try_fastpath_argb(xp, ain, aout, useRGBA):
# we only optimize for certain cases
# return False if we did not handle it
can_handle = xp is np and ain.dtype == xp.ubyte and ain.flags['C_CONTIGUOUS']
if not can_handle:
return False
nrows, ncols = ain.shape[:2]
nchans = 1 if ain.ndim == 2 else ain.shape[2]
Format = QtGui.QImage.Format
if nchans == 1:
in_fmt = Format.Format_Grayscale8
elif nchans == 3:
in_fmt = Format.Format_RGB888
else:
in_fmt = Format.Format_RGBA8888
if useRGBA:
out_fmt = Format.Format_RGBA8888
else:
out_fmt = Format.Format_ARGB32
if in_fmt == out_fmt:
aout[:] = ain
return True
npixels_chunk = 512*1024
batch = int(npixels_chunk / ncols / nchans)
batch = max(1, batch)
row_beg = 0
while row_beg < nrows:
row_end = min(row_beg + batch, nrows)
ain_view = ain[row_beg:row_end, ...]
aout_view = aout[row_beg:row_end, ...]
qimg = QtGui.QImage(ain_view, ncols, ain_view.shape[0], ain.strides[0], in_fmt)
qimg = qimg.convertToFormat(out_fmt)
aout_view[:] = imageToArray(qimg, copy=False, transpose=False)
row_beg = row_end
return True
def ndarray_to_qimage(arr, fmt):
"""
Low level function to encapsulate QImage creation differences between bindings.
"arr" is assumed to be C-contiguous.
"""
# C++ QImage has two kind of constructors
# - QImage(const uchar*, ...)
# - QImage(uchar*, ...)
# If the const constructor is used, subsequently calling any non-const method
# will trigger the COW mechanism, i.e. a copy is made under the hood.
if QT_LIB.startswith('PyQt'):
# PyQt5 -> non-const
# PyQt6 >= 6.0.1 -> non-const
img_ptr = int(Qt.sip.voidptr(arr)) # or arr.ctypes.data
else:
# bindings that support ndarray
# PyQt5 -> const
# PyQt6 >= 6.0.1 -> const
# PySide2 -> non-const
# PySide6 -> non-const
img_ptr = arr
h, w = arr.shape[:2]
bytesPerLine = arr.strides[0]
qimg = QtGui.QImage(img_ptr, w, h, bytesPerLine, fmt)
qimg.data = arr
return qimg
def makeQImage(imgData, alpha=None, copy=True, transpose=True):
"""
Turn an ARGB array into QImage.
By default, the data is copied; changes to the array will not
be reflected in the image. The image will be given a 'data' attribute
pointing to the array which shares its data to prevent python
freeing that memory while the image is in use.
============== ===================================================================
**Arguments:**
imgData Array of data to convert. Must have shape (height, width),
(height, width, 3), or (height, width, 4). If transpose is
True, then the first two axes are swapped. The array dtype
must be ubyte. For 2D arrays, the value is interpreted as
greyscale. For 3D arrays, the order of values in the 3rd
axis must be (b, g, r, a).
alpha If the input array is 3D and *alpha* is True, the QImage
returned will have format ARGB32. If False,
the format will be RGB32. By default, _alpha_ is True if
array.shape[2] == 4.
copy If True, the data is copied before converting to QImage.
If False, the new QImage points directly to the data in the array.
Note that the array must be contiguous for this to work
(see numpy.ascontiguousarray).
transpose If True (the default), the array x/y axes are transposed before
creating the image. Note that Qt expects the axes to be in
(height, width) order whereas pyqtgraph usually prefers the
opposite.
============== ===================================================================
"""
## create QImage from buffer
profile = debug.Profiler()
copied = False
if imgData.ndim == 2:
imgFormat = QtGui.QImage.Format.Format_Grayscale8
elif imgData.ndim == 3:
# If we didn't explicitly specify alpha, check the array shape.
if alpha is None:
alpha = (imgData.shape[2] == 4)
if imgData.shape[2] == 3: # need to make alpha channel (even if alpha==False; QImage requires 32 bpp)
if copy is True:
d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)
d2[:,:,:3] = imgData
d2[:,:,3] = 255
imgData = d2
copied = True
else:
raise Exception('Array has only 3 channels; cannot make QImage without copying.')
profile("add alpha channel")
if alpha:
imgFormat = QtGui.QImage.Format.Format_ARGB32
else:
imgFormat = QtGui.QImage.Format.Format_RGB32
else:
raise TypeError("Image array must have ndim = 2 or 3.")
if transpose:
imgData = imgData.transpose((1, 0, 2)) # QImage expects row-major order
if not imgData.flags['C_CONTIGUOUS']:
if copy is False:
extra = ' (try setting transpose=False)' if transpose else ''
raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)
imgData = np.ascontiguousarray(imgData)
copied = True
profile("ascontiguousarray")
if copy is True and copied is False:
imgData = imgData.copy()
profile("copy")
return ndarray_to_qimage(imgData, imgFormat)
def ndarray_from_qimage(qimg):
img_ptr = qimg.bits()
if img_ptr is None:
raise ValueError("Null QImage not supported")
h, w = qimg.height(), qimg.width()
bpl = qimg.bytesPerLine()
depth = qimg.depth()
logical_bpl = w * depth // 8
if QT_LIB.startswith('PyQt'):
# sizeInBytes() was introduced in Qt 5.10
# however PyQt5 5.12 will fail with:
# "TypeError: QImage.sizeInBytes() is a private method"
# note that sizeInBytes() works fine with:
# PyQt5 5.15, PySide2 5.12, PySide2 5.15
img_ptr.setsize(h * bpl)
memory = np.frombuffer(img_ptr, dtype=np.ubyte).reshape((h, bpl))
memory = memory[:, :logical_bpl]
if depth in (8, 24, 32):
dtype = np.uint8
nchan = depth // 8
elif depth in (16, 64):
dtype = np.uint16
nchan = depth // 16
else:
raise ValueError("Unsupported Image Type")
shape = h, w
if nchan != 1:
shape = shape + (nchan,)
arr = memory.view(dtype).reshape(shape)
return arr
def imageToArray(img, copy=False, transpose=True):
"""
Convert a QImage into numpy array. The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.
By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if
the QImage is collected before the array, there may be trouble).
The array will have shape (width, height, (b,g,r,a)).
"""
arr = ndarray_from_qimage(img)
fmt = img.format()
if fmt == img.Format.Format_RGB32:
arr[...,3] = 255
if copy:
arr = arr.copy()
if transpose:
return arr.transpose((1,0,2))
else:
return arr
def colorToAlpha(data, color):
"""
Given an RGBA image in *data*, convert *color* to be transparent.
*data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be
an array (3) of ubyte values.
This is particularly useful for use with images that have a black or white background.
Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c
Credit:
/*
* Color To Alpha plug-in v1.0 by Seth Burgess, sjburges@gimp.org 1999/05/14
* with algorithm by clahey
*/
"""
data = data.astype(float)
if data.shape[-1] == 3: ## add alpha channel if needed
d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)
d2[...,:3] = data
d2[...,3] = 255
data = d2
color = color.astype(float)
alpha = np.zeros(data.shape[:2]+(3,), dtype=float)
output = data.copy()
for i in [0,1,2]:
d = data[...,i]
c = color[i]
mask = d > c
alpha[...,i][mask] = (d[mask] - c) / (255. - c)
imask = d < c
alpha[...,i][imask] = (c - d[imask]) / c
output[...,3] = alpha.max(axis=2) * 255.
mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel
correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha
for i in [0,1,2]:
output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]
output[...,3][mask] *= data[...,3][mask] / 255. ## combine computed and previous alpha values
#raise Exception()
return np.clip(output, 0, 255).astype(np.ubyte)
def gaussianFilter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
cp = getCupy()
xp = cp.get_array_module(data) if cp else np
if xp.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = sigma[ax]
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = xp.arange(-ksize, ksize)
kernel = xp.exp(-x**2 / (2*s**2))
kshape = [1,] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*xp.pi)**0.5)
filtered = scale * xp.fft.irfft(xp.fft.rfft(filtered, shape, axis=ax) *
xp.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
filtered = filtered[tuple(sl)]
return filtered + baseline
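# Illustrative usage sketch: approximate Gaussian smoothing of a noisy trace.
#
#     noisy = np.random.normal(size=500)
#     smooth = gaussianFilter(noisy, sigma=5)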
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
if n <= 1:
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
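# Illustrative usage sketch: average every 10 samples along the first axis.
#
#     y = np.arange(100, dtype=float)
#     downsample(y, 10)         # -> array([ 4.5, 14.5, ..., 94.5])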
def _compute_backfill_indices(isfinite):
# the presence of inf/nans result in an empty QPainterPath being generated
# this behavior started in Qt 5.12.3 and was introduced in this commit
# https://github.com/qt/qtbase/commit/c04bd30de072793faee5166cff866a4c4e0a9dd7
# We therefore replace non-finite values
# credit: Divakar https://stackoverflow.com/a/41191127/643629
mask = ~isfinite
idx = np.arange(len(isfinite))
idx[mask] = -1
np.maximum.accumulate(idx, out=idx)
first = np.searchsorted(idx, 0)
if first < len(isfinite):
# Replace all non-finite entries from beginning of arr with the first finite one
idx[:first] = first
return idx
else:
return None
def _arrayToQPath_all(x, y, finiteCheck):
n = x.shape[0]
if n == 0:
return QtGui.QPainterPath()
finite_idx = None
if finiteCheck:
isfinite = np.isfinite(x) & np.isfinite(y)
if not isfinite.all():
finite_idx = isfinite.nonzero()[0]
n = len(finite_idx)
if n < 2:
return QtGui.QPainterPath()
chunksize = 10000
numchunks = (n + chunksize - 1) // chunksize
minchunks = 3
if numchunks < minchunks:
# too few chunks, batching would be a pessimization
poly = create_qpolygonf(n)
arr = ndarray_from_qpolygonf(poly)
if finite_idx is None:
arr[:, 0] = x
arr[:, 1] = y
else:
arr[:, 0] = x[finite_idx]
arr[:, 1] = y[finite_idx]
path = QtGui.QPainterPath()
if hasattr(path, 'reserve'): # Qt 5.13
path.reserve(n)
path.addPolygon(poly)
return path
# at this point, we have numchunks >= minchunks
path = QtGui.QPainterPath()
if hasattr(path, 'reserve'): # Qt 5.13
path.reserve(n)
subpoly = QtGui.QPolygonF()
subpath = None
for idx in range(numchunks):
sl = slice(idx*chunksize, min((idx+1)*chunksize, n))
currsize = sl.stop - sl.start
if currsize != subpoly.size():
if hasattr(subpoly, 'resize'):
subpoly.resize(currsize)
else:
subpoly.fill(QtCore.QPointF(), currsize)
subarr = ndarray_from_qpolygonf(subpoly)
if finite_idx is None:
subarr[:, 0] = x[sl]
subarr[:, 1] = y[sl]
else:
fiv = finite_idx[sl] # view
subarr[:, 0] = x[fiv]
subarr[:, 1] = y[fiv]
if subpath is None:
subpath = QtGui.QPainterPath()
subpath.addPolygon(subpoly)
path.connectPath(subpath)
if hasattr(subpath, 'clear'): # Qt 5.13
subpath.clear()
else:
subpath = None
return path
def _arrayToQPath_finite(x, y, isfinite=None):
n = x.shape[0]
if n == 0:
return QtGui.QPainterPath()
if isfinite is None:
isfinite = np.isfinite(x) & np.isfinite(y)
path = QtGui.QPainterPath()
if hasattr(path, 'reserve'): # Qt 5.13
path.reserve(n)
sidx = np.nonzero(~isfinite)[0] + 1
# note: the chunks are views
xchunks = np.split(x, sidx)
ychunks = np.split(y, sidx)
chunks = list(zip(xchunks, ychunks))
# create a single polygon able to hold the largest chunk
maxlen = max(len(chunk) for chunk in xchunks)
subpoly = create_qpolygonf(maxlen)
subarr = ndarray_from_qpolygonf(subpoly)
# resize and fill do not change the capacity
if hasattr(subpoly, 'resize'):
subpoly_resize = subpoly.resize
else:
# PyQt will be less efficient
subpoly_resize = lambda n, v=QtCore.QPointF() : subpoly.fill(v, n)
# notes:
# - we backfill the non-finite in order to get the same image as the
# old codepath on the CI. somehow P1--P2 gets rendered differently
# from P1--P2--P2
# - we do not generate MoveTo(s) that are not followed by a LineTo,
# thus the QPainterPath can be different from the old codepath's
# all chunks except the last chunk have a trailing non-finite
for xchunk, ychunk in chunks[:-1]:
lc = len(xchunk)
if lc <= 1:
# len 1 means we have a string of non-finite
continue
subpoly_resize(lc)
subarr[:lc, 0] = xchunk
subarr[:lc, 1] = ychunk
subarr[lc-1] = subarr[lc-2] # fill non-finite with its neighbour
path.addPolygon(subpoly)
# handle last chunk, which is either all-finite or empty
for xchunk, ychunk in chunks[-1:]:
lc = len(xchunk)
if lc <= 1:
# can't draw a line with just 1 point
continue
subpoly_resize(lc)
subarr[:lc, 0] = xchunk
subarr[:lc, 1] = ychunk
path.addPolygon(subpoly)
return path
def arrayToQPath(x, y, connect='all', finiteCheck=True):
"""
Convert an array of x,y coordinates to QPainterPath as efficiently as
possible. The *connect* argument may be 'all', indicating that each point
should be connected to the next; 'pairs', indicating that each pair of
points should be connected, or an array of int32 values (0 or 1) indicating
connections.
Parameters
----------
x : (N,) ndarray
x-values to be plotted
y : (N,) ndarray
y-values to be plotted, must be same length as `x`
connect : {'all', 'pairs', 'finite', (N,) ndarray}, optional
Argument detailing how to connect the points in the path. `all` will
have sequential points being connected. `pairs` generates lines
between every other point. `finite` only connects points that are
finite. If an ndarray is passed, containing int32 values of 0 or 1,
        only values with 1 will connect to the previous point. Defaults to 'all'.
    finiteCheck : bool, default True
When false, the check for finite values will be skipped, which can
improve performance. If nonfinite values are present in `x` or `y`,
an empty QPainterPath will be generated.
Returns
-------
QPainterPath
QPainterPath object to be drawn
Raises
------
ValueError
Raised when the connect argument has an invalid value placed within.
Notes
-----
A QPainterPath is generated through one of two ways. When the connect
parameter is 'all', a QPolygonF object is created, and
``QPainterPath.addPolygon()`` is called. For other connect parameters
a ``QDataStream`` object is created and the QDataStream >> QPainterPath
operator is used to pass the data. The memory format is as follows
numVerts(i4)
0(i4) x(f8) y(f8) <-- 0 means this vertex does not connect
1(i4) x(f8) y(f8) <-- 1 means this vertex connects to the previous vertex
...
cStart(i4) fillRule(i4)
see: https://github.com/qt/qtbase/blob/dev/src/gui/painting/qpainterpath.cpp
All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
This binary format may change in future versions of Qt
"""
n = x.shape[0]
if n == 0:
return QtGui.QPainterPath()
connect_array = None
if isinstance(connect, np.ndarray):
# make connect argument contain only str type
connect_array, connect = connect, 'array'
isfinite = None
if connect == 'finite':
if not finiteCheck:
# if user specified to skip finite check, then we skip the heuristic
return _arrayToQPath_finite(x, y)
# otherwise use a heuristic
        # if there are only a few non-finite values, use the QPolygonF code path
isfinite = np.isfinite(x) & np.isfinite(y)
nonfinite_cnt = n - np.sum(isfinite)
all_isfinite = nonfinite_cnt == 0
if all_isfinite:
# delegate to connect='all'
connect = 'all'
finiteCheck = False
elif nonfinite_cnt / n < 2 / 100:
return _arrayToQPath_finite(x, y, isfinite)
else:
# delegate to connect=ndarray
# finiteCheck=True, all_isfinite=False
connect = 'array'
connect_array = isfinite
if connect == 'all':
return _arrayToQPath_all(x, y, finiteCheck)
backstore = QtCore.QByteArray()
backstore.resize(4 + n*20 + 8) # contents uninitialized
backstore.replace(0, 4, struct.pack('>i', n))
# cStart, fillRule (Qt.FillRule.OddEvenFill)
backstore.replace(4+n*20, 8, struct.pack('>ii', 0, 0))
arr = np.frombuffer(backstore, dtype=[('c', '>i4'), ('x', '>f8'), ('y', '>f8')],
count=n, offset=4)
backfill_idx = None
if finiteCheck:
if isfinite is None:
isfinite = np.isfinite(x) & np.isfinite(y)
all_isfinite = np.all(isfinite)
if not all_isfinite:
backfill_idx = _compute_backfill_indices(isfinite)
if backfill_idx is None:
arr['x'] = x
arr['y'] = y
else:
arr['x'] = x[backfill_idx]
arr['y'] = y[backfill_idx]
# decide which points are connected by lines
if connect == 'pairs':
arr['c'][0::2] = 0
arr['c'][1::2] = 1 # connect every 2nd point to every 1st one
elif connect == 'array':
# Let's call a point with either x or y being nan is an invalid point.
# A point will anyway not connect to an invalid point regardless of the
# 'c' value of the invalid point. Therefore, we should set 'c' to 0 for
# the next point of an invalid point.
arr['c'][:1] = 0 # the first vertex has no previous vertex to connect
arr['c'][1:] = connect_array[:-1]
else:
raise ValueError('connect argument must be "all", "pairs", "finite", or array')
path = QtGui.QPainterPath()
if hasattr(path, 'reserve'): # Qt 5.13
path.reserve(n)
ds = QtCore.QDataStream(backstore)
ds >> path
return path
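# Illustrative usage sketch: a 0 at index i in the connect array suppresses the
# segment from point i to point i+1, producing a gap in the path.
#
#     x = np.arange(5, dtype=float)
#     y = x ** 2
#     conn = np.array([1, 1, 0, 1, 1], dtype=np.int32)   # gap between points 2 and 3
#     path = arrayToQPath(x, y, connect=conn)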
def ndarray_from_qpolygonf(polyline):
nbytes = 2 * len(polyline) * 8
if QT_LIB.startswith('PyQt'):
buffer = polyline.data()
if buffer is None:
buffer = Qt.sip.voidptr(0)
buffer.setsize(nbytes)
else:
ptr = polyline.data()
if ptr is None:
ptr = 0
buffer = Qt.shiboken.VoidPtr(ptr, nbytes, True)
memory = np.frombuffer(buffer, np.double).reshape((-1, 2))
return memory
def create_qpolygonf(size):
polyline = QtGui.QPolygonF()
if QT_LIB.startswith('PyQt'):
polyline.fill(QtCore.QPointF(), size)
else:
polyline.resize(size)
return polyline
def arrayToQPolygonF(x, y):
"""
Utility function to convert two 1D-NumPy arrays representing curve data
    (X-axis, Y-axis data) into a single open polygon (QtGui.QPolygonF) object.
Thanks to PythonQwt for making this code available
License/copyright: MIT License © Pierre Raybaut 2020.
Parameters
----------
x : np.array
        x-axis coordinates for data to be plotted, must have ndim of 1
y : np.array
y-axis coordinates for data to be plotted, must have ndim of 1 and
be the same length as x
Returns
-------
QPolygonF
Open QPolygonF object that represents the path looking to be plotted
Raises
------
ValueError
When xdata or ydata does not meet the required criteria
"""
if not (
x.size == y.size == x.shape[0] == y.shape[0]
):
raise ValueError("Arguments must be 1D and the same size")
size = x.size
polyline = create_qpolygonf(size)
memory = ndarray_from_qpolygonf(polyline)
memory[:, 0] = x
memory[:, 1] = y
return polyline
#def isosurface(data, level):
#"""
#Generate isosurface from volumetric data using marching tetrahedra algorithm.
#See Paul Bourke, "Polygonising a Scalar Field Using Tetrahedrons" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)
#*data* 3D numpy array of scalar values
#*level* The level at which to generate an isosurface
#"""
#facets = []
### mark everything below the isosurface level
#mask = data < level
#### make eight sub-fields
#fields = np.empty((2,2,2), dtype=object)
#slices = [slice(0,-1), slice(1,None)]
#for i in [0,1]:
#for j in [0,1]:
#for k in [0,1]:
#fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
### split each cell into 6 tetrahedra
### these all have the same 'orientation'; points 1,2,3 circle
### clockwise around point 0
#tetrahedra = [
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],
#[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],
#[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],
#[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],
#[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],
#[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]
#]
### each tetrahedron will be assigned an index
### which determines how to generate its facets.
### this structure is:
### facets[index][facet1, facet2, ...]
### where each facet is triangular and its points are each
### interpolated between two points on the tetrahedron
### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]
### facet points always circle clockwise if you are looking
### at them from below the isosurface.
#indexFacets = [
#[], ## all above
#[[(0,1), (0,2), (0,3)]], # 0 below
#[[(1,0), (1,3), (1,2)]], # 1 below
#[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below
#[[(2,0), (2,1), (2,3)]], # 2 below
#[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below
#[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below
#[[(3,0), (3,1), (3,2)]], # 3 above
#[[(3,0), (3,2), (3,1)]], # 3 below
#[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below
#[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below
#[[(2,0), (2,3), (2,1)]], # 0,1,3 below
#[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below
#[[(1,0), (1,2), (1,3)]], # 0,2,3 below
#[[(0,1), (0,3), (0,2)]], # 1,2,3 below
#[] ## all below
#]
#for tet in tetrahedra:
### get the 4 fields for this tetrahedron
#tetFields = [fields[c] for c in tet]
### generate an index for each grid cell
#index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8
### add facets
#for i in range(index.shape[0]): # data x-axis
#for j in range(index.shape[1]): # data y-axis
#for k in range(index.shape[2]): # data z-axis
#for f in indexFacets[index[i,j,k]]: # faces to generate for this tet
#pts = []
#for l in [0,1,2]: # points in this face
#p1 = tet[f[l][0]] # tet corner 1
#p2 = tet[f[l][1]] # tet corner 2
#pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners
#facets.append(pts)
#return facets
def isocurve(data, level, connected=False, extendToEdge=False, path=False):
"""
Generate isocurve from 2D data using marching squares algorithm.
============== =========================================================
**Arguments:**
data 2D numpy array of scalar values
level The level at which to generate an isosurface
connected If False, return a single long list of point pairs
If True, return multiple long lists of connected point
locations. (This is slower but better for drawing
continuous lines)
extendToEdge If True, extend the curves to reach the exact edges of
the data.
path if True, return a QPainterPath rather than a list of
vertex coordinates. This forces connected=True.
============== =========================================================
This function is SLOW; plenty of room for optimization here.
"""
if path is True:
connected = True
if extendToEdge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0,0] = d2[0,1]
d2[0,-1] = d2[1,-1]
d2[-1,0] = d2[-1,1]
d2[-1,-1] = d2[-1,-2]
data = d2
sideTable = [
[],
[0,1],
[1,2],
[0,2],
[0,3],
[1,3],
[0,1,2,3],
[2,3],
[2,3],
[0,1,2,3],
[1,3],
[0,3],
[0,2],
[1,2],
[0,1],
[]
]
edgeKey=[
[(0,1), (0,0)],
[(0,0), (1,0)],
[(1,0), (1,1)],
[(1,1), (0,1)]
]
lines = []
## mark everything below the isosurface level
mask = data < level
### make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
fields[i,j] = mask[slices[i], slices[j]]
#vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
vertIndex = i+2*j
#print i,j,k," : ", fields[i,j,k], 2**vertIndex
np.add(index, fields[i,j] * 2**vertIndex, out=index, casting='unsafe')
#print index
#print index
## add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = sideTable[index[i,j]]
for l in range(0, len(sides), 2): ## faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0,1]: # points in this face
p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge
p2 = edgeKey[edges[m]][1]
v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
p = ( ## interpolate between corners
p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5
)
if extendToEdge:
## check bounds
p = (
min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)),
)
if connected:
gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2
pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
## turn disjoint list of segments into continuous lines
#lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]
#lines = [[(float(a), a), (float(b), b)] for a,b in lines]
points = {} ## maps each point to its connections
for a,b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a,b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b,a])
## rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: ## already used this point elsewhere
continue
#print "===========", k
for chain in chains:
#print " chain:", chain
x = None
while True:
if x == chain[-1][1]:
break ## nothing left to do on this chain
x = chain[-1][1]
if x == k:
break ## chain has looped; we're done and can ignore the opposite chain
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
#print " ext:", conn
chain.extend(conn[1:])
#print " del:", x
del points[x]
if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction
chains.pop()
break
## extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
chain = chain[1][1:][::-1] + chain[0] # join together ends of chain
else:
chain = chain[0]
lines.append([p[0] for p in chain])
if not path:
return lines ## a list of pairs of points
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
return path
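def _demo_isocurve():
    # Illustrative usage sketch for isocurve() above: trace the 0.5 contour of
    # a small radial test field (assumes np is imported at module top).
    xx, yy = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
    data = np.exp(-(xx ** 2 + yy ** 2) / 0.2)
    curves = isocurve(data, level=0.5, connected=True)
    # curves is a list of connected point lists; pass path=True to get a
    # QPainterPath instead, ready for use with QPainter.
    return curves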
def traceImage(image, values, smooth=0.5):
"""
Convert an image to a set of QPainterPath curves.
One curve will be generated for each item in *values*; each curve outlines the area
of the image that is closer to its value than to any others.
If image is RGB or RGBA, then the shape of values should be (nvals, 3) or (nvals, 4).
The parameter *smooth* is expressed in pixels.
"""
if values.ndim == 2:
values = values.T
values = values[np.newaxis, np.newaxis, ...].astype(float)
image = image[..., np.newaxis].astype(float)
diff = np.abs(image-values)
if values.ndim == 4:
diff = diff.sum(axis=2)
labels = np.argmin(diff, axis=2)
paths = []
for i in range(diff.shape[-1]):
d = (labels==i).astype(float)
d = gaussianFilter(d, (smooth, smooth))
lines = isocurve(d, 0.5, connected=True, extendToEdge=True)
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
paths.append(path)
return paths
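def _demo_traceImage():
    # Illustrative usage sketch for traceImage() above: outline the regions of
    # a small grayscale test image closest to the values 0 and 1 (assumes np
    # is imported at module top; smoothing uses the same gaussianFilter helper
    # that traceImage itself calls).
    img = np.zeros((40, 40))
    img[10:30, 10:30] = 1.0
    values = np.array([0.0, 1.0])
    paths = traceImage(img, values, smooth=1.0)
    return paths                    # one QPainterPath per entry in values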
IsosurfaceDataCache = None
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values. Must be contiguous.
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
"""
## For improvement, see:
##
## Efficient implementation of Marching Cubes' cases with topological guarantees.
## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
## Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
## Precompute lookup tables on the first run
global IsosurfaceDataCache
if IsosurfaceDataCache is None:
## map from grid cell index to edge index.
## grid cell index tells us which corners are below the isosurface,
## edge index tells us which edges are cut by the isosurface.
## (Data stolen from Bourke; see above.)
edgeTable = np.array([
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
], dtype=np.uint16)
## Table of triangles to use for filling each grid cell.
## Each set of three integers tells us which three edges to
## draw a triangle between.
## (Data stolen from Bourke; see above.)
triTable = [
[],
[0, 8, 3],
[0, 1, 9],
[1, 8, 3, 9, 8, 1],
[1, 2, 10],
[0, 8, 3, 1, 2, 10],
[9, 2, 10, 0, 2, 9],
[2, 8, 3, 2, 10, 8, 10, 9, 8],
[3, 11, 2],
[0, 11, 2, 8, 11, 0],
[1, 9, 0, 2, 3, 11],
[1, 11, 2, 1, 9, 11, 9, 8, 11],
[3, 10, 1, 11, 10, 3],
[0, 10, 1, 0, 8, 10, 8, 11, 10],
[3, 9, 0, 3, 11, 9, 11, 10, 9],
[9, 8, 10, 10, 8, 11],
[4, 7, 8],
[4, 3, 0, 7, 3, 4],
[0, 1, 9, 8, 4, 7],
[4, 1, 9, 4, 7, 1, 7, 3, 1],
[1, 2, 10, 8, 4, 7],
[3, 4, 7, 3, 0, 4, 1, 2, 10],
[9, 2, 10, 9, 0, 2, 8, 4, 7],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
[8, 4, 7, 3, 11, 2],
[11, 4, 7, 11, 2, 4, 2, 0, 4],
[9, 0, 1, 8, 4, 7, 2, 3, 11],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
[3, 10, 1, 3, 11, 10, 7, 8, 4],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
[4, 7, 11, 4, 11, 9, 9, 11, 10],
[9, 5, 4],
[9, 5, 4, 0, 8, 3],
[0, 5, 4, 1, 5, 0],
[8, 5, 4, 8, 3, 5, 3, 1, 5],
[1, 2, 10, 9, 5, 4],
[3, 0, 8, 1, 2, 10, 4, 9, 5],
[5, 2, 10, 5, 4, 2, 4, 0, 2],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
[9, 5, 4, 2, 3, 11],
[0, 11, 2, 0, 8, 11, 4, 9, 5],
[0, 5, 4, 0, 1, 5, 2, 3, 11],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
[10, 3, 11, 10, 1, 3, 9, 5, 4],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
[5, 4, 8, 5, 8, 10, 10, 8, 11],
[9, 7, 8, 5, 7, 9],
[9, 3, 0, 9, 5, 3, 5, 7, 3],
[0, 7, 8, 0, 1, 7, 1, 5, 7],
[1, 5, 3, 3, 5, 7],
[9, 7, 8, 9, 5, 7, 10, 1, 2],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
[2, 10, 5, 2, 5, 3, 3, 5, 7],
[7, 9, 5, 7, 8, 9, 3, 11, 2],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
[11, 2, 1, 11, 1, 7, 7, 1, 5],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
[11, 10, 5, 7, 11, 5],
[10, 6, 5],
[0, 8, 3, 5, 10, 6],
[9, 0, 1, 5, 10, 6],
[1, 8, 3, 1, 9, 8, 5, 10, 6],
[1, 6, 5, 2, 6, 1],
[1, 6, 5, 1, 2, 6, 3, 0, 8],
[9, 6, 5, 9, 0, 6, 0, 2, 6],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
[2, 3, 11, 10, 6, 5],
[11, 0, 8, 11, 2, 0, 10, 6, 5],
[0, 1, 9, 2, 3, 11, 5, 10, 6],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
[6, 3, 11, 6, 5, 3, 5, 1, 3],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
[6, 5, 9, 6, 9, 11, 11, 9, 8],
[5, 10, 6, 4, 7, 8],
[4, 3, 0, 4, 7, 3, 6, 5, 10],
[1, 9, 0, 5, 10, 6, 8, 4, 7],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
[6, 1, 2, 6, 5, 1, 4, 7, 8],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
[3, 11, 2, 7, 8, 4, 10, 6, 5],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
[10, 4, 9, 6, 4, 10],
[4, 10, 6, 4, 9, 10, 0, 8, 3],
[10, 0, 1, 10, 6, 0, 6, 4, 0],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
[1, 4, 9, 1, 2, 4, 2, 6, 4],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
[0, 2, 4, 4, 2, 6],
[8, 3, 2, 8, 2, 4, 4, 2, 6],
[10, 4, 9, 10, 6, 4, 11, 2, 3],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
[3, 11, 6, 3, 6, 0, 0, 6, 4],
[6, 4, 8, 11, 6, 8],
[7, 10, 6, 7, 8, 10, 8, 9, 10],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
[10, 6, 7, 10, 7, 1, 1, 7, 3],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
[7, 8, 0, 7, 0, 6, 6, 0, 2],
[7, 3, 2, 6, 7, 2],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
[0, 9, 1, 11, 6, 7],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
[7, 11, 6],
[7, 6, 11],
[3, 0, 8, 11, 7, 6],
[0, 1, 9, 11, 7, 6],
[8, 1, 9, 8, 3, 1, 11, 7, 6],
[10, 1, 2, 6, 11, 7],
[1, 2, 10, 3, 0, 8, 6, 11, 7],
[2, 9, 0, 2, 10, 9, 6, 11, 7],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
[7, 2, 3, 6, 2, 7],
[7, 0, 8, 7, 6, 0, 6, 2, 0],
[2, 7, 6, 2, 3, 7, 0, 1, 9],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
[10, 7, 6, 10, 1, 7, 1, 3, 7],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
[7, 6, 10, 7, 10, 8, 8, 10, 9],
[6, 8, 4, 11, 8, 6],
[3, 6, 11, 3, 0, 6, 0, 4, 6],
[8, 6, 11, 8, 4, 6, 9, 0, 1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
[6, 8, 4, 6, 11, 8, 2, 10, 1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
[8, 2, 3, 8, 4, 2, 4, 6, 2],
[0, 4, 2, 4, 6, 2],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
[1, 9, 4, 1, 4, 2, 2, 4, 6],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
[10, 1, 0, 10, 0, 6, 6, 0, 4],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
[10, 9, 4, 6, 10, 4],
[4, 9, 5, 7, 6, 11],
[0, 8, 3, 4, 9, 5, 11, 7, 6],
[5, 0, 1, 5, 4, 0, 7, 6, 11],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
[9, 5, 4, 10, 1, 2, 7, 6, 11],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
[7, 2, 3, 7, 6, 2, 5, 4, 9],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
[6, 9, 5, 6, 11, 9, 11, 8, 9],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
[6, 11, 3, 6, 3, 5, 5, 3, 1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
[9, 5, 6, 9, 6, 0, 0, 6, 2],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
[1, 5, 6, 2, 1, 6],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
[0, 3, 8, 5, 6, 10],
[10, 5, 6],
[11, 5, 10, 7, 5, 11],
[11, 5, 10, 11, 7, 5, 8, 3, 0],
[5, 11, 7, 5, 10, 11, 1, 9, 0],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
[11, 1, 2, 11, 7, 1, 7, 5, 1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
[2, 5, 10, 2, 3, 5, 3, 7, 5],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
[1, 3, 5, 3, 7, 5],
[0, 8, 7, 0, 7, 1, 1, 7, 5],
[9, 0, 3, 9, 3, 5, 5, 3, 7],
[9, 8, 7, 5, 9, 7],
[5, 8, 4, 5, 10, 8, 10, 11, 8],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
[9, 4, 5, 2, 11, 3],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
[5, 10, 2, 5, 2, 4, 4, 2, 0],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
[8, 4, 5, 8, 5, 3, 3, 5, 1],
[0, 4, 5, 1, 0, 5],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
[9, 4, 5],
[4, 11, 7, 4, 9, 11, 9, 10, 11],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
[11, 7, 4, 11, 4, 2, 2, 4, 0],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
[1, 10, 2, 8, 7, 4],
[4, 9, 1, 4, 1, 7, 7, 1, 3],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
[4, 0, 3, 7, 4, 3],
[4, 8, 7],
[9, 10, 8, 10, 11, 8],
[3, 0, 9, 3, 9, 11, 11, 9, 10],
[0, 1, 10, 0, 10, 8, 8, 10, 11],
[3, 1, 10, 11, 3, 10],
[1, 2, 11, 1, 11, 9, 9, 11, 8],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
[0, 2, 11, 8, 0, 11],
[3, 2, 11],
[2, 3, 8, 2, 8, 10, 10, 8, 9],
[9, 10, 2, 0, 9, 2],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
[1, 10, 2],
[1, 3, 8, 9, 1, 8],
[0, 9, 1],
[0, 3, 8],
[]
]
edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 2],
[1, 0, 0, 2],
[1, 1, 0, 2],
[0, 1, 0, 2],
#[9, 9, 9, 9] ## fake
], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.
nTableFaces = np.array([len(f)/3 for f in triTable], dtype=np.ubyte)
faceShiftTables = [None]
for i in range(1,6):
## compute lookup table of index: vertexes mapping
faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)
faceTableInds = np.argwhere(nTableFaces == i)
faceTableI[faceTableInds[:,0]] = np.array([triTable[j[0]] for j in faceTableInds])
faceTableI = faceTableI.reshape((len(triTable), i, 3))
faceShiftTables.append(edgeShifts[faceTableI])
## Let's try something different:
#faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)
#for i,f in enumerate(triTable):
#f = np.array(f + [12] * (15-len(f))).reshape(5,3)
#faceTable[i] = edgeShifts[f]
IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)
else:
faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache
# We use strides below, which means we need contiguous array input.
# Ideally we can fix this just by removing the dependency on strides.
if not data.flags['C_CONTIGUOUS']:
raise TypeError("isosurface input data must be c-contiguous.")
## mark everything below the isosurface level
mask = data < level
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
for k in [0,1]:
fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme
np.add(index, fields[i,j,k] * 2**vertIndex, out=index, casting='unsafe')
### Generate table of edges that have been cut
cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edgeTable[index]
for i, shift in enumerate(edgeShifts[:12]):
slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]
cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions
m = cutEdges > 0
vertexInds = np.argwhere(m) ## argwhere is slow!
vertexes = vertexInds[:,:3].astype(np.float32)
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cutEdges array as a lookup table for vertex IDs
cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])
for i in [0,1,2]:
vim = vertexInds[:,3] == i
vi = vertexInds[vim, :3]
viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)
v1 = dataFlat[viFlat]
v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]
vertexes[vim,i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
#cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face
#cellInds = index[cells[:,0], cells[:,1], cells[:,2]]
#verts = faceTable[cellInds]
#mask = verts[...,0,0] != 9
#verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
#verts = verts[mask]
#faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.
## To allow this to be vectorized efficiently, we count the number of faces in each
## grid cell and handle each group of cells with the same number together.
## determine how many faces to assign to each grid cell
nFaces = nTableFaces[index]
totFaces = nFaces.sum()
faces = np.empty((totFaces, 3), dtype=np.uint32)
ptr = 0
#import debug
#p = debug.Profiler()
## this helps speed up an indexing operation later on
cs = np.array(cutEdges.strides)//cutEdges.itemsize
cutEdges = cutEdges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1,6):
### expensive:
#profiler()
cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)
#profiler()
if cells.shape[0] == 0:
continue
cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round
#profiler()
### expensive:
verts = faceShiftTables[i][cellInds]
#profiler()
np.add(verts[...,:3], cells[:,np.newaxis,np.newaxis,:], out=verts[...,:3], casting='unsafe') ## we now have indexes into cutEdges
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
#profiler()
### expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vertInds = cutEdges[verts]
#profiler()
nv = vertInds.shape[0]
#profiler()
faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))
#profiler()
ptr += nv
return vertexes, faces
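def _demo_isosurface():
    # Illustrative usage sketch for isosurface() above: extract the level-0.5
    # surface of a small radial scalar field (assumes np is imported at module
    # top).  Input must be C-contiguous, hence the ascontiguousarray call.
    z, y, x = np.mgrid[-1:1:30j, -1:1:30j, -1:1:30j]
    data = np.ascontiguousarray(np.exp(-(x ** 2 + y ** 2 + z ** 2) / 0.3))
    verts, faces = isosurface(data, level=0.5)
    # verts is (Nv, 3) vertex positions in array-index coordinates;
    # faces is (Nf, 3) uint32 indexes into verts.
    return verts, faces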
def _pinv_fallback(tr):
arr = np.array([tr.m11(), tr.m12(), tr.m13(),
tr.m21(), tr.m22(), tr.m23(),
tr.m31(), tr.m32(), tr.m33()])
arr.shape = (3, 3)
pinv = np.linalg.pinv(arr)
return QtGui.QTransform(*pinv.ravel().tolist())
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
A pseudo-inverse is returned if tr is not invertible.
Note that this function is preferred over QTransform.inverted() due to
bugs in that method. (specifically, Qt has floating-point precision issues
when determining whether a matrix is invertible)
"""
try:
det = tr.determinant()
detr = 1.0 / det # let singular matrices raise ZeroDivisionError
inv = tr.adjoint()
inv *= detr
return inv
except ZeroDivisionError:
return _pinv_fallback(tr)
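def _demo_invertQTransform():
    # Illustrative usage sketch for invertQTransform() above: an invertible
    # transform goes through the adjoint/determinant path, while a singular
    # one falls back to the numpy pseudo-inverse.
    tr = QtGui.QTransform()
    tr.scale(2.0, 0.5)
    tr.shear(1.0, 0.0)
    inv = invertQTransform(tr)                      # exact inverse
    singular = QtGui.QTransform(1, 0, 0, 0, 0, 0)   # determinant == 0
    pinv = invertQTransform(singular)               # pseudo-inverse fallback
    return inv, pinv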
def pseudoScatter(data, spacing=None, shuffle=True, bidir=False, method='exact'):
"""Return an array of position values needed to make beeswarm or column scatter plots.
Used for examining the distribution of values in an array.
Given an array of x-values, construct an array of y-values such that an x,y scatter-plot
will not have overlapping points (it will look similar to a histogram).
"""
if method == 'exact':
return _pseudoScatterExact(data, spacing=spacing, shuffle=shuffle, bidir=bidir)
elif method == 'histogram':
return _pseudoScatterHistogram(data, spacing=spacing, shuffle=shuffle, bidir=bidir)
def _pseudoScatterHistogram(data, spacing=None, shuffle=True, bidir=False):
"""Works by binning points into a histogram and spreading them out to fill the bin.
Faster method, but can produce blocky results.
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
yvals = np.empty(len(data))
dmin = data.min()
dmax = data.max()
nbins = int((dmax-dmin) / spacing) + 1
bins = np.linspace(dmin, dmax, nbins)
dx = bins[1] - bins[0]
dbins = ((data - bins[0]) / dx).astype(int)
binCounts = {}
for i,j in enumerate(dbins):
c = binCounts.get(j, -1) + 1
binCounts[j] = c
yvals[i] = c
if bidir is True:
for i in range(nbins):
yvals[dbins==i] -= binCounts.get(i, 0) * 0.5
return yvals[np.argsort(inds)] ## un-shuffle values before returning
def _pseudoScatterExact(data, spacing=None, shuffle=True, bidir=False):
"""Works by stacking points up one at a time, searching for the lowest position available at each point.
This method produces nice, smooth results but can be prohibitively slow for large datasets.
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
s2 = spacing**2
yvals = np.empty(len(data))
if len(data) == 0:
return yvals
yvals[0] = 0
for i in range(1,len(data)):
x = data[i] # current x value to be placed
x0 = data[:i] # all x values already placed
y0 = yvals[:i] # all y values already placed
y = 0
dx = (x0-x)**2 # x-distance to each previous point
xmask = dx < s2 # exclude anything too far away
if xmask.sum() > 0:
if bidir:
dirs = [-1, 1]
else:
dirs = [1]
yopts = []
for direction in dirs:
y = 0
dx2 = dx[xmask]
dy = (s2 - dx2)**0.5
limits = np.empty((2,len(dy))) # ranges of y-values to exclude
limits[0] = y0[xmask] - dy
limits[1] = y0[xmask] + dy
while True:
# ignore anything below this y-value
if direction > 0:
mask = limits[1] >= y
else:
mask = limits[0] <= y
limits2 = limits[:,mask]
# are we inside an excluded region?
mask = (limits2[0] < y) & (limits2[1] > y)
if mask.sum() == 0:
break
if direction > 0:
y = limits2[:,mask].max()
else:
y = limits2[:,mask].min()
yopts.append(y)
if bidir:
y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]
else:
y = yopts[0]
yvals[i] = y
return yvals[np.argsort(inds)] ## un-shuffle values before returning
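def _demo_pseudoScatter():
    # Illustrative usage sketch for pseudoScatter() above: compute beeswarm-
    # style offsets for 200 normally distributed values (assumes np is
    # imported at module top).
    vals = np.random.normal(size=200)
    ypos = pseudoScatter(vals, spacing=0.1, bidir=True, method='histogram')
    # Plotting vals against ypos as a symbol-only scatter gives a
    # histogram-like view of the distribution without overlapping points.
    return ypos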
def toposort(deps, nodes=None, seen=None, stack=None, depth=0):
"""Topological sort. Arguments are:
deps dictionary describing dependencies where a:[b,c] means "a depends on b and c"
nodes optional, specifies list of starting nodes (these should be the nodes
which are not depended on by any other nodes). Other candidate starting
nodes will be ignored.
Example::
# Sort the following graph:
#
#   B ──┬─────> C <── D
#       │       │
#   E <─┴─> A <─┘
#
deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
toposort(deps)
=> ['b', 'd', 'c', 'a', 'e']
"""
# fill in empty dep lists
deps = deps.copy()
for k,v in list(deps.items()):
for k in v:
if k not in deps:
deps[k] = []
if nodes is None:
## run through deps to find nodes that are not depended upon
rem = set()
for dep in deps.values():
rem |= set(dep)
nodes = set(deps.keys()) - rem
if seen is None:
seen = set()
stack = []
sorted = []
for n in nodes:
if n in stack:
raise Exception("Cyclic dependency detected", stack + [n])
if n in seen:
continue
seen.add(n)
sorted.extend( toposort(deps, deps[n], seen, stack+[n], depth=depth+1))
sorted.append(n)
return sorted
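def _demo_toposort():
    # Illustrative usage sketch for toposort() above, mirroring the docstring
    # graph.  A cyclic dependency graph raises
    # Exception("Cyclic dependency detected", ...).
    deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
    order = toposort(deps)
    # Every dependency appears before its dependents, e.g.
    # ['b', 'd', 'c', 'a', 'e'] (order of independent nodes may vary).
    return order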
def disconnect(signal, slot):
"""Disconnect a Qt signal from a slot.
This function augments Qt's Signal.disconnect():
* Return bool indicating whether disconnection was successful, rather than
raising an exception
* Attempt to disconnect prior versions of the slot when using pg.reload
"""
while True:
try:
signal.disconnect(slot)
return True
except (TypeError, RuntimeError):
slot = reload.getPreviousVersion(slot)
if slot is None:
return False
class SignalBlock(object):
"""Class used to temporarily block a Qt signal connection::
with SignalBlock(signal, slot):
# do something that emits a signal; it will
# not be delivered to slot
"""
def __init__(self, signal, slot):
self.signal = signal
self.slot = slot
def __enter__(self):
self.reconnect = disconnect(self.signal, self.slot)
return self
def __exit__(self, *args):
if self.reconnect:
self.signal.connect(self.slot)
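def _demo_SignalBlock():
    # Illustrative usage sketch for disconnect() / SignalBlock above;
    # QTimer.timeout stands in for any Qt signal (QtCore is imported at
    # module top).  Runtime behaviour depends on the Qt binding in use.
    timer = QtCore.QTimer()
    hits = []
    def onTimeout():
        hits.append(1)
    timer.timeout.connect(onTimeout)
    with SignalBlock(timer.timeout, onTimeout):
        pass        # emissions here would not be delivered to onTimeout
    # The connection is restored on exit; disconnect() reports success as a
    # bool instead of raising:
    ok = disconnect(timer.timeout, onTimeout)
    return ok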
|