hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72b389f11090c3291e690acc9bf38811dce7cae | 1,162 | py | Python | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | #!/usr/bin/env python3
"""
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
def extract_redundancy_factor(oclass):
    """Extract the redundancy factor from an object class.

    Erasure-coded classes (EC_<k>P<p>) have a redundancy factor of <p>;
    replicated classes (RP_<n>) have a redundancy factor of <n> - 1.

    Args:
        oclass (str): the object class.

    Returns:
        int: the redundancy factor, or 0 if the class encodes no redundancy.
    """
    # Capture the whole parity count. The previous pattern "P([0-9])+"
    # repeated the group, so only the last digit of a multi-digit parity
    # value was captured.
    match = re.search("EC_[0-9]+P([0-9]+)", oclass)
    if match:
        return int(match.group(1))
    match = re.search("RP_([0-9]+)", oclass)
    if match:
        return int(match.group(1)) - 1
    return 0
def calculate_min_servers(oclass):
    """Calculate the minimum number of required servers for an object class.

    EC_<k>P<p> requires k + p servers; RP_<n> requires n servers.

    Args:
        oclass (str): the object class.

    Returns:
        int: the minimum number of required servers (1 when the class does
            not encode a server requirement).
    """
    patterns = [
        # Capture full multi-digit values. The previous "P([0-9])+" kept
        # only the last digit of the parity count.
        "EC_([0-9]+)P([0-9]+)",
        "RP_([0-9]+)"
    ]
    for pattern in patterns:
        match = re.search(pattern, oclass)
        if match:
            # Sum every captured group: data + parity for EC, replica
            # count for RP. Using groups() also avoids the old behavior of
            # digit-summing a multi-digit RP count string.
            return sum(int(group) for group in match.groups())
    return 1
| 23.714286 | 76 | 0.593804 |
import re
def extract_redundancy_factor(oclass):
    """Return the redundancy factor encoded in an object class name.

    EC classes yield the captured parity digit; RP classes yield the
    replica count minus one; anything else yields 0.
    """
    for pattern, offset in (("EC_[0-9]+P([0-9])+", 0), ("RP_([0-9]+)", -1)):
        found = re.search(pattern, oclass)
        if found:
            return int(found.group(1)) + offset
    return 0
def calculate_min_servers(oclass):
    """Return the minimum server count implied by an object class name."""
    for expr in ("EC_([0-9]+)P([0-9])+", "RP_([0-9]+)"):
        hits = re.findall(expr, oclass)
        if not hits:
            continue
        # hits[0] is a tuple of groups (EC) or a bare string (RP);
        # summing its int-converted parts gives the server count.
        return sum(int(part) for part in hits[0])
    return 1
| true | true |
f72b3967e92f28affb77f904b54628581c7af2bf | 7,799 | py | Python | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 904 | 2016-10-11T13:35:19.000Z | 2022-03-25T09:29:09.000Z | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 1,866 | 2016-10-15T21:28:09.000Z | 2022-03-29T18:09:20.000Z | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 368 | 2016-10-11T13:44:08.000Z | 2022-03-30T02:23:12.000Z | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import hashlib
from unittest import mock
import os
import tempfile
import shutil
import tarfile
from contextlib import contextmanager
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from rest_framework import status as http_codes
from galaxy.main import models
UserModel = get_user_model()
@contextmanager
def tar_archive_available():
    """Yield the path of an empty collection tarball in a temp directory.

    The temporary directory (and the archive inside it) is removed when
    the context exits, even if the body raises.
    """
    tmp_dir = tempfile.mkdtemp()
    file_path = os.path.join(tmp_dir, 'mynamespace-mycollection-1.2.3.tar.gz')
    try:
        with open(file_path, 'wb') as fp:
            tar = tarfile.open(fileobj=fp, mode='w')
            tar.close()
        yield file_path
    finally:
        # The previous version leaked tmp_dir when the with-body raised.
        shutil.rmtree(tmp_dir)
class TestCollectionListView(APITestCase):
    """Tests for the v2 collection list endpoint (upload + listing).

    ``galaxy.common.tasking.create_task`` is patched in ``setUp`` so
    uploads never schedule a real import task.
    """
    url = '/api/v2/collections/'
    def setUp(self):
        """Log in a namespace owner and mock out task creation."""
        super().setUp()
        self.user = UserModel.objects.create_user(
            username='testuser', password='secret')
        self.namespace = models.Namespace.objects.create(name='mynamespace')
        self.namespace.owners.set([self.user])
        self.client.login(username=self.user.username, password='secret')
        patcher = mock.patch('galaxy.common.tasking.create_task')
        self.create_task_mock = patcher.start()
        self.addCleanup(patcher.stop)
    def test_upload(self):
        """A valid archive upload returns 202 and the import-task URL."""
        task_obj = models.ImportTask(id=42)
        self.create_task_mock.return_value = task_obj
        with tar_archive_available() as file_path:
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                })
        self.create_task_mock.assert_called_once()
        assert response.status_code == http_codes.HTTP_202_ACCEPTED
        assert response.json() == {
            'task': 'http://testserver/api/v2/collection-imports/42/'}
    def test_upload_w_sha(self):
        """An upload with a matching sha256 checksum is accepted."""
        task_obj = models.ImportTask(id=42)
        self.create_task_mock.return_value = task_obj
        with tar_archive_available() as file_path:
            with open(file_path, 'rb') as fp:
                bytes = fp.read()
            file_sha256 = hashlib.sha256(bytes).hexdigest()
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                    'sha256': file_sha256,
                })
        assert response.status_code == http_codes.HTTP_202_ACCEPTED
        assert response.json() == {
            'task': 'http://testserver/api/v2/collection-imports/42/'}
        self.create_task_mock.assert_called_once()
    def test_upload_w_invalid_sha(self):
        """An upload whose declared sha256 does not match is rejected."""
        with tar_archive_available() as file_path:
            with open(file_path, 'rb') as fp:
                bytes = fp.read()
            # Digest over corrupted input so the declared checksum is wrong.
            file_sha256 = hashlib.sha256(bytes + b'x').hexdigest()
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                    'sha256': file_sha256,
                })
        assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
        assert response.json() == {
            'code': 'invalid',
            'message': 'The sha256 checksum did not match.'
        }
    def test_upload_invalid_namespace(self):
        """Uploads for a namespace that does not exist return 400."""
        open_mock = mock.mock_open(read_data='Test data')
        with open_mock() as fp:
            fp.name = 'wrongnamespace-mycollection-1.2.3.tar.gz'
            response = self.client.post(self.url, data={
                'file': fp,
            })
        assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
        assert response.json() == {
            'code': 'invalid',
            'message': 'Namespace "wrongnamespace" does not exist.'
        }
    def test_upload_version_conflict(self):
        """Uploading an already-existing collection version returns 409."""
        collection = models.Collection.objects.create(
            namespace=self.namespace, name='mycollection')
        models.CollectionVersion.objects.create(
            collection=collection, version='1.2.3')
        open_mock = mock.mock_open(read_data=b'Test data')
        with open_mock() as fp:
            fp.name = 'mynamespace-mycollection-1.2.3.tar.gz'
            response = self.client.post(self.url, data={
                'file': fp
            })
        assert response.status_code == http_codes.HTTP_409_CONFLICT
        assert response.json() == {
            'code': 'conflict.collection_exists',
            'message': 'Collection "mynamespace-mycollection-1.2.3"'
                       ' already exists.'
        }
    def test_get_collection_list(self):
        """GET returns the paginated list of collections."""
        self.collection = models.Collection.objects.create(
            namespace=self.namespace,
            name='mycollection')
        response = self.client.get(self.url)
        result = response.json()
        assert response.status_code == http_codes.HTTP_200_OK
        assert result['count'] == 1
        assert result['results'][0]['name'] == 'mycollection'
    def test_fail_method_not_allowed(self):
        """PUT/PATCH/DELETE are not allowed on the list endpoint."""
        for method in ['PUT', 'PATCH', 'DELETE']:
            response = self.client.generic(method, self.url)
            assert (response.status_code
                    == http_codes.HTTP_405_METHOD_NOT_ALLOWED)
class TestCollectionDetailView(APITestCase):
    """Tests for the collection detail endpoint (by pk and by ns/name)."""
    url_id = '/api/v2/collections/{pk}/'
    url_name = '/api/v2/collections/{ns}/{name}/'
    def setUp(self):
        """Create a collection with three versions; 1.1.12 is the latest."""
        super().setUp()
        self.namespace = models.Namespace.objects.create(
            name='mynamespace')
        self.collection = models.Collection.objects.create(
            namespace=self.namespace,
            name='mycollection')
        self.version1 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.1.2')
        self.version2 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.1.12')
        self.version3 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.0.1')
        self.collection.latest_version = self.version2
        self.collection.save()
    def test_view_success(self):
        """Both URL forms return the same serialized collection."""
        urls = [
            self.url_id.format(pk=self.collection.pk),
            self.url_name.format(
                ns=self.namespace.name,
                name=self.collection.name,
            ),
        ]
        for url in urls:
            response = self.client.get(url)
            assert response.status_code == http_codes.HTTP_200_OK
            result = response.json()
            assert result['id'] == self.collection.pk
            # href always uses the canonical namespace/name form of the URL.
            assert result['href'] == f'http://testserver{urls[1]}'
            assert result['name'] == self.collection.name
            assert result['namespace']['name'] == self.namespace.name
            assert result['versions_url'] == \
                f'http://testserver{urls[1]}versions/'
            assert (result['latest_version']['version'] ==
                    self.version2.version)
            assert result['deprecated'] is False
    def test_view_404(self):
        """Requesting a non-existent pk returns 404."""
        response = self.client.get(self.url_id.format(pk=self.collection.pk+1))
        assert response.status_code == http_codes.HTTP_404_NOT_FOUND
| 36.443925 | 79 | 0.62149 |
import hashlib
from unittest import mock
import os
import tempfile
import shutil
import tarfile
from contextlib import contextmanager
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from rest_framework import status as http_codes
from galaxy.main import models
UserModel = get_user_model()
@contextmanager
def tar_archive_available():
    """Create a temp directory holding an empty collection tarball and
    yield the tarball path; the directory is deleted on normal exit."""
    workdir = tempfile.mkdtemp()
    archive = os.path.join(workdir, 'mynamespace-mycollection-1.2.3.tar.gz')
    with open(archive, 'wb') as handle:
        tarfile.open(fileobj=handle, mode='w').close()
    yield archive
    shutil.rmtree(workdir)
class TestCollectionListView(APITestCase):
    """Tests for the v2 collection list endpoint (upload + listing).

    ``galaxy.common.tasking.create_task`` is patched in ``setUp`` so
    uploads never schedule a real import task.
    """
    url = '/api/v2/collections/'
    def setUp(self):
        """Log in a namespace owner and mock out task creation."""
        super().setUp()
        self.user = UserModel.objects.create_user(
            username='testuser', password='secret')
        self.namespace = models.Namespace.objects.create(name='mynamespace')
        self.namespace.owners.set([self.user])
        self.client.login(username=self.user.username, password='secret')
        patcher = mock.patch('galaxy.common.tasking.create_task')
        self.create_task_mock = patcher.start()
        self.addCleanup(patcher.stop)
    def test_upload(self):
        """A valid archive upload returns 202 and the import-task URL."""
        task_obj = models.ImportTask(id=42)
        self.create_task_mock.return_value = task_obj
        with tar_archive_available() as file_path:
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                })
        self.create_task_mock.assert_called_once()
        assert response.status_code == http_codes.HTTP_202_ACCEPTED
        assert response.json() == {
            'task': 'http://testserver/api/v2/collection-imports/42/'}
    def test_upload_w_sha(self):
        """An upload with a matching sha256 checksum is accepted."""
        task_obj = models.ImportTask(id=42)
        self.create_task_mock.return_value = task_obj
        with tar_archive_available() as file_path:
            with open(file_path, 'rb') as fp:
                bytes = fp.read()
            file_sha256 = hashlib.sha256(bytes).hexdigest()
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                    'sha256': file_sha256,
                })
        assert response.status_code == http_codes.HTTP_202_ACCEPTED
        assert response.json() == {
            'task': 'http://testserver/api/v2/collection-imports/42/'}
        self.create_task_mock.assert_called_once()
    def test_upload_w_invalid_sha(self):
        """An upload whose declared sha256 does not match is rejected."""
        with tar_archive_available() as file_path:
            with open(file_path, 'rb') as fp:
                bytes = fp.read()
            # Digest over corrupted input so the declared checksum is wrong.
            file_sha256 = hashlib.sha256(bytes + b'x').hexdigest()
            with open(file_path, 'r') as fp:
                response = self.client.post(self.url, data={
                    'file': fp,
                    'sha256': file_sha256,
                })
        assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
        assert response.json() == {
            'code': 'invalid',
            'message': 'The sha256 checksum did not match.'
        }
    def test_upload_invalid_namespace(self):
        """Uploads for a namespace that does not exist return 400."""
        open_mock = mock.mock_open(read_data='Test data')
        with open_mock() as fp:
            fp.name = 'wrongnamespace-mycollection-1.2.3.tar.gz'
            response = self.client.post(self.url, data={
                'file': fp,
            })
        assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
        assert response.json() == {
            'code': 'invalid',
            'message': 'Namespace "wrongnamespace" does not exist.'
        }
    def test_upload_version_conflict(self):
        """Uploading an already-existing collection version returns 409."""
        collection = models.Collection.objects.create(
            namespace=self.namespace, name='mycollection')
        models.CollectionVersion.objects.create(
            collection=collection, version='1.2.3')
        open_mock = mock.mock_open(read_data=b'Test data')
        with open_mock() as fp:
            fp.name = 'mynamespace-mycollection-1.2.3.tar.gz'
            response = self.client.post(self.url, data={
                'file': fp
            })
        assert response.status_code == http_codes.HTTP_409_CONFLICT
        assert response.json() == {
            'code': 'conflict.collection_exists',
            'message': 'Collection "mynamespace-mycollection-1.2.3"'
                       ' already exists.'
        }
    def test_get_collection_list(self):
        """GET returns the paginated list of collections."""
        self.collection = models.Collection.objects.create(
            namespace=self.namespace,
            name='mycollection')
        response = self.client.get(self.url)
        result = response.json()
        assert response.status_code == http_codes.HTTP_200_OK
        assert result['count'] == 1
        assert result['results'][0]['name'] == 'mycollection'
    def test_fail_method_not_allowed(self):
        """PUT/PATCH/DELETE are not allowed on the list endpoint."""
        for method in ['PUT', 'PATCH', 'DELETE']:
            response = self.client.generic(method, self.url)
            assert (response.status_code
                    == http_codes.HTTP_405_METHOD_NOT_ALLOWED)
class TestCollectionDetailView(APITestCase):
    """Tests for the collection detail endpoint (by pk and by ns/name)."""
    url_id = '/api/v2/collections/{pk}/'
    url_name = '/api/v2/collections/{ns}/{name}/'
    def setUp(self):
        """Create a collection with three versions; 1.1.12 is the latest."""
        super().setUp()
        self.namespace = models.Namespace.objects.create(
            name='mynamespace')
        self.collection = models.Collection.objects.create(
            namespace=self.namespace,
            name='mycollection')
        self.version1 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.1.2')
        self.version2 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.1.12')
        self.version3 = models.CollectionVersion.objects.create(
            collection=self.collection, version='1.0.1')
        self.collection.latest_version = self.version2
        self.collection.save()
    def test_view_success(self):
        """Both URL forms return the same serialized collection."""
        urls = [
            self.url_id.format(pk=self.collection.pk),
            self.url_name.format(
                ns=self.namespace.name,
                name=self.collection.name,
            ),
        ]
        for url in urls:
            response = self.client.get(url)
            assert response.status_code == http_codes.HTTP_200_OK
            result = response.json()
            assert result['id'] == self.collection.pk
            # href always uses the canonical namespace/name form of the URL.
            assert result['href'] == f'http://testserver{urls[1]}'
            assert result['name'] == self.collection.name
            assert result['namespace']['name'] == self.namespace.name
            assert result['versions_url'] == \
                f'http://testserver{urls[1]}versions/'
            assert (result['latest_version']['version'] ==
                    self.version2.version)
            assert result['deprecated'] is False
    def test_view_404(self):
        """Requesting a non-existent pk returns 404."""
        response = self.client.get(self.url_id.format(pk=self.collection.pk+1))
        assert response.status_code == http_codes.HTTP_404_NOT_FOUND
| true | true |
f72b39da0ed6829e91b76ba1b8864ebef44e2299 | 1,043 | py | Python | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | null | null | null | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | null | null | null | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | 1 | 2019-03-04T02:45:59.000Z | 2019-03-04T02:45:59.000Z | import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
from . import appdirs
# Directory of this module; used to locate the bundled defaults file.
cwd = os.path.split(os.path.abspath(__file__))[0]
userdir = appdirs.user_data_dir("pycortex", "JamesGao")
usercfg = os.path.join(userdir, "options.cfg")
# Read defaults from pycortex repo
config = configparser.ConfigParser()
config.read(os.path.join(cwd, 'defaults.cfg'))
# Update defaults with user-specified values in user config
files_successfully_read = config.read(usercfg)
# If user config doesn't exist, create it
# (config.read returns the list of files it could open; empty means the
# user config is missing, so persist the defaults there).
if len(files_successfully_read) == 0:
    os.makedirs(userdir, exist_ok=True)
    with open(usercfg, 'w') as fp:
        config.write(fp)
#set default path in case the module is imported from the source code directory
if not config.has_option("basic", "filestore"):
    config.set("basic", "filestore", os.path.join(cwd, os.pardir, "filestore/db"))
if not config.has_option("webgl", "colormaps"):
    config.set("webgl", "colormaps", os.path.join(cwd, os.pardir, "filestore/colormaps"))
| 33.645161 | 89 | 0.731544 | import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
from . import appdirs
# Module directory, user config directory and user config file path.
cwd = os.path.split(os.path.abspath(__file__))[0]
userdir = appdirs.user_data_dir("pycortex", "JamesGao")
usercfg = os.path.join(userdir, "options.cfg")
# Load the packaged defaults, then overlay the user's options.cfg (if any).
config = configparser.ConfigParser()
config.read(os.path.join(cwd, 'defaults.cfg'))
files_successfully_read = config.read(usercfg)
# config.read returns the list of files it could open; empty means the
# user config does not exist yet, so write one seeded with the defaults.
if len(files_successfully_read) == 0:
    os.makedirs(userdir, exist_ok=True)
    with open(usercfg, 'w') as fp:
        config.write(fp)
#set default path in case the module is imported from the source code directory
if not config.has_option("basic", "filestore"):
    config.set("basic", "filestore", os.path.join(cwd, os.pardir, "filestore/db"))
if not config.has_option("webgl", "colormaps"):
    config.set("webgl", "colormaps", os.path.join(cwd, os.pardir, "filestore/colormaps"))
| true | true |
f72b39f30657384a75e0bf6bea346fedfc2a5b53 | 784 | py | Python | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | null | null | null | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | 4 | 2021-04-08T19:49:25.000Z | 2021-06-10T20:08:37.000Z | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.9 on 2020-08-07 21:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add nullable ``buyer`` and ``seller`` user foreign keys to the
    ``transaction`` model (auto-generated by Django)."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='transaction',
            name='buyer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='buyer', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='transaction',
            name='seller',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 30.153846 | 144 | 0.653061 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: attach ``buyer``/``seller`` foreign keys
    (to the configured user model) on ``transaction``; both nullable."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='transaction',
            name='buyer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='buyer', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='transaction',
            name='seller',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL),
        ),
    ]
| true | true |
f72b3a24ac71bb8d2a6c614a0679f5609ec4ff65 | 1,051 | py | Python | fedex/services/tracking.py | jzempel/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 19 | 2015-02-02T03:00:54.000Z | 2021-09-06T02:22:22.000Z | fedex/services/tracking.py | hongzhou-liu/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 2 | 2015-08-14T22:05:17.000Z | 2017-03-01T18:54:40.000Z | fedex/services/tracking.py | hongzhou-liu/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 53 | 2015-03-31T14:46:30.000Z | 2022-01-02T15:06:38.000Z | # -*- coding: utf-8 -*-
"""
fedex.services.tracking
~~~~~~~~~~~~~~~~~~~~~~~
FedEx tracking web services.
:copyright: 2014 by Jonathan Zempel.
:license: BSD, see LICENSE for more details.
"""
from .commons import BaseService
class TrackingService(BaseService):
    """Tracking service.
    :param configuration: API configuration.
    :param wsdl_version: Default ``10``.
    """
    def __init__(self, configuration, wsdl_version=10):
        # "Track" is the WSDL name; "trck" is the service sub-namespace.
        super(TrackingService, self).__init__(configuration, "Track",
                wsdl_version, "trck")
    def create_selection_details(self):
        """Create a new selection details object.

        :returns: a new ``TrackSelectionDetail`` instance.
        """
        return self.create("TrackSelectionDetail")
    def track(self, selection_details, **kwargs):
        """Track a package.
        :param selection_details: Details to select the package to track.
        :param kwargs: Additional service keyword arguments.
        :returns: the response of the "track" service call.
        """
        kwargs["SelectionDetails"] = selection_details
        return self.call("track", **kwargs)
| 26.275 | 73 | 0.636537 |
from .commons import BaseService
class TrackingService(BaseService):
    """FedEx "Track" web-service wrapper.

    :param configuration: API configuration.
    :param wsdl_version: WSDL version to load. Default ``10``.
    """

    def __init__(self, configuration, wsdl_version=10):
        super(TrackingService, self).__init__(
            configuration, "Track", wsdl_version, "trck")

    def create_selection_details(self):
        """Return a new ``TrackSelectionDetail`` object."""
        return self.create("TrackSelectionDetail")

    def track(self, selection_details, **kwargs):
        """Track a package selected by *selection_details*.

        :param selection_details: details selecting the package to track.
        :param kwargs: additional service keyword arguments.
        """
        kwargs = dict(kwargs, SelectionDetails=selection_details)
        return self.call("track", **kwargs)
| true | true |
f72b3a9eea2bf0c678dea23dbb4a5561470fadad | 2,227 | py | Python | extract.py | DengYiping/IAT_EAT | a0763a8167285f99d356f05c50ad5c3120354a58 | [
"MIT"
] | null | null | null | extract.py | DengYiping/IAT_EAT | a0763a8167285f99d356f05c50ad5c3120354a58 | [
"MIT"
] | null | null | null | extract.py | DengYiping/IAT_EAT | a0763a8167285f99d356f05c50ad5c3120354a58 | [
"MIT"
] | null | null | null | import os
import sys
import pefile
import esm
'''
tuning this parameter to get a better curracy
'''
# Import names too ubiquitous (CRT / common Win32 calls) to be useful as
# signatures; parse_index() skips them when building the index.
COMMON = set([
    'rand', 'malloc', 'realloc', 'memset', 'exit', 'free', 'calloc', 'memcpy', 'memmove',
    'GetVersion', 'printf', 'strchr', 'strncmp', 'fread',
    'fclose', 'fprintf', 'sprintf', '_snprintf','fopen', 'strncpy', 'fseek', 'fwrite',
    'strlen', 'strcmp', 'memchr', 'ferror',
    'GetCurrentProcess', 'TerminateProcess', 'SetUnhandledExceptionFilter', 'GetCurrentProcessId',
    'GetTickCount', 'InterlockedExchange', 'QueryPerformanceCounter', 'UnhandledExceptionFilter',
    'IsDebuggerPresent', 'MultiByteToWideChar', 'GetSystemTimeAsFileTime', 'Sleep', 'GetCurrentThreadId',
    'memcmp', '_lock', '_unlock'
    ])
def parse_index(absPath):
    """Build an Aho-Corasick index from a PE binary's import table.

    Import names present in COMMON are skipped. (Python 2 code.)

    Returns:
        (int, esm.Index): count of names entered, and the fixed index.
    """
    print "file: " + absPath
    pe_file = pefile.PE(absPath)
    index = esm.Index()
    i = 0
    for entry in pe_file.DIRECTORY_ENTRY_IMPORT:
        #print entry.dll
        for imp in entry.imports:
            #print '\t', imp.name
            name = imp.name
            if(name and not name in COMMON):
                i = i + 1
                index.enter(name)
    index.fix()
    return i, index
def ac_match(filename, index):
    """Collect the pattern matches that *index* finds in a text file.

    :param filename: path of the text file to scan.
    :param index: object exposing ``query(text)`` yielding (range, match).
    :returns: list of matched strings, in query order.
    """
    with open(filename, 'r') as handle:
        text = handle.read()
    return [found for _rng, found in index.query(text)]
def getfiles(root_dir):
    """Recursively list all file paths under *root_dir*.

    Args:
        root_dir (str): directory tree to walk.

    Returns:
        list: full path of every file below root_dir.
    """
    fs = []
    for root, _, files in os.walk(root_dir):
        for fls in files:
            # os.path.join is separator-portable (the original hard-coded '/').
            fs.append(os.path.join(root, fls))
    return fs
# CLI entry point (Python 2): argv[1] is a PE binary whose import names
# become signatures; argv[2] is a folder of text files scanned for them.
if __name__ == "__main__":
    binary_name = sys.argv[1]
    folder_name = sys.argv[2]
    files = getfiles(folder_name)
    i, index = parse_index(binary_name)
    matches = []
    for filename in files:
        mtchs = ac_match(filename, index)
        #print str(match_num) + " matches in " + filename
        matches = matches + mtchs
    # Deduplicate the matched signature names across all files.
    matches = set(list(matches))
    print "Number of signatures: " + str(i)
    print "Totally " + str(len(matches)) + " matches."
    #print str(len(matches) * 100 / long(i)) + "%" + " match"
    print matches
| 28.551282 | 104 | 0.597665 | import os
import sys
import pefile
import esm
'''
tuning this parameter to get a better curracy
'''
# Ubiquitous CRT / Win32 import names excluded from the signature index
# built in parse_index().
COMMON = set([
    'rand', 'malloc', 'realloc', 'memset', 'exit', 'free', 'calloc', 'memcpy', 'memmove',
    'GetVersion', 'printf', 'strchr', 'strncmp', 'fread',
    'fclose', 'fprintf', 'sprintf', '_snprintf','fopen', 'strncpy', 'fseek', 'fwrite',
    'strlen', 'strcmp', 'memchr', 'ferror',
    'GetCurrentProcess', 'TerminateProcess', 'SetUnhandledExceptionFilter', 'GetCurrentProcessId',
    'GetTickCount', 'InterlockedExchange', 'QueryPerformanceCounter', 'UnhandledExceptionFilter',
    'IsDebuggerPresent', 'MultiByteToWideChar', 'GetSystemTimeAsFileTime', 'Sleep', 'GetCurrentThreadId',
    'memcmp', '_lock', '_unlock'
    ])
def parse_index(absPath):
    """Build an Aho-Corasick index from the PE file's import names,
    skipping those in COMMON. Returns (count, fixed esm.Index).
    (Python 2 code.)"""
    print "file: " + absPath
    pe_file = pefile.PE(absPath)
    index = esm.Index()
    i = 0
    for entry in pe_file.DIRECTORY_ENTRY_IMPORT:
        for imp in entry.imports:
            name = imp.name
            if(name and not name in COMMON):
                i = i + 1
                index.enter(name)
    index.fix()
    return i, index
def ac_match(filename, index):
    """Return the list of pattern matches *index* finds in the given file."""
    with open(filename, 'r') as source:
        content = source.read()
    results = []
    for _span, hit in index.query(content):
        results.append(hit)
    return results
def getfiles(root_dir):
    """Walk *root_dir* and return the full paths of all files beneath it.

    Args:
        root_dir (str): directory tree to walk.

    Returns:
        list: path of every file found.
    """
    fs = []
    for root, _, files in os.walk(root_dir):
        for fls in files:
            # Join portably instead of hard-coding the '/' separator.
            fs.append(os.path.join(root, fls))
    return fs
# CLI entry point (Python 2): argv[1] = PE binary providing signatures,
# argv[2] = folder of text files to scan.
if __name__ == "__main__":
    binary_name = sys.argv[1]
    folder_name = sys.argv[2]
    files = getfiles(folder_name)
    i, index = parse_index(binary_name)
    matches = []
    for filename in files:
        mtchs = ac_match(filename, index)
        matches = matches + mtchs
    # Deduplicate matched names across all scanned files.
    matches = set(list(matches))
    print "Number of signatures: " + str(i)
    print "Totally " + str(len(matches)) + " matches."
    print matches
| false | true |
f72b3aaaffce70e47700f8e60f660a0f8a235566 | 810 | py | Python | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 2 | 2021-09-26T07:03:54.000Z | 2022-02-21T15:46:30.000Z | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | null | null | null | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 1 | 2021-04-16T06:11:41.000Z | 2021-04-16T06:11:41.000Z | import logging
import time
import os
import sys
def create_logger(final_output_path, description=None):
    """Configure and return the root logger.

    Logs INFO+ to a timestamped file in ``final_output_path`` and echoes
    records to stdout.

    Args:
        final_output_path (str): directory that receives the log file.
        description (str, optional): suffix appended to the log file name.

    Returns:
        logging.Logger: the configured root logger.
    """
    if description is None:
        log_file = '{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'))
    else:
        log_file = '{}_{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'), description)
    head = '%(asctime)-15s %(message)s'
    # basicConfig attaches a file handler to the root logger; it is a
    # no-op if the root logger was already configured by an earlier call.
    logging.basicConfig(filename=os.path.join(final_output_path, log_file),
                        format=head)
    clogger = logging.getLogger()
    clogger.setLevel(logging.INFO)
    # add handler
    # print to stdout and log file
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    clogger.addHandler(ch)
    return clogger | 33.75 | 89 | 0.653086 | import logging
import time
import os
import sys
def create_logger(final_output_path, description=None):
    """Configure the root logger to write INFO+ records to a timestamped
    file in ``final_output_path`` (optionally suffixed with *description*)
    and to stdout; return the root logger."""
    if description is None:
        log_file = '{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'))
    else:
        log_file = '{}_{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'), description)
    head = '%(asctime)-15s %(message)s'
    # File handler via basicConfig (no-op if root already configured).
    logging.basicConfig(filename=os.path.join(final_output_path, log_file),
                        format=head)
    clogger = logging.getLogger()
    clogger.setLevel(logging.INFO)
    # Additional stream handler so records also reach stdout.
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    clogger.addHandler(ch)
    return clogger | true | true |
f72b3aba051c3d6ff702a6ceb870932003b147bc | 975 | py | Python | examples/uncrc32.py | z3v2cicidi/impacket | d8da712c3dea013c61fe019a7efc7e1289ebb891 | [
"Apache-1.1"
] | 7 | 2018-06-06T05:19:36.000Z | 2022-03-16T02:04:47.000Z | impacket/examples/uncrc32.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | null | null | null | impacket/examples/uncrc32.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | 4 | 2015-02-25T20:08:18.000Z | 2021-07-06T12:31:29.000Z | # based on:
#
# Reversing CRC - Theory and Practice.
# HU Berlin Public Report
# SAR-PR-2006-05
# May 2006
# Authors:
# Martin Stigge, Henryk Plotz, Wolf Muller, Jens-Peter Redlich
# CRC32 parameters (reflected/LSB-first form) and the multiplicative
# inverse constant used to run the register backwards. (Python 2 longs.)
FINALXOR = 0xffffffffL
INITXOR = 0xffffffffL
CRCPOLY = 0xEDB88320L
CRCINV = 0x5B358FD3L
from binascii import crc32
from struct import pack
def tableAt(byte):
    # CRC table entry for `byte`, reconstructed from binascii.crc32.
    return crc32(chr(byte ^ 0xff)) & 0xffffffff ^ FINALXOR ^ (INITXOR >> 8)
def compensate(buf, wanted):
    """Return 4 bytes that, appended to `buf`, force its CRC32 to `wanted`.

    Implements the CRC-reversal technique of Stigge et al. (SAR-PR-2006-05,
    cited in the file header): the CRC shift register is run backwards 32
    steps from the desired final state using CRCINV, then combined with
    the forward CRC of the existing buffer. Result is little-endian packed.
    """
    wanted ^= FINALXOR
    newBits = 0
    for i in range(32):
        if newBits & 1:
            newBits >>= 1
            newBits ^= CRCPOLY
        else:
            newBits >>= 1
        if wanted & 1:
            newBits ^= CRCINV
        wanted >>= 1
    # Merge with the forward CRC state of the existing data.
    newBits ^= crc32(buf) ^ FINALXOR
    return pack('<L', newBits)
def main():
    """Self-test: appending the compensation bytes to 'HOLA' must make
    its CRC32 equal 0x12345678 (prints True on success). (Python 2.)"""
    str = 'HOLA'
    t = 0x12345678
    print crc32(str + compensate(str, t)) == t
| 22.159091 | 75 | 0.544615 |
# Reflected CRC32 constants plus the inverse constant for reverse stepping.
FINALXOR = 0xffffffffL
INITXOR = 0xffffffffL
CRCPOLY = 0xEDB88320L
CRCINV = 0x5B358FD3L
from binascii import crc32
from struct import pack
def tableAt(byte):
    # Derive the CRC table entry for `byte` from binascii.crc32.
    return crc32(chr(byte ^ 0xff)) & 0xffffffff ^ FINALXOR ^ (INITXOR >> 8)
def compensate(buf, wanted):
    """Compute 4 little-endian bytes which, appended to `buf`, make the
    buffer's CRC32 equal `wanted` (reverse-CRC per SAR-PR-2006-05)."""
    wanted ^= FINALXOR
    newBits = 0
    # Step the CRC register backwards 32 times from the target state.
    for i in range(32):
        if newBits & 1:
            newBits >>= 1
            newBits ^= CRCPOLY
        else:
            newBits >>= 1
        if wanted & 1:
            newBits ^= CRCINV
        wanted >>= 1
    newBits ^= crc32(buf) ^ FINALXOR
    return pack('<L', newBits)
def main():
    """Demonstration: prints True if the compensated CRC matches the target."""
    str = 'HOLA'
    t = 0x12345678
    print crc32(str + compensate(str, t)) == t
| false | true |
f72b3aca2cee2eefced9efcef5337ee9bdadcbda | 719 | py | Python | arrays/height_checker.py | ChristianChiarulli/leetcode | 6920cea51b61feae9038b185c0b1172a93f6316a | [
"MIT"
] | 4 | 2020-12-09T03:53:21.000Z | 2021-03-30T12:28:21.000Z | .old/arrays/height_checker.py | ChristianChiarulli/data_structures_and_algorithms | 05c5c7c9db0cc0a15e83ba20d5bf4f6534b08fc1 | [
"MIT"
] | null | null | null | .old/arrays/height_checker.py | ChristianChiarulli/data_structures_and_algorithms | 05c5c7c9db0cc0a15e83ba20d5bf4f6534b08fc1 | [
"MIT"
] | 2 | 2020-06-12T17:00:07.000Z | 2020-07-13T20:13:56.000Z | # Students are asked to stand in non-decreasing
# order of heights for an annual photo.
# Return the minimum number of students that must
# move in order for all students to be standing in
# non-decreasing order of height.
# Notice that when a group of students is selected
# they can reorder in any possible way between themselves
# and the non selected students remain on their seats.
def heightchecker(heights):
    """Return how many students stand at a different index than they
    would in the non-decreasing ordering of their heights."""
    expected = sorted(heights)
    return sum(1 for current, target in zip(heights, expected)
               if current != target)
# Sample input; heightchecker returns 3 here, but the result is discarded.
heights = [1, 1, 4, 2, 1, 3]
heightchecker(heights)
| 25.678571 | 57 | 0.707928 |
def heightchecker(heights):
    """Count positions where *heights* differs from its sorted order."""
    expected = sorted(heights)
    mismatches = 0
    for position, height in enumerate(heights):
        if height != expected[position]:
            mismatches += 1
    return mismatches
# Example invocation; the computed value (3) is not printed or stored.
heights = [1, 1, 4, 2, 1, 3]
heightchecker(heights)
| true | true |
f72b3ae733f2a861d7932c76a99b7695e4c0bcce | 842 | py | Python | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 2 | 2021-02-01T17:30:46.000Z | 2021-02-22T13:59:49.000Z | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 106 | 2021-04-16T21:15:20.000Z | 2022-03-31T23:02:50.000Z | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lstmcpipe | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 3 | 2022-03-02T09:23:09.000Z | 2022-03-03T16:00:25.000Z | #!/usr/bin/env python
import argparse
from lstmcpipe.io.data_management import (
move_dir_content,
check_and_make_dir_without_verification,
)
# Command-line interface: a source directory whose content is moved into a
# freshly created destination directory.
parser = argparse.ArgumentParser(
    description=(
        "Script to move a directory and its content after creating the destination"
        " directory."
    )
)
parser.add_argument(
    "--source", "-s",
    dest="source",
    type=str,
    help="source argument of move_dir_content",
)
parser.add_argument(
    "--destination", "-d",
    dest="dest",
    type=str,
    help="destination argument of move_dir_content",
)
def main():
    """Create the destination directory and move the source's content into it.

    Uses the non-interactive directory helper: the interactive
    check_and_make_dir() would demand user input, which is unsuitable here.
    """
    parsed = parser.parse_args()
    check_and_make_dir_without_verification(parsed.dest)
    move_dir_content(parsed.source, parsed.dest)


if __name__ == "__main__":
    main()
| 20.536585 | 91 | 0.704276 |
import argparse
from lstmcpipe.io.data_management import (
move_dir_content,
check_and_make_dir_without_verification,
)
# CLI definition: both options are plain strings naming directories.
parser = argparse.ArgumentParser(
    description="Script to move a directory and its content after creating the destination"
    " directory."
)
for _flags, _dest, _help in (
    (("--source", "-s"), "source", "source argument of move_dir_content"),
    (("--destination", "-d"), "dest", "destination argument of move_dir_content"),
):
    parser.add_argument(*_flags, type=str, dest=_dest, help=_help)
def main():
    """Make the destination directory without prompting, then relocate the source content."""
    opts = parser.parse_args()
    check_and_make_dir_without_verification(opts.dest)
    move_dir_content(opts.source, opts.dest)


if __name__ == "__main__":
    main()
| true | true |
f72b3cc7e9db8fe13e7966a92394dc17106664c7 | 82,459 | py | Python | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | 2 | 2018-10-24T01:47:49.000Z | 2020-05-30T15:23:02.000Z | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | null | null | null | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | 1 | 2018-03-29T11:47:01.000Z | 2018-03-29T11:47:01.000Z | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_rcnn(Symbol):
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_symbol(self, cfg, is_train=True):
# config alias for convenient
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res2, res3, res4, res5 = self.get_resnet_backbone(data)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
# group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, mx.sym.BlockGrad(cls_prob), mx.sym.BlockGrad(bbox_loss), mx.sym.BlockGrad(rcnn_label)])
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
for name in self.shared_param_list:
arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_weight_rcnn(cfg, arg_params, aux_params)
self.init_weight_fpn(cfg, arg_params, aux_params)
| 85.009278 | 180 | 0.613323 |
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_rcnn(Symbol):
def __init__(self):
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_symbol(self, cfg, is_train=True):
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
res2, res3, res4, res5 = self.get_resnet_backbone(data)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
    def init_weight(self, cfg, arg_params, aux_params):
        # Initialize every shared (reused-across-levels) head parameter,
        # then delegate to the RCNN- and FPN-specific initializers.
        for name in self.shared_param_list:
            # Convention as elsewhere in this file: Gaussian weights, zero biases.
            arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
            arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
        self.init_weight_rcnn(cfg, arg_params, aux_params)
        self.init_weight_fpn(cfg, arg_params, aux_params)
| true | true |
f72b3fd1a230b5adaf1f4d92129a73ad00db5a0e | 618 | py | Python | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | null | null | null | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | null | null | null | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | 1 | 2021-04-26T08:20:43.000Z | 2021-04-26T08:20:43.000Z | import logging
import flask
from controller.goods_controller import *
from controller.order_controller import *
from common.exception_advice import *
app = flask.Flask(__name__)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def main():
    """Attach route blueprints to the app and start the HTTP server."""
    blueprint_table = (
        (exception_advice, "/"),
        (order, '/order'),
        (goods, '/goods'),
    )
    for blueprint, prefix in blueprint_table:
        app.register_blueprint(blueprint, url_prefix=prefix)
    # Serve on all interfaces; threaded built-in server, debug disabled.
    app.run(host="0.0.0.0", port=33023, debug=False, threaded=True)
if __name__ == '__main__':
main()
| 25.75 | 102 | 0.708738 | import logging
import flask
from controller.goods_controller import *
from controller.order_controller import *
from common.exception_advice import *
app = flask.Flask(__name__)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def main():
    # Attach route blueprints, then serve on all interfaces, port 33023.
    app.register_blueprint(exception_advice, url_prefix="/")
    app.register_blueprint(order, url_prefix='/order')
    app.register_blueprint(goods, url_prefix='/goods')
    # Threaded built-in server; debug disabled.
    app.run(host="0.0.0.0", port=33023, debug=False, threaded=True)
if __name__ == '__main__':
main()
| true | true |
f72b4000190522de1de32f143323c9268b7fabb6 | 658 | py | Python | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | 1 | 2019-06-03T15:24:53.000Z | 2019-06-03T15:24:53.000Z | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
import pytest
def test_add_contact(app, db, json_contacts):
    """Adding a contact must extend the stored list by exactly that contact."""
    new_contact = json_contacts
    with pytest.allure.step('Given a Contact list'):
        contacts_before = db.get_contact_list()
    with pytest.allure.step('When I add a contact %s to the list' % new_contact):
        app.contact.create(new_contact)
    with pytest.allure.step('Then the contact list is equal to the old list with the added contact'):
        contacts_after = db.get_contact_list()
        expected = contacts_before + [new_contact]
        # id_or_max presumably keys records by id -- sorting makes the
        # comparison order-independent.
        assert sorted(expected, key=Contact.id_or_max) == sorted(contacts_after, key=Contact.id_or_max)
| 27.416667 | 105 | 0.702128 |
from model.contact import Contact
import pytest
def test_add_contact(app, db, json_contacts):
    # json_contacts fixture supplies a single Contact built from test data.
    contact = json_contacts
    with pytest.allure.step('Given a Contact list'):
        old_contacts = db.get_contact_list()
    with pytest.allure.step('When I add a contact %s to the list' % contact):
        app.contact.create(contact)
    with pytest.allure.step('Then the contact list is equal to the old list with the added contact'):
        new_contacts = db.get_contact_list()
        old_contacts.append(contact)
        # id_or_max presumably keys records by id (max for missing ids) --
        # confirm in model.contact; sorting makes the check order-independent.
        assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| true | true |
f72b41cfb72a15d0999c4d3c54c0b25cae7907ac | 1,379 | py | Python | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | """
conftest.py pytest_fixtures can be accessed by multiple test files
test function has fixture func name as param, then fixture func called and result
passed to test func
added localhost.localdomain to /etc/hosts
"""
import pytest
from cnf.main import setup_app
import pymongo
config_name = 'testing'
the_app = setup_app(config_name, dict(
TESTING=True,
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SERVER_NAME='localhost.localdomain',
WTF_CSRF_ENABLED=False,
))
# the_app = setup_app()
the_app.app_context().push()
@pytest.fixture(scope='session')
def app():
    """Session-wide Flask application configured for testing (set up above)."""
    return the_app
@pytest.fixture(scope='session', autouse=True)
def db():
    """Create a test copy of cnf for the session and return its handle.

    Also removes the 'example_user' record so registration tests are
    repeatable across runs.
    """
    client = pymongo.MongoClient("localhost", 27017)
    # Fix: `if not client['cnf_test']` is invalid -- pymongo Database objects
    # do not implement truth-value testing (it raises NotImplementedError).
    # Check for the database's existence by name instead.
    if 'cnf_test' not in client.list_database_names():
        # NOTE(review): the 'copydb' admin command was removed in MongoDB
        # 4.2 -- confirm the target server version still supports it.
        client.admin.command('copydb', fromdb='cnf',
                             todb='cnf_test')
    db = client['cnf_test']
    # delete example_user from user collection
    user_coll = db.users
    myquery = {"username": "example_user"}
    user_coll.delete_one(myquery)
    return db
@pytest.fixture(scope='function')
def data():
    # Placeholder fixture: no per-test data is prepared yet.
    pass
@pytest.fixture(scope='session')
def client(app):
    """Flask test client bound to the session-wide app fixture."""
    return app.test_client()
| 24.192982 | 81 | 0.648296 | import pytest
from cnf.main import setup_app
import pymongo
config_name = 'testing'
the_app = setup_app(config_name, dict(
TESTING=True,
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SERVER_NAME='localhost.localdomain',
WTF_CSRF_ENABLED=False,
))
the_app.app_context().push()
@pytest.fixture(scope='session')
def app():
    """Session-wide Flask application configured for testing (set up above)."""
    return the_app
@pytest.fixture(scope='session', autouse=True)
def db():
    """Session-scoped handle to the 'cnf_test' database, cloned from 'cnf'.

    Also removes the 'example_user' record so registration tests are
    repeatable across runs.
    """
    client = pymongo.MongoClient("localhost", 27017)
    # Fix: `if not client['cnf_test']` is invalid -- pymongo Database objects
    # do not implement truth-value testing (it raises NotImplementedError).
    # Check for the database's existence by name instead.
    if 'cnf_test' not in client.list_database_names():
        # NOTE(review): the 'copydb' admin command was removed in MongoDB
        # 4.2 -- confirm the target server version still supports it.
        client.admin.command('copydb', fromdb='cnf',
                             todb='cnf_test')
    db = client['cnf_test']
    # Remove the user created by registration tests so runs are repeatable.
    user_coll = db.users
    myquery = {"username": "example_user"}
    user_coll.delete_one(myquery)
    return db
@pytest.fixture(scope='function')
def data():
    # Placeholder fixture: no per-test data is prepared yet.
    pass
@pytest.fixture(scope='session')
def client(app):
    """Flask test client bound to the session-wide app fixture."""
    return app.test_client()
| true | true |
f72b41deca8203786bd3da834a3d9db8b40ffee7 | 14,866 | py | Python | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistics generation options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import types as python_types
from typing import Dict, List, Optional, Text
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import example_weight_map
from google.protobuf import json_format
from tensorflow_metadata.proto.v0 import schema_pb2
_SCHEMA_JSON_KEY = 'schema_json'
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY = 'per_feature_weight_override_json'
# TODO(b/68277922): Currently we use a single epsilon (error tolerance)
# parameter for all histograms. Set this parameter specific to each
# histogram based on the number of buckets.
# TODO(b/118833241): Set MI default configs when MI is a default generator
class StatsOptions(object):
"""Options for generating statistics."""
def __init__(
self,
generators: Optional[List[stats_generator.StatsGenerator]] = None,
feature_whitelist: Optional[List[types.FeatureName]] = None,
schema: Optional[schema_pb2.Schema] = None,
label_feature: Optional[types.FeatureName] = None,
weight_feature: Optional[types.FeatureName] = None,
slice_functions: Optional[List[types.SliceFunction]] = None,
sample_rate: Optional[float] = None,
num_top_values: int = 20,
frequency_threshold: int = 1,
weighted_frequency_threshold: float = 1.0,
num_rank_histogram_buckets: int = 1000,
num_values_histogram_buckets: int = 10,
num_histogram_buckets: int = 10,
num_quantiles_histogram_buckets: int = 10,
epsilon: float = 0.01,
infer_type_from_schema: bool = False,
desired_batch_size: Optional[int] = None,
enable_semantic_domain_stats: bool = False,
semantic_domain_stats_sample_rate: Optional[float] = None,
per_feature_weight_override: Optional[Dict[types.FeaturePath,
types.FeatureName]] = None):
"""Initializes statistics options.
Args:
generators: An optional list of statistics generators. A statistics
generator must extend either CombinerStatsGenerator or
TransformStatsGenerator.
feature_whitelist: An optional list of names of the features to calculate
statistics for.
schema: An optional tensorflow_metadata Schema proto. Currently we use the
schema to infer categorical and bytes features.
label_feature: An optional feature name which represents the label.
weight_feature: An optional feature name whose numeric value represents
the weight of an example.
slice_functions: An optional list of functions that generate slice keys
for each example. Each slice function should take an example dict as
input and return a list of zero or more slice keys.
sample_rate: An optional sampling rate. If specified, statistics is
computed over the sample.
num_top_values: An optional number of most frequent feature values to keep
for string features.
frequency_threshold: An optional minimum number of examples the most
frequent values must be present in.
weighted_frequency_threshold: An optional minimum weighted number of
examples the most frequent weighted values must be present in. This
option is only relevant when a weight_feature is specified.
num_rank_histogram_buckets: An optional number of buckets in the rank
histogram for string features.
num_values_histogram_buckets: An optional number of buckets in a quantiles
histogram for the number of values per Feature, which is stored in
CommonStatistics.num_values_histogram.
num_histogram_buckets: An optional number of buckets in a standard
NumericStatistics.histogram with equal-width buckets.
num_quantiles_histogram_buckets: An optional number of buckets in a
quantiles NumericStatistics.histogram.
epsilon: An optional error tolerance for the computation of quantiles,
typically a small fraction close to zero (e.g. 0.01). Higher values of
epsilon increase the quantile approximation, and hence result in more
unequal buckets, but could improve performance, and resource
consumption.
infer_type_from_schema: A boolean to indicate whether the feature types
should be inferred from the schema. If set to True, an input schema
must be provided. This flag is used only when generating statistics
on CSV data.
desired_batch_size: An optional number of examples to include in each
batch that is passed to the statistics generators.
enable_semantic_domain_stats: If True statistics for semantic domains are
generated (e.g: image, text domains).
semantic_domain_stats_sample_rate: An optional sampling rate for semantic
domain statistics. If specified, semantic domain statistics is computed
over a sample.
per_feature_weight_override: If specified, the "example weight" paired
with a feature will be first looked up in this map and if not found,
fall back to `weight_feature`.
"""
self.generators = generators
self.feature_whitelist = feature_whitelist
self.schema = schema
self.label_feature = label_feature
self.weight_feature = weight_feature
self.slice_functions = slice_functions
self.sample_rate = sample_rate
self.num_top_values = num_top_values
self.frequency_threshold = frequency_threshold
self.weighted_frequency_threshold = weighted_frequency_threshold
self.num_rank_histogram_buckets = num_rank_histogram_buckets
self.num_values_histogram_buckets = num_values_histogram_buckets
self.num_histogram_buckets = num_histogram_buckets
self.num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
self.epsilon = epsilon
self.infer_type_from_schema = infer_type_from_schema
self.desired_batch_size = desired_batch_size
self.enable_semantic_domain_stats = enable_semantic_domain_stats
self.semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
self._per_feature_weight_override = per_feature_weight_override
def to_json(self) -> Text:
"""Convert from an object to JSON representation of the __dict__ attribute.
Custom generators and slice_functions are skipped, meaning that they will
not be used when running TFDV in a setting where the stats options have been
json-serialized, first. This will happen in the case where TFDV is run as a
TFX component. The schema proto will be json_encoded.
Returns:
A JSON representation of a filtered version of __dict__.
"""
options_dict = copy.copy(self.__dict__)
options_dict['_slice_functions'] = None
options_dict['_generators'] = None
if self.schema is not None:
del options_dict['_schema']
options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)
if self._per_feature_weight_override is not None:
del options_dict['_per_feature_weight_override']
options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {
k.to_json(): v for k, v in self._per_feature_weight_override.items()
}
return json.dumps(options_dict)
@classmethod
def from_json(cls, options_json: Text) -> 'StatsOptions':
"""Construct an instance of stats options from a JSON representation.
Args:
options_json: A JSON representation of the __dict__ attribute of a
StatsOptions instance.
Returns:
A StatsOptions instance constructed by setting the __dict__ attribute to
the deserialized value of options_json.
"""
options_dict = json.loads(options_json)
if _SCHEMA_JSON_KEY in options_dict:
options_dict['_schema'] = json_format.Parse(
options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())
del options_dict[_SCHEMA_JSON_KEY]
per_feature_weight_override_json = options_dict.get(
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)
if per_feature_weight_override_json is not None:
options_dict['_per_feature_weight_override'] = {
types.FeaturePath.from_json(k): v
for k, v in per_feature_weight_override_json.items()
}
del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]
options = cls()
options.__dict__ = options_dict
return options
@property
def generators(self) -> Optional[List[stats_generator.StatsGenerator]]:
return self._generators
@generators.setter
def generators(
self, generators: Optional[List[stats_generator.StatsGenerator]]) -> None:
if generators is not None:
if not isinstance(generators, list):
raise TypeError('generators is of type %s, should be a list.' %
type(generators).__name__)
for generator in generators:
if not isinstance(generator, (
stats_generator.CombinerStatsGenerator,
stats_generator.TransformStatsGenerator,
stats_generator.CombinerFeatureStatsGenerator,
)):
raise TypeError(
'Statistics generator must extend one of '
'CombinerStatsGenerator, TransformStatsGenerator, or '
'CombinerFeatureStatsGenerator found object of type %s.' %
generator.__class__.__name__)
self._generators = generators
@property
def feature_whitelist(self) -> Optional[List[types.FeatureName]]:
return self._feature_whitelist
@feature_whitelist.setter
def feature_whitelist(
self, feature_whitelist: Optional[List[types.FeatureName]]) -> None:
if feature_whitelist is not None and not isinstance(feature_whitelist,
list):
raise TypeError('feature_whitelist is of type %s, should be a list.' %
type(feature_whitelist).__name__)
self._feature_whitelist = feature_whitelist
@property
def schema(self) -> Optional[schema_pb2.Schema]:
return self._schema
@schema.setter
def schema(self, schema: Optional[schema_pb2.Schema]) -> None:
if schema is not None and not isinstance(schema, schema_pb2.Schema):
raise TypeError('schema is of type %s, should be a Schema proto.' %
type(schema).__name__)
self._schema = schema
@property
def slice_functions(self) -> Optional[List[types.SliceFunction]]:
return self._slice_functions
@slice_functions.setter
def slice_functions(
self, slice_functions: Optional[List[types.SliceFunction]]) -> None:
if slice_functions is not None:
if not isinstance(slice_functions, list):
raise TypeError('slice_functions is of type %s, should be a list.' %
type(slice_functions).__name__)
for slice_function in slice_functions:
if not isinstance(slice_function, python_types.FunctionType):
raise TypeError('slice_functions must contain functions only.')
self._slice_functions = slice_functions
@property
def sample_rate(self) -> Optional[float]:
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate: Optional[float]):
if sample_rate is not None:
if not 0 < sample_rate <= 1:
raise ValueError('Invalid sample_rate %f' % sample_rate)
self._sample_rate = sample_rate
@property
def num_values_histogram_buckets(self) -> int:
return self._num_values_histogram_buckets
@num_values_histogram_buckets.setter
def num_values_histogram_buckets(self,
num_values_histogram_buckets: int) -> None:
# TODO(b/120164508): Disallow num_values_histogram_buckets = 1 because it
# causes the underlying quantile op to fail. If the quantile op is modified
# to support num_quantiles = 1, then allow num_values_histogram_buckets = 1.
if num_values_histogram_buckets <= 1:
raise ValueError('Invalid num_values_histogram_buckets %d' %
num_values_histogram_buckets)
self._num_values_histogram_buckets = num_values_histogram_buckets
@property
def num_histogram_buckets(self) -> int:
return self._num_histogram_buckets
@num_histogram_buckets.setter
def num_histogram_buckets(self, num_histogram_buckets: int) -> None:
if num_histogram_buckets < 1:
raise ValueError(
'Invalid num_histogram_buckets %d' % num_histogram_buckets)
self._num_histogram_buckets = num_histogram_buckets
@property
def num_quantiles_histogram_buckets(self) -> int:
return self._num_quantiles_histogram_buckets
@num_quantiles_histogram_buckets.setter
def num_quantiles_histogram_buckets(
self, num_quantiles_histogram_buckets: int) -> None:
if num_quantiles_histogram_buckets < 1:
raise ValueError('Invalid num_quantiles_histogram_buckets %d' %
num_quantiles_histogram_buckets)
self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
@property
def desired_batch_size(self) -> Optional[int]:
return self._desired_batch_size
@desired_batch_size.setter
def desired_batch_size(self, desired_batch_size: Optional[int]) -> None:
if desired_batch_size is not None and desired_batch_size < 1:
raise ValueError('Invalid desired_batch_size %d' %
desired_batch_size)
self._desired_batch_size = desired_batch_size
@property
def semantic_domain_stats_sample_rate(self) -> Optional[float]:
return self._semantic_domain_stats_sample_rate
@semantic_domain_stats_sample_rate.setter
def semantic_domain_stats_sample_rate(
self, semantic_domain_stats_sample_rate: Optional[float]):
if semantic_domain_stats_sample_rate is not None:
if not 0 < semantic_domain_stats_sample_rate <= 1:
raise ValueError('Invalid semantic_domain_stats_sample_rate %f'
% semantic_domain_stats_sample_rate)
self._semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
@property
def example_weight_map(self):
return example_weight_map.ExampleWeightMap(
self.weight_feature, self._per_feature_weight_override)
| 43.852507 | 80 | 0.735907 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import types as python_types
from typing import Dict, List, Optional, Text
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import example_weight_map
from google.protobuf import json_format
from tensorflow_metadata.proto.v0 import schema_pb2
_SCHEMA_JSON_KEY = 'schema_json'
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY = 'per_feature_weight_override_json'
class StatsOptions(object):
def __init__(
self,
generators: Optional[List[stats_generator.StatsGenerator]] = None,
feature_whitelist: Optional[List[types.FeatureName]] = None,
schema: Optional[schema_pb2.Schema] = None,
label_feature: Optional[types.FeatureName] = None,
weight_feature: Optional[types.FeatureName] = None,
slice_functions: Optional[List[types.SliceFunction]] = None,
sample_rate: Optional[float] = None,
num_top_values: int = 20,
frequency_threshold: int = 1,
weighted_frequency_threshold: float = 1.0,
num_rank_histogram_buckets: int = 1000,
num_values_histogram_buckets: int = 10,
num_histogram_buckets: int = 10,
num_quantiles_histogram_buckets: int = 10,
epsilon: float = 0.01,
infer_type_from_schema: bool = False,
desired_batch_size: Optional[int] = None,
enable_semantic_domain_stats: bool = False,
semantic_domain_stats_sample_rate: Optional[float] = None,
per_feature_weight_override: Optional[Dict[types.FeaturePath,
types.FeatureName]] = None):
self.generators = generators
self.feature_whitelist = feature_whitelist
self.schema = schema
self.label_feature = label_feature
self.weight_feature = weight_feature
self.slice_functions = slice_functions
self.sample_rate = sample_rate
self.num_top_values = num_top_values
self.frequency_threshold = frequency_threshold
self.weighted_frequency_threshold = weighted_frequency_threshold
self.num_rank_histogram_buckets = num_rank_histogram_buckets
self.num_values_histogram_buckets = num_values_histogram_buckets
self.num_histogram_buckets = num_histogram_buckets
self.num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
self.epsilon = epsilon
self.infer_type_from_schema = infer_type_from_schema
self.desired_batch_size = desired_batch_size
self.enable_semantic_domain_stats = enable_semantic_domain_stats
self.semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
self._per_feature_weight_override = per_feature_weight_override
def to_json(self) -> Text:
options_dict = copy.copy(self.__dict__)
options_dict['_slice_functions'] = None
options_dict['_generators'] = None
if self.schema is not None:
del options_dict['_schema']
options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)
if self._per_feature_weight_override is not None:
del options_dict['_per_feature_weight_override']
options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {
k.to_json(): v for k, v in self._per_feature_weight_override.items()
}
return json.dumps(options_dict)
@classmethod
def from_json(cls, options_json: Text) -> 'StatsOptions':
options_dict = json.loads(options_json)
if _SCHEMA_JSON_KEY in options_dict:
options_dict['_schema'] = json_format.Parse(
options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())
del options_dict[_SCHEMA_JSON_KEY]
per_feature_weight_override_json = options_dict.get(
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)
if per_feature_weight_override_json is not None:
options_dict['_per_feature_weight_override'] = {
types.FeaturePath.from_json(k): v
for k, v in per_feature_weight_override_json.items()
}
del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]
options = cls()
options.__dict__ = options_dict
return options
@property
def generators(self) -> Optional[List[stats_generator.StatsGenerator]]:
return self._generators
@generators.setter
def generators(
self, generators: Optional[List[stats_generator.StatsGenerator]]) -> None:
if generators is not None:
if not isinstance(generators, list):
raise TypeError('generators is of type %s, should be a list.' %
type(generators).__name__)
for generator in generators:
if not isinstance(generator, (
stats_generator.CombinerStatsGenerator,
stats_generator.TransformStatsGenerator,
stats_generator.CombinerFeatureStatsGenerator,
)):
raise TypeError(
'Statistics generator must extend one of '
'CombinerStatsGenerator, TransformStatsGenerator, or '
'CombinerFeatureStatsGenerator found object of type %s.' %
generator.__class__.__name__)
self._generators = generators
@property
def feature_whitelist(self) -> Optional[List[types.FeatureName]]:
return self._feature_whitelist
@feature_whitelist.setter
def feature_whitelist(
self, feature_whitelist: Optional[List[types.FeatureName]]) -> None:
if feature_whitelist is not None and not isinstance(feature_whitelist,
list):
raise TypeError('feature_whitelist is of type %s, should be a list.' %
type(feature_whitelist).__name__)
self._feature_whitelist = feature_whitelist
@property
def schema(self) -> Optional[schema_pb2.Schema]:
return self._schema
@schema.setter
def schema(self, schema: Optional[schema_pb2.Schema]) -> None:
if schema is not None and not isinstance(schema, schema_pb2.Schema):
raise TypeError('schema is of type %s, should be a Schema proto.' %
type(schema).__name__)
self._schema = schema
@property
def slice_functions(self) -> Optional[List[types.SliceFunction]]:
return self._slice_functions
@slice_functions.setter
def slice_functions(
self, slice_functions: Optional[List[types.SliceFunction]]) -> None:
if slice_functions is not None:
if not isinstance(slice_functions, list):
raise TypeError('slice_functions is of type %s, should be a list.' %
type(slice_functions).__name__)
for slice_function in slice_functions:
if not isinstance(slice_function, python_types.FunctionType):
raise TypeError('slice_functions must contain functions only.')
self._slice_functions = slice_functions
@property
def sample_rate(self) -> Optional[float]:
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate: Optional[float]):
if sample_rate is not None:
if not 0 < sample_rate <= 1:
raise ValueError('Invalid sample_rate %f' % sample_rate)
self._sample_rate = sample_rate
@property
def num_values_histogram_buckets(self) -> int:
return self._num_values_histogram_buckets
@num_values_histogram_buckets.setter
def num_values_histogram_buckets(self,
num_values_histogram_buckets: int) -> None:
if num_values_histogram_buckets <= 1:
raise ValueError('Invalid num_values_histogram_buckets %d' %
num_values_histogram_buckets)
self._num_values_histogram_buckets = num_values_histogram_buckets
@property
def num_histogram_buckets(self) -> int:
return self._num_histogram_buckets
@num_histogram_buckets.setter
def num_histogram_buckets(self, num_histogram_buckets: int) -> None:
if num_histogram_buckets < 1:
raise ValueError(
'Invalid num_histogram_buckets %d' % num_histogram_buckets)
self._num_histogram_buckets = num_histogram_buckets
@property
def num_quantiles_histogram_buckets(self) -> int:
return self._num_quantiles_histogram_buckets
@num_quantiles_histogram_buckets.setter
def num_quantiles_histogram_buckets(
self, num_quantiles_histogram_buckets: int) -> None:
if num_quantiles_histogram_buckets < 1:
raise ValueError('Invalid num_quantiles_histogram_buckets %d' %
num_quantiles_histogram_buckets)
self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
@property
def desired_batch_size(self) -> Optional[int]:
return self._desired_batch_size
@desired_batch_size.setter
def desired_batch_size(self, desired_batch_size: Optional[int]) -> None:
if desired_batch_size is not None and desired_batch_size < 1:
raise ValueError('Invalid desired_batch_size %d' %
desired_batch_size)
self._desired_batch_size = desired_batch_size
@property
def semantic_domain_stats_sample_rate(self) -> Optional[float]:
return self._semantic_domain_stats_sample_rate
@semantic_domain_stats_sample_rate.setter
def semantic_domain_stats_sample_rate(
self, semantic_domain_stats_sample_rate: Optional[float]):
if semantic_domain_stats_sample_rate is not None:
if not 0 < semantic_domain_stats_sample_rate <= 1:
raise ValueError('Invalid semantic_domain_stats_sample_rate %f'
% semantic_domain_stats_sample_rate)
self._semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
@property
def example_weight_map(self):
return example_weight_map.ExampleWeightMap(
self.weight_feature, self._per_feature_weight_override)
| true | true |
f72b42d5c27b87418372e74671846c4f0f9ab98a | 3,825 | py | Python | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
    # Build the DUT: an AXI-master module that streams data from ram_a
    # through an arg-max reduction stream and writes the result to ram_b.
    m = Module('blinkled')
    clk = m.Input('CLK')
    rst = m.Input('RST')
    datawidth = 32
    addrwidth = 10
    myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
    ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
    ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
    # Stream pipeline: source 'a' -> ReduceArgMaxValid -> sink 'index'.
    # The sink fires only while the reduction reports a valid arg-max.
    strm = vthread.Stream(m, 'mystream', clk, rst)
    a = strm.source('a')
    size = strm.parameter('size')
    index, _max, argmax_valid = strm.ReduceArgMaxValid(a, size)
    strm.sink(index, 'index', when=argmax_valid, when_name='argmax_valid')
    def comp_stream(size, offset):
        # Hardware path: run the stream over ram_a[offset:offset+size] and
        # store the winning index at ram_b[offset].
        strm.set_source('a', ram_a, offset, size)
        strm.set_parameter('size', size)
        strm.set_sink('index', ram_b, offset, 1)
        strm.run()
        strm.join()
    def comp_sequential(size, offset):
        # Software reference: plain arg-max loop over the same data.
        # NOTE(review): _max starts at 0, so an all-negative input would
        # yield index 0; the test data injects 200, so this is not hit here.
        index = 0
        _max = 0
        for i in range(size):
            a = ram_a.read(i + offset)
            if a > _max:
                index = i
                _max = a
        ram_b.write(offset, index)
    def check(size, offset_stream, offset_seq):
        # Compare the stream result against the sequential reference.
        all_ok = True
        for i in range(size):
            st = ram_b.read(i + offset_stream)
            sq = ram_b.read(i + offset_seq)
            if vthread.verilog.NotEql(st, sq):
                all_ok = False
        if all_ok:
            print('# verify: PASSED')
        else:
            print('# verify: FAILED')
    def comp(size):
        # Run both implementations on identical data (with injected
        # outliers at offsets +3 and +7) and verify they agree.
        offset = 0
        myaxi.dma_read(ram_a, offset, 0, size)
        ram_a.write(offset + 3, -100)
        ram_a.write(offset + 7, 200)
        comp_stream(size, offset)
        myaxi.dma_write(ram_b, offset, 1024, 1)
        offset = size
        myaxi.dma_read(ram_a, offset, 0, size)
        ram_a.write(offset + 3, -100)
        ram_a.write(offset + 7, 200)
        comp_sequential(size, offset)
        myaxi.dma_write(ram_b, offset, 1024 * 2, 1)
        check(1, 0, offset)
        vthread.finish()
    th = vthread.Thread(m, 'th_comp', clk, rst, comp)
    fsm = th.start(32)
    return m
def mkTest(memimg_name=None):
    # Testbench wrapper: instantiate the DUT with an AXI memory model,
    # clock/reset generation, and a hard simulation timeout.
    m = Module('test')
    # target instance
    led = mkLed()
    # copy params and ports from the DUT into the testbench
    params = m.copy_params(led)
    ports = m.copy_sim_ports(led)
    clk = ports['CLK']
    rst = ports['RST']
    memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
    memory.connect(ports, 'myaxi')
    uut = m.Instance(led, 'uut',
                     params=m.connect_params(led),
                     ports=m.connect_ports(led))
    #simulation.setup_waveform(m, uut)
    simulation.setup_clock(m, clk, hperiod=5)
    init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
    # Safety net: force $finish after a fixed delay even if the design hangs.
    init.add(
        Delay(1000000),
        Systask('finish'),
    )
    return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
    # Generate Verilog for the testbench, run the simulator, and return
    # its captured output as a string.
    if outputfile is None:
        outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
    memimg_name = 'memimg_' + outputfile
    test = mkTest(memimg_name=memimg_name)
    if filename is not None:
        test.to_verilog(filename)
    sim = simulation.Simulator(test, sim=simtype)
    rslt = sim.run(outputfile=outputfile)
    lines = rslt.splitlines()
    # Strip the trailing line -- presumably the simulator's status/banner
    # line (always for iverilog; for verilator only when it starts with '-').
    if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
        rslt = '\n'.join(lines[:-1])
    return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| 27.12766 | 87 | 0.602092 | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
strm = vthread.Stream(m, 'mystream', clk, rst)
a = strm.source('a')
size = strm.parameter('size')
index, _max, argmax_valid = strm.ReduceArgMaxValid(a, size)
strm.sink(index, 'index', when=argmax_valid, when_name='argmax_valid')
def comp_stream(size, offset):
strm.set_source('a', ram_a, offset, size)
strm.set_parameter('size', size)
strm.set_sink('index', ram_b, offset, 1)
strm.run()
strm.join()
def comp_sequential(size, offset):
index = 0
_max = 0
for i in range(size):
a = ram_a.read(i + offset)
if a > _max:
index = i
_max = a
ram_b.write(offset, index)
def check(size, offset_stream, offset_seq):
all_ok = True
for i in range(size):
st = ram_b.read(i + offset_stream)
sq = ram_b.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_stream(size, offset)
myaxi.dma_write(ram_b, offset, 1024, 1)
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_sequential(size, offset)
myaxi.dma_write(ram_b, offset, 1024 * 2, 1)
check(1, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
return m
def mkTest(memimg_name=None):
m = Module('test')
led = mkLed()
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| true | true |
f72b4311c1c935e36ff12b3275ecdd0411d9a83c | 2,457 | py | Python | solfasol/shop/migrations/0004_auto_20201004_2111.py | rekognize/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | null | null | null | solfasol/shop/migrations/0004_auto_20201004_2111.py | rekognize/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | 1 | 2020-06-18T13:08:47.000Z | 2020-06-18T13:08:47.000Z | solfasol/shop/migrations/0004_auto_20201004_2111.py | Solfasol/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-04 18:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issues', '0009_auto_20200918_0020'),
('shop', '0003_auto_20201004_2109'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'cart',
'verbose_name_plural': 'carts',
},
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.item', verbose_name='item')),
],
),
migrations.CreateModel(
name='CartIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.issue', verbose_name='issue')),
],
),
migrations.AddField(
model_name='cart',
name='issues',
field=models.ManyToManyField(blank=True, through='shop.CartIssue', to='issues.Issue'),
),
migrations.AddField(
model_name='cart',
name='items',
field=models.ManyToManyField(blank=True, through='shop.CartItem', to='shop.Item'),
),
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.cart', verbose_name='cart'),
),
]
| 40.95 | 142 | 0.580383 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issues', '0009_auto_20200918_0020'),
('shop', '0003_auto_20201004_2109'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'cart',
'verbose_name_plural': 'carts',
},
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.item', verbose_name='item')),
],
),
migrations.CreateModel(
name='CartIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.issue', verbose_name='issue')),
],
),
migrations.AddField(
model_name='cart',
name='issues',
field=models.ManyToManyField(blank=True, through='shop.CartIssue', to='issues.Issue'),
),
migrations.AddField(
model_name='cart',
name='items',
field=models.ManyToManyField(blank=True, through='shop.CartItem', to='shop.Item'),
),
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.cart', verbose_name='cart'),
),
]
| true | true |
f72b433198e89063d5e6f1584dc14c04ef6a68fb | 2,261 | py | Python | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | 3 | 2019-01-20T08:55:06.000Z | 2019-12-02T05:59:26.000Z | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | null | null | null | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | 3 | 2019-08-06T07:27:34.000Z | 2019-09-28T23:26:57.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018 MIT Probabilistic Computing Project.
# Released under Apache 2.0; refer to LICENSE.txt.
from collections import Counter
import numpy as np
from cgpm.utils.general import get_prng
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.transition_hypers import transition_hyper_grids
from cgpm2.transition_hypers import transition_hypers
from cgpm2.transition_rows import transition_rows
from cgpm2.walks import get_cgpms_by_output_index
def test_transition_crp_mixture():
prng = get_prng(2)
data = np.concatenate((
prng.normal(loc=0, scale=2, size=20),
prng.normal(loc=30, scale=1, size=20),
prng.normal(loc=-30, scale=1, size=20),
))
infinite_mixture = FlexibleRowMixture(
cgpm_row_divide=CRP([1], [], rng=prng),
cgpm_components_base=Normal([0], [], rng=prng),
rng=prng
)
for rowid, value in enumerate(data):
infinite_mixture.observe(rowid, {0: value})
cgpms = {
0 : get_cgpms_by_output_index(infinite_mixture, 0),
1 : get_cgpms_by_output_index(infinite_mixture, 1),
}
grids = {
0 : transition_hyper_grids(cgpms[0], 30),
1 : transition_hyper_grids(cgpms[1], 30),
}
for _step in xrange(50):
rowids = prng.permutation(range(len(data)))
for rowid in rowids:
transition_rows(infinite_mixture, rowid, prng)
for output in infinite_mixture.outputs:
transition_hypers(cgpms[output], grids[output], prng)
rowids = range(60)
assignments0 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[00:20]]
assignments1 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[20:40]]
assignments2 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[40:60]]
mode0 = Counter(assignments0).most_common(1)[0][0]
mode1 = Counter(assignments1).most_common(1)[0][0]
mode2 = Counter(assignments2).most_common(1)[0][0]
assert sum(a==mode0 for a in assignments0) > int(0.95*len(assignments0))
assert sum(a==mode1 for a in assignments1) > int(0.95*len(assignments1))
assert sum(a==mode2 for a in assignments2) > int(0.95*len(assignments2))
| 37.065574 | 80 | 0.689518 |
from collections import Counter
import numpy as np
from cgpm.utils.general import get_prng
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.transition_hypers import transition_hyper_grids
from cgpm2.transition_hypers import transition_hypers
from cgpm2.transition_rows import transition_rows
from cgpm2.walks import get_cgpms_by_output_index
def test_transition_crp_mixture():
prng = get_prng(2)
data = np.concatenate((
prng.normal(loc=0, scale=2, size=20),
prng.normal(loc=30, scale=1, size=20),
prng.normal(loc=-30, scale=1, size=20),
))
infinite_mixture = FlexibleRowMixture(
cgpm_row_divide=CRP([1], [], rng=prng),
cgpm_components_base=Normal([0], [], rng=prng),
rng=prng
)
for rowid, value in enumerate(data):
infinite_mixture.observe(rowid, {0: value})
cgpms = {
0 : get_cgpms_by_output_index(infinite_mixture, 0),
1 : get_cgpms_by_output_index(infinite_mixture, 1),
}
grids = {
0 : transition_hyper_grids(cgpms[0], 30),
1 : transition_hyper_grids(cgpms[1], 30),
}
for _step in xrange(50):
rowids = prng.permutation(range(len(data)))
for rowid in rowids:
transition_rows(infinite_mixture, rowid, prng)
for output in infinite_mixture.outputs:
transition_hypers(cgpms[output], grids[output], prng)
rowids = range(60)
assignments0 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[00:20]]
assignments1 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[20:40]]
assignments2 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[40:60]]
mode0 = Counter(assignments0).most_common(1)[0][0]
mode1 = Counter(assignments1).most_common(1)[0][0]
mode2 = Counter(assignments2).most_common(1)[0][0]
assert sum(a==mode0 for a in assignments0) > int(0.95*len(assignments0))
assert sum(a==mode1 for a in assignments1) > int(0.95*len(assignments1))
assert sum(a==mode2 for a in assignments2) > int(0.95*len(assignments2))
| true | true |
f72b43a85de65bf336aa1e3f70ddb2708ba53801 | 15,647 | py | Python | webStorm-APICloud/python_tools/Lib/test/test_exceptions.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/test/test_exceptions.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/test/test_exceptions.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | # Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle, cPickle
import warnings
from test.test_support import TESTFN, unlink, run_unittest, captured_output
from test.test_pep352 import ignore_message_warning
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def testReload(self):
# Reloading the built-in exceptions module failed prior to Py2.2, while it
# should act the same as reloading built-in sys.
try:
import exceptions
reload(exceptions)
except ImportError, e:
self.fail("reloading exceptions: %s" % e)
def raise_catch(self, exc, excname):
try:
raise exc, "spam"
except exc, err:
buf1 = str(err)
try:
raise exc("spam")
except exc, err:
buf2 = str(err)
self.assertEquals(buf1, buf2)
self.assertEquals(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
sys.stdin = fp
x = raw_input()
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(IOError, "IOError")
self.assertRaises(IOError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec '/\n'
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 10000)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception, e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError, e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException:
def __init__(self_):
raise RuntimeError, "can't instantiate BadException"
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "test_capi1")
self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "__init__")
self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEquals(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.failUnlessEqual(str(WindowsError(1001)),
"1001")
self.failUnlessEqual(str(WindowsError(1001, "message")),
"[Error 1001] message")
self.failUnlessEqual(WindowsError(1001, "message").errno, 22)
self.failUnlessEqual(WindowsError(1001, "message").winerror, 1001)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'message' : '', 'args' : ()}),
(BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
(BaseException, ('foo',),
{'message' : 'foo', 'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'message' : '', 'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
(IOError, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(IOError, ('foo', 'bar'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz', 'quux'),
{'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
(EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'message' : '', 'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'message' : '', 'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'message' : '', 'args' : (),}),
(UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', u'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : u'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', '\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : '\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
{'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
'object' : u'\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
]
try:
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : 1,
'errno' : 22, 'filename' : 'filenameStr'})
)
except NameError:
pass
with warnings.catch_warnings():
ignore_message_warning()
for exc, args, expected in exceptionList:
try:
raise exc(*args)
except BaseException, e:
if type(e) is not exc:
raise
# Verify module name
self.assertEquals(type(e).__module__, 'exceptions')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
self.assertEquals(repr(getattr(e, checkArgName)),
repr(expected[checkArgName]),
'exception "%s", attribute "%s"' %
(repr(e), checkArgName))
# test for pickling support
for p in pickle, cPickle:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
new = p.loads(p.dumps(e, protocol))
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEquals(got, want,
'pickled "%r", attribute "%s"' %
(e, checkArgName))
def testSlicing(self):
# Test that you can slice an exception directly instead of requiring
# going through the 'args' attribute.
args = (1, 2, 3)
exc = BaseException(*args)
self.failUnlessEqual(exc[:], args)
def testKeywordArgs(self):
# test that builtin exception don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEquals(x.fancy_arg, 42)
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
# The test prints an unraisable recursion error when
# doing "except ValueError", this is because subclass
# checking has recursion checking too.
with captured_output("stderr"):
try:
g()
except RuntimeError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
def testUnicodeStrUsage(self):
# Make sure both instances and classes have a str and unicode
# representation.
self.failUnless(str(Exception))
self.failUnless(unicode(Exception))
self.failUnless(str(Exception('a')))
self.failUnless(unicode(Exception(u'a')))
self.failUnless(unicode(Exception(u'\xe1')))
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception):
__metaclass__ = Meta
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException, e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
with captured_output("stderr") as stderr:
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assert_(e is RuntimeError, e)
self.assert_("maximum recursion depth exceeded" in str(v), v)
def test_main():
run_unittest(ExceptionTests)
if __name__ == '__main__':
test_main()
| 39.31407 | 88 | 0.495686 |
import os
import sys
import unittest
import pickle, cPickle
import warnings
from test.test_support import TESTFN, unlink, run_unittest, captured_output
from test.test_pep352 import ignore_message_warning
class ExceptionTests(unittest.TestCase):
def testReload(self):
try:
import exceptions
reload(exceptions)
except ImportError, e:
self.fail("reloading exceptions: %s" % e)
def raise_catch(self, exc, excname):
try:
raise exc, "spam"
except exc, err:
buf1 = str(err)
try:
raise exc("spam")
except exc, err:
buf2 = str(err)
self.assertEquals(buf1, buf2)
self.assertEquals(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
sys.stdin = fp
x = raw_input()
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(IOError, "IOError")
self.assertRaises(IOError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec '/\n'
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 10000)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception, e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError, e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException:
def __init__(self_):
raise RuntimeError, "can't instantiate BadException"
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "test_capi1")
self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "__init__")
self.assert_(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEquals(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.failUnlessEqual(str(WindowsError(1001)),
"1001")
self.failUnlessEqual(str(WindowsError(1001, "message")),
"[Error 1001] message")
self.failUnlessEqual(WindowsError(1001, "message").errno, 22)
self.failUnlessEqual(WindowsError(1001, "message").winerror, 1001)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'message' : '', 'args' : ()}),
(BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
(BaseException, ('foo',),
{'message' : 'foo', 'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'message' : '', 'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
(IOError, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(IOError, ('foo', 'bar'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz', 'quux'),
{'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
(EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'message' : '', 'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'message' : '', 'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'message' : '', 'args' : (),}),
(UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', u'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : u'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', '\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : '\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
{'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
'object' : u'\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
]
try:
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : 1,
'errno' : 22, 'filename' : 'filenameStr'})
)
except NameError:
pass
with warnings.catch_warnings():
ignore_message_warning()
for exc, args, expected in exceptionList:
try:
raise exc(*args)
except BaseException, e:
if type(e) is not exc:
raise
# Verify module name
self.assertEquals(type(e).__module__, 'exceptions')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
self.assertEquals(repr(getattr(e, checkArgName)),
repr(expected[checkArgName]),
'exception "%s", attribute "%s"' %
(repr(e), checkArgName))
# test for pickling support
for p in pickle, cPickle:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
new = p.loads(p.dumps(e, protocol))
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEquals(got, want,
'pickled "%r", attribute "%s"' %
(e, checkArgName))
def testSlicing(self):
# Test that you can slice an exception directly instead of requiring
# going through the 'args' attribute.
args = (1, 2, 3)
exc = BaseException(*args)
self.failUnlessEqual(exc[:], args)
def testKeywordArgs(self):
# test that builtin exception don't take keyword args,
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEquals(x.fancy_arg, 42)
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
with captured_output("stderr"):
try:
g()
except RuntimeError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
    def testUnicodeStrUsage(self):
        # Exception classes and instances must be convertible with both
        # str() and unicode(), including instances constructed from
        # non-ASCII unicode arguments (Python 2 semantics).
        self.failUnless(str(Exception))
        self.failUnless(unicode(Exception))
        self.failUnless(str(Exception('a')))
        self.failUnless(unicode(Exception(u'a')))
        self.failUnless(unicode(Exception(u'\xe1')))
def test_badisinstance(self):
classcheck__(cls, subclass):
raise ValueError()
class MyException(Exception):
__metaclass__ = Meta
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException, e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
with captured_output("stderr") as stderr:
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assert_(e is RuntimeError, e)
self.assert_("maximum recursion depth exceeded" in str(v), v)
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(ExceptionTests)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
| false | true |
f72b440bbd74e2d435413383ae7761a669cd513a | 68,958 | py | Python | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 1 | 2021-07-01T17:52:12.000Z | 2021-07-01T17:52:12.000Z | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 3 | 2021-01-07T23:36:40.000Z | 2021-12-13T20:43:27.000Z | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 2 | 2020-02-24T16:06:12.000Z | 2021-02-24T00:21:32.000Z | '''
XbrlSemanticSqlDB.py implements an SQL database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module may save directly to a Postgres, MySQL, SQLite, MSSQL, or Oracle server.
This module provides the execution context for saving a dts and instances in
XBRL SQL database. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for SQL Server
port: the host port of server
user, password: if needed for server
database: the top level path segment for the SQL Server
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to use from command line:
linux
# be sure plugin is installed
arelleCmdLine --plugin '+xbrlDB|show'
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB 'myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds'
windows
rem be sure plugin is installed
arelleCmdLine --plugin "xbrlDB"
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB "myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds"
'''
import os, time, datetime, logging
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence
from arelle import XbrlConst
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl,
                 user=None, password=None, host=None, port=None, database=None, timeout=None,
                 product=None, entrypoint=None, rssItem=None, **kwargs):
    """Open a database connection and store modelXbrl's semantic content.

    If kwargs contains "rssObject", only batch bookkeeping is initialized
    (marking already-stored RSS items); otherwise the schema is verified
    and the filing is inserted as one transaction.

    :param modelXbrl: the loaded ModelXbrl to store
    :param user, password, host, port, database, timeout: connection parameters
    :param product: database product key (postgres, mysql, sqlite, mssql, orcl)
    :param entrypoint: entry point URL/file used to load the filing
    :param rssItem: RSS feed item for the filing, or None
    :raises Exception: re-raises any failure after a best-effort rollback
    """
    xbrlDbConn = None
    try:
        xbrlDbConn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
        if "rssObject" in kwargs: # initialize batch
            xbrlDbConn.initializeBatch(kwargs["rssObject"])
        else:
            xbrlDbConn.verifyTables()
            xbrlDbConn.insertXbrl(entrypoint, rssItem)
        xbrlDbConn.close()
    except Exception:
        # fix: drop unused 'as ex'/'as ex2' bindings (flagged as dead locals)
        if xbrlDbConn is not None:
            try:
                xbrlDbConn.close(rollback=True)
            except Exception:
                pass  # best-effort rollback; preserve the original error
        raise # reraise original exception with original traceback
def isDBPort(host, port, timeout=10, product="postgres"):
    # Probe whether host:port answers like a SQL server within timeout seconds.
    # The product argument is accepted for interface parity with other xbrlDB
    # backends but is not consulted here.
    return isSqlConnection(host, port, timeout)
# Names of all tables the semantic schema must provide; compared against the
# live database by verifyTables to detect a missing or partial installation.
XBRLDBTABLES = {
    "filing", "report",
    "document", "referenced_documents",
    "aspect", "data_type", "role_type", "arcrole_type",
    "resource", "relationship_set", "root", "relationship",
    "data_point", "entity", "period", "unit", "unit_measure", "aspect_value_selection",
    "message", "message_reference",
    "industry", "industry_level", "industry_structure",
}
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
missingTables = XBRLDBTABLES - self.tablesInDB()
# if no tables, initialize database
if missingTables == XBRLDBTABLES:
self.create(os.path.join("sql", "semantic", {"mssql": "xbrlSemanticMSSqlDB.sql",
"mysql": "xbrlSemanticMySqlDB.ddl",
"sqlite": "xbrlSemanticSQLiteDB.ddl",
"orcl": "xbrlSemanticOracleDB.sql",
"postgres": "xbrlSemanticPostgresDB.ddl"}[self.product]))
missingTables = XBRLDBTABLES - self.tablesInDB()
if missingTables and missingTables != {"sequences"}:
raise XPDBException("sqlDB:MissingTables",
_("The following tables are missing: %(missingTableNames)s"),
missingTableNames=', '.join(t for t in sorted(missingTables)))
    def insertXbrl(self, entrypoint, rssItem):
        """Store the loaded filing in the database as one locked transaction.

        Loads default dimensions and supplemental entity information, then
        inserts (in dependency order) filing, documents, aspects, role/arcrole
        types, resources, data points, relationships and validation results,
        committing at the end.  Progress is reported via showStatus and timed
        via profileStat.

        :param entrypoint: entry point URL or file used to load the filing
        :param rssItem: RSS feed item for the filing, or None
        :raises XPDBException: when no XBRL instance or schema is loaded
        """
        try:
            # must also have default dimensions loaded
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
            # get logging entries (needed to find which aspects to identify)
            self.loggingEntries = []
            for handler in logging.getLogger("arelle").handlers:
                if hasattr(handler, "dbHandlerLogEntries"):
                    self.loggingEntries = handler.dbHandlerLogEntries()
                    break
            # must have a valid XBRL instance or document
            if self.modelXbrl.modelDocument is None:
                raise XPDBException("xpgDB:MissingXbrlDocument",
                                    _("No XBRL instance or schema loaded for this filing."))
            # obtain supplemental entity information
            self.entityInformation = loadEntityInformation(self.modelXbrl, entrypoint, rssItem)
            # identify table facts (table datapoints) (prior to locked database transaction)
            self.tableFacts = tableFacts(self.modelXbrl)  # for EFM & HMRC this is ( (roleType, table_code, fact) )
            loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation) # load primary document facts for SEC filing
            self.identifyTaxonomyRelSetsOwner()
            # at this point we determine what's in the database and provide new tables
            # requires locking most of the table structure
            self.lockTables(('entity', 'filing', 'report', 'document', 'referenced_documents'),
                            isSessionTransaction=True) # lock for whole transaction
            # find pre-existing documents in server database
            self.identifyPreexistingDocuments()
            self.identifyAspectsUsed()
            self.dropTemporaryTable()
            startedAt = time.time()
            self.syncSequences = True # for data base types that don't explicitly handle sequences
            self.insertFiling(rssItem)
            self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertDocuments()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertAspects()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Aspects insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertArcroleTypes()
            self.insertRoleTypes()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertResources()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
            startedAt = time.time()
            # self.modelXbrl.profileStat(_("XbrlSqlDB: DTS insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertDataPoints()
            self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertRelationships() # must follow data points for footnote relationships
            self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertValidationResults()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.showStatus("Committing entries")
            self.commit()
            self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
            self.showStatus("DB insertion completed", clearAfter=5000)
        except Exception as ex:
            # surface the failure in the UI; the caller performs the rollback
            self.showStatus("DB insertion failed due to exception", clearAfter=5000)
            raise
def identifyTaxonomyRelSetsOwner(self):
# walk down referenced document set from instance to find 'lowest' taxonomy relationship set ownership
instanceReferencedDocuments = set()
instanceDocuments = set()
inlineXbrlDocSet = None
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instanceDocuments.add(mdlDoc)
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
instanceReferencedDocuments.add(refDoc)
elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
inlineXbrlDocSet = mdlDoc
if len(instanceReferencedDocuments) > 1:
# filing must own the taxonomy set
if len(instanceDocuments) == 1:
self.taxonomyRelSetsOwner = instanceDocuments.pop()
elif inlineXbrlDocSet is not None: # manifest for inline docs can own the rel sets
self.taxonomyRelSetsOwner = inlineXbrlDocSet
else: # no single instance, pick the entry poin doct
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument # entry document (instance or inline doc set)
elif len(instanceReferencedDocuments) == 1:
self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
else:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
instanceReferencedDocuments.clear() # dereference
instanceDocuments.clear()
# check whether relationship_set is completely in instance or part/all in taxonomy
self.arcroleInInstance = {}
self.arcroleHasResource = {}
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
inInstance = False
hasResource = False
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
if (not inInstance and
rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
for tgtObj in (rel.fromModelObject, rel.toModelObject))):
inInstance = True
if not hasResource and any(isinstance(resource, ModelResource)
for resource in (rel.fromModelObject, rel.toModelObject)):
hasResource = True
if inInstance and hasResource:
break;
self.arcroleInInstance[arcrole] = inInstance
self.arcroleHasResource[arcrole] = hasResource
def initializeBatch(self, rssObject):
results = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
existingFilings = dict((filingNumber, timestamp)
for filingNumber, timestamp in results) # timestamp is a string
for rssItem in rssObject.rssItems:
if (rssItem.accessionNumber in existingFilings and
rssItem.acceptanceDatetime == existingFilings[rssItem.accessionNumber]):
rssItem.skipRssItem = True
    def insertFiling(self, rssItem):
        """Insert or locate the entity, filing and report rows for this filing.

        Values come from the RSS item when present, falling back to the
        supplemental entity information, then to generated defaults.  Sets
        self.entityId, self.filingId, self.reportId and the corresponding
        *PreviouslyInDB flags for later steps.

        :param rssItem: RSS feed item for the filing, or None
        """
        now = datetime.datetime.now()
        entityInfo = self.entityInformation
        def rssItemGet(propertyName):
            # RSS item values take precedence over scraped entity information
            if rssItem is not None:
                return getattr(rssItem, propertyName, None)
            return None
        self.showStatus("insert entity")
        LEI = None  # NOTE(review): legal entity identifier is never obtained here — confirm if intentional
        entity_comparator = ('legal_entity_number', 'file_number') if LEI else ('file_number',)
        table = self.getTable('entity', 'entity_id',
                              ('legal_entity_number',
                               'file_number',
                               'reference_number', # CIK
                               'tax_number',
                               'standard_industry_code',
                               'name',
                               'legal_state',
                               'phone',
                               'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
                               'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
                               'fiscal_year_end',
                               'filer_category',
                               'public_float',
                               'trading_symbol'),
                              entity_comparator, # cannot compare None = None if LEI is absent, always False
                              ((LEI,
                                rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                entityInfo.get("irs-number"),
                                rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
                                rssItemGet("companyName") or entityInfo.get("conformed-name"),
                                entityInfo.get("state-of-incorporation"),
                                entityInfo.get("business-address.phone"),
                                entityInfo.get("business-address.street1"),
                                entityInfo.get("business-address.street2"),
                                entityInfo.get("business-address.city"),
                                entityInfo.get("business-address.state"),
                                entityInfo.get("business-address.zip"),
                                countryOfState.get(entityInfo.get("business-address.state")),
                                entityInfo.get("mail-address.street1"),
                                entityInfo.get("mail-address.street2"),
                                entityInfo.get("mail-address.city"),
                                entityInfo.get("mail-address.state"),
                                entityInfo.get("mail-address.zip"),
                                countryOfState.get(entityInfo.get("mail-address.state")),
                                rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end"),
                                entityInfo.get("filer-category"),
                                entityInfo.get("public-float"),
                                entityInfo.get("trading-symbol")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        # result row shape differs depending on whether LEI was a comparator column
        if LEI:
            for id, _LEI, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        else:
            for id, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        # record prior entity names (keys look like "<prefix>.former-conformed-name")
        if any ('former-conformed-name' in key for key in entityInfo.keys()):
            self.getTable('former_entity', None,
                          ('entity_id', 'former_name', 'date_changed'),
                          ('entity_id', 'former_name', 'date_changed'),
                          ((self.entityId,
                            entityInfo.get(keyPrefix + '.former-conformed-name'),
                            entityInfo.get(keyPrefix + '.date-changed'))
                           for key in entityInfo.keys() if 'former-conformed-name' in key
                           for keyPrefix in (key.partition('.')[0],)),
                          checkIfExisting=True)
        self.showStatus("insert filing")
        table = self.getTable('filing', 'filing_id',
                              ('filing_number', 'form_type', 'entity_id', 'reference_number',
                               'accepted_timestamp', 'is_most_current', 'filing_date',
                               'creation_software',
                               'authority_html_url', 'entry_url', ),
                              ('filing_number',),
                              ((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())), # NOT NULL
                                rssItemGet("formType") or entityInfo.get("form-type"),
                                self.entityId,
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
                                True,
                                rssItemGet("filingDate") or entityInfo.get("filing-date") or now, # NOT NULL
                                self.modelXbrl.modelDocument.creationSoftware,
                                rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
                                rssItemGet("url") or entityInfo.get("instance-url")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, filing_number, existenceStatus in table:
            self.filingId = id
            self.filingPreviouslyInDB = existenceStatus
            break
        self.showStatus("insert report")
        table = self.getTable('report', 'report_id',
                              ('filing_id', ),
                              ('filing_id',),
                              ((self.filingId,
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, foundFilingId, existenceStatus in table:
            self.reportId = id
            self.filingPreviouslyInDB = existenceStatus
            break
def isSemanticDocument(self, modelDocument):
if modelDocument.type == Type.SCHEMA:
# must include document items taxonomy even if not in DTS
return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
return modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
    def identifyPreexistingDocuments(self):
        """Find semantic documents of this filing already stored in the database.

        Populates self.urlDocs (normalized url -> model document),
        self.existingDocumentIds (model document -> document_id) and
        self.isExistingTaxonomyRelSetsOwner.
        """
        self.existingDocumentIds = {}
        self.urlDocs = {}
        docUris = set()
        for modelDocument in self.modelXbrl.urlDocs.values():
            url = ensureUrl(modelDocument.uri)
            self.urlDocs[url] = modelDocument
            if self.isSemanticDocument(modelDocument):
                docUris.add(self.dbStr(url))
        if docUris:
            # single IN-list query to map stored urls back to their ids
            results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
                                   .format(self.dbTableName("document"),
                                           ', '.join(docUris)))
            self.existingDocumentIds = dict((self.urlDocs[self.pyStrFromDbStr(docUrl)],docId)
                                            for docId, docUrl in results)
        # identify whether taxonomyRelsSetsOwner is existing
        self.isExistingTaxonomyRelSetsOwner = (
            self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
            self.taxonomyRelSetsOwner in self.existingDocumentIds)
    def identifyAspectsUsed(self):
        """Collect the concepts (aspects) and types this filing actually uses.

        Gathers concepts from facts, context dimensions, dimension defaults,
        relationship endpoints, built-in xbrli elements, role/arcrole usedOn
        declarations and logged validation references, then closes the set
        over substitution groups.  Results go to self.relationshipSets,
        self.aspectsUsed and self.typesUsed.
        """
        # relationshipSets are a dts property
        self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
                                 for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                 if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
        aspectsUsed = set(f.concept
                          for f in self.modelXbrl.factsInInstance)
        for cntx in self.modelXbrl.contexts.values():
            for dim in cntx.qnameDims.values():
                aspectsUsed.add(dim.dimension)
                if dim.isExplicit:
                    aspectsUsed.add(dim.member)
                else:
                    aspectsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
        for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
        for relationshipSetKey in self.relationshipSets:
            relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
            for rel in relationshipSet.modelRelationships:
                if isinstance(rel.fromModelObject, ModelConcept):
                    aspectsUsed.add(rel.fromModelObject)
                if isinstance(rel.toModelObject, ModelConcept):
                    aspectsUsed.add(rel.toModelObject)
        try:
            for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
                aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        except KeyError:
            pass # no DTS
        for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
            for roleUriTypes in roleTypes:
                for roleType in roleUriTypes:
                    for qn in roleType.usedOns:
                        if qn in self.modelXbrl.qnameConcepts: # qname may be undefined or invalid and still 2.1 legal
                            aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        # add aspects referenced by logging entries
        for logEntry in self.loggingEntries:
            for ref in logEntry['refs']:
                modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
                if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
                    aspectsUsed.add(modelObject)
        # add substitution groups
        aspectsUsed |= set(aspect.substitutionGroup
                           for aspect in aspectsUsed
                           if aspect is not None)
        aspectsUsed -= {None} # remove None if in aspectsUsed
        self.aspectsUsed = aspectsUsed
        typesUsed = set()
        def typeUsed(modelType):
            # recursively mark a type and the types it derives from as used
            if modelType is not None and modelType.modelDocument.inDTS: # exclude nonDTS types (schema, etc)
                typesUsed.add(modelType)
                typesDerivedFrom = modelType.typeDerivedFrom
                if isinstance(typesDerivedFrom, list): # union derivation
                    for typeDerivedFrom in typesDerivedFrom:
                        if typeDerivedFrom not in typesUsed:
                            typeUsed(typeDerivedFrom)
                else: # single derivation
                    if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
                        typeUsed(typesDerivedFrom)
        for aspect in aspectsUsed:
            modelType = aspect.type
            if modelType is not None:
                if modelType not in typesUsed:
                    typeUsed(modelType)
        self.typesUsed = typesUsed
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[self.pyStrFromDbStr(url)], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
# instance documents are filing references
# update report with document references
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None, # no id column in this table
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
# referenced doc may be extension schema
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
instDocSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
)
    def insertAspects(self):
        """Insert data_type and aspect rows for this filing's documents.

        Existing documents' used types/aspects are looked up only; new
        documents' types and aspects are inserted, then derived-from and
        substitution-group references among the new rows are back-patched.
        Populates self.typeQnameId and self.aspectQnameId (qname -> row id).
        """
        self.showStatus("insert aspects")
        # determine new filing documents and types they use
        filingDocumentAspects = set()
        existingDocumentUsedAspects = set()
        for concept in self.modelXbrl.qnameConcepts.values():
            if concept.modelDocument not in self.existingDocumentIds:
                filingDocumentAspects.add(concept)
                filingDocumentAspectType = concept.type
                if filingDocumentAspectType is not None and filingDocumentAspectType not in self.typesUsed:
                    self.typesUsed.add(filingDocumentAspectType)
            elif concept in self.aspectsUsed:
                existingDocumentUsedAspects.add(concept)
        filingDocumentTypes = set()
        existingDocumentUsedTypes = set()
        for modelType in self.modelXbrl.qnameTypes.values():
            if modelType.modelDocument not in self.existingDocumentIds:
                filingDocumentTypes.add(modelType)
            elif modelType in self.typesUsed:
                existingDocumentUsedTypes.add(modelType)
        # get existing element IDs
        self.typeQnameId = {}
        if existingDocumentUsedTypes:
            typeQnameIds = []
            table = self.getTable('data_type', 'data_type_id',
                                  ('document_id', 'qname',),
                                  ('document_id', 'qname',),
                                  tuple((self.documentIds[modelType.modelDocument],
                                         modelType.qname.clarkNotation)
                                        for modelType in existingDocumentUsedTypes
                                        if modelType.modelDocument in self.documentIds),
                                  checkIfExisting=True,
                                  insertIfNotMatched=False)
            for typeId, docId, qn in table:
                self.typeQnameId[qname(qn)] = typeId
        # insert types declared by the newly filed documents
        table = self.getTable('data_type', 'data_type_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'qname', 'name', 'base_type', 'derived_from_type_id'),
                              ('document_id', 'qname',),
                              tuple((self.documentIds[modelType.modelDocument],
                                     modelType.id,
                                     elementChildSequence(modelType),
                                     modelType.qname.clarkNotation,
                                     modelType.name,
                                     modelType.baseXsdType,
                                     self.typeQnameId.get(modelType.typeDerivedFrom)
                                     if isinstance(modelType.typeDerivedFrom, ModelType) else None)
                                    for modelType in filingDocumentTypes
                                    if modelType.modelDocument in self.documentIds)
                              )
        for typeId, docId, qn in table:
            self.typeQnameId[qname(qn)] = typeId
        # back-patch derived-from references between two newly inserted types
        updatesToDerivedFrom = set()
        for modelType in filingDocumentTypes:
            if isinstance(modelType.typeDerivedFrom, ModelType):
                typeDerivedFrom = modelType.typeDerivedFrom
                if (typeDerivedFrom in filingDocumentTypes and
                    modelType.qname in self.typeQnameId and
                    typeDerivedFrom.qname in self.typeQnameId):
                    updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
                                               self.typeQnameId[typeDerivedFrom.qname]) )
        # update derivedFrom's of newly added types
        if updatesToDerivedFrom:
            self.updateTable('data_type',
                             ('data_type_id', 'derived_from_type_id'),
                             updatesToDerivedFrom)
        existingDocumentUsedTypes.clear() # dereference
        filingDocumentTypes.clear() # dereference
        self.aspectQnameId = {}
        # get existing element IDs
        if existingDocumentUsedAspects:
            table = self.getTable('aspect', 'aspect_id',
                                  ('document_id', 'qname',),
                                  ('document_id', 'qname',),
                                  tuple((self.documentIds[concept.modelDocument],
                                         concept.qname.clarkNotation)
                                        for concept in existingDocumentUsedAspects
                                        if concept.modelDocument in self.documentIds),
                                  checkIfExisting=True,
                                  insertIfNotMatched=False)
            for aspectId, docId, qn in table:
                self.aspectQnameId[qname(qn)] = aspectId
        aspects = []
        for concept in filingDocumentAspects:
            niceType = concept.niceType
            if niceType is not None and len(niceType) > 128:
                niceType = niceType[:128]  # column width limit
            if concept.modelDocument in self.documentIds:
                aspects.append((self.documentIds[concept.modelDocument],
                                concept.id,
                                elementChildSequence(concept),
                                concept.qname.clarkNotation,
                                concept.name,
                                self.typeQnameId.get(concept.typeQname),
                                niceType[:128] if niceType is not None else None,
                                self.aspectQnameId.get(concept.substitutionGroupQname),
                                concept.balance,
                                concept.periodType,
                                concept.isAbstract,
                                concept.isNillable,
                                concept.isNumeric,
                                concept.isMonetary,
                                concept.isTextBlock))
        table = self.getTable('aspect', 'aspect_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_aspect_id',
                               'balance', 'period_type', 'abstract', 'nillable',
                               'is_numeric', 'is_monetary', 'is_text_block'),
                              ('document_id', 'qname'),
                              aspects
                              )
        for aspectId, docId, qn in table:
            self.aspectQnameId[qname(qn)] = aspectId
        # back-patch substitution-group references between newly inserted aspects
        updatesToSubstitutionGroup = set()
        for concept in filingDocumentAspects:
            if concept.substitutionGroup in filingDocumentAspects and concept.modelDocument in self.documentIds:
                updatesToSubstitutionGroup.add( (self.aspectQnameId[concept.qname],
                                                 self.aspectQnameId.get(concept.substitutionGroupQname)) )
        # update substitution groups of newly added aspects
        if updatesToSubstitutionGroup:
            self.updateTable('aspect',
                             ('aspect_id', 'substitution_group_aspect_id'),
                             updatesToSubstitutionGroup)
        filingDocumentAspects.clear() # dereference
        existingDocumentUsedAspects.clear() # dereference
def insertArcroleTypes(self):
self.showStatus("insert arcrole types")
# add existing arcrole types
arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
arcroleType.roleURI) # key on docId, uriId
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'arcrole_uri'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleTypeIDs[1] # uri Id
)
for arcroleTypeIDs in arcroleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.arcroleTypeIds = {}
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
# added document arcrole type
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
arcroleType.arcroleURI), # key on docId, uriId
arcroleType) # value is roleType object
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleType.id,
elementChildSequence(arcroleType),
arcroleType.arcroleURI,
arcroleType.cyclesAllowed,
arcroleType.definition)
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
table = self.getTable('used_on',
None, # no record id in this table
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
self.aspectQnameId[usedOnQn])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
for usedOnQn in arcroleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
    def insertRoleTypes(self):
        """Insert role_type rows for new documents, look up ids for
        pre-existing ones, and record their usedOn aspects.

        Populates self.roleTypeIds keyed by (document_id, role uri).
        """
        self.showStatus("insert role types")
        # add existing role types
        roleTypesByIds = set((self.documentIds[roleType.modelDocument],
                              roleType.roleURI) # key on docId, uriId
                             for roleTypes in self.modelXbrl.roleTypes.values()
                             for roleType in roleTypes
                             if roleType.modelDocument in self.existingDocumentIds)
        table = self.getTable('role_type', 'role_type_id',
                              ('document_id', 'role_uri'),
                              ('document_id', 'role_uri'),
                              tuple((roleTypeIDs[0], # doc Id
                                     roleTypeIDs[1] # uri Id
                                     )
                                    for roleTypeIDs in roleTypesByIds),
                              checkIfExisting=True,
                              insertIfNotMatched=False)
        self.roleTypeIds = {}
        for roleId, docId, uri in table:
            self.roleTypeIds[(docId, uri)] = roleId
        # new document role types
        roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
                                roleType.roleURI), # key on docId, uriId
                               roleType) # value is roleType object
                              for roleTypes in self.modelXbrl.roleTypes.values()
                              for roleType in roleTypes
                              if roleType.modelDocument not in self.existingDocumentIds)
        table = self.getTable('role_type', 'role_type_id',
                              ('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
                              ('document_id', 'role_uri'),
                              tuple((roleTypeIDs[0], # doc Id
                                     roleType.id,
                                     elementChildSequence(roleType),
                                     roleTypeIDs[1], # uri Id
                                     roleType.definition)
                                    for roleTypeIDs, roleType in roleTypesByIds.items()))
        for roleId, docId, uri in table:
            self.roleTypeIds[(docId, uri)] = roleId
        # record which aspects each new role type may be used on
        table = self.getTable('used_on',
                              None, # no record id in this table
                              ('object_id', 'aspect_id'),
                              ('object_id', 'aspect_id'),
                              tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
                                     self.aspectQnameId[usedOnQn])
                                    for roleTypeIDs, roleType in roleTypesByIds.items()
                                    for usedOnQn in roleType.usedOns
                                    if usedOnQn in self.aspectQnameId),
                              checkIfExisting=True)
def insertResources(self):
    """Insert label/reference resources of relationship arcs into the resource table.

    Only arcroles that carry resources are considered, and only those whose
    relationship sets belong to this run (in the instance, or with a new
    taxonomy rel-sets owner).  Populates self.resourceId keyed by
    (document_id, xml_child_seq).
    """
    self.showStatus("insert resources")
    # deduplicate resources (the same resource may be on multiple arcs)
    arcroles = [arcrole
                # check whether relationship_set is completely in instance or part/all in taxonomy
                for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
                and self.arcroleHasResource[arcrole]
                and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
    # note that lxml has no column numbers, use objectIndex as pseudo-column number
    uniqueResources = dict(((self.documentIds[resource.modelDocument],
                             resource.objectIndex), resource)
                           for arcrole in arcroles
                           for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
                           for resource in (rel.fromModelObject, rel.toModelObject)
                           if isinstance(resource, ModelResource))
    table = self.getTable('resource', 'resource_id',
                          ('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
                          ('document_id', 'xml_child_seq'),
                          tuple((self.documentIds[resource.modelDocument],
                                 resource.id,
                                 elementChildSequence(resource),
                                 resource.qname.clarkNotation,
                                 resource.role,
                                 resource.textValue,
                                 resource.xmlLang)
                                for resource in uniqueResources.values()),
                          checkIfExisting=True)
    self.resourceId = dict(((docId, xml_child_seq), id)
                           for id, docId, xml_child_seq in table)
    uniqueResources.clear()  # release references promptly
def modelObjectId(self, modelObject):
    """Return the database primary key for a model object, or None if unmapped.

    Concepts and types map via their qname lookup tables; resources and
    facts map via their (document id, xml child sequence) key.
    """
    if isinstance(modelObject, ModelConcept):
        return self.aspectQnameId.get(modelObject.qname)
    if isinstance(modelObject, ModelType):
        return self.aspectTypeIds.get(modelObject.qname)
    if isinstance(modelObject, ModelResource):
        docSeqKey = (self.documentIds[modelObject.modelDocument],
                     elementChildSequence(modelObject))
        return self.resourceId.get(docSeqKey)
    if isinstance(modelObject, ModelFact):
        docSeqKey = (self.documentIds[modelObject.modelDocument],
                     elementChildSequence(modelObject))
        return self.factDataPointId.get(docSeqKey)
    return None
def insertRelationships(self):
    """Insert relationship sets, relationships (with tree sequence/depth),
    and root relationships.

    One relationship_set row per base set (ELR, arcrole, link qname, arc
    qname); each set's trees are then walked depth-first from the root
    concepts to assign tree_sequence and tree_depth.
    """
    self.showStatus("insert relationship sets")
    table = self.getTable('relationship_set', 'relationship_set_id',
                          ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                          ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                          tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
                                                  else self.taxonomyRelSetsOwner],
                                 ELR,
                                 arcrole,
                                 linkqname.clarkNotation,
                                 arcqname.clarkNotation)
                                for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                                and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
    self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
                         for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
    # do tree walk to build relationships with depth annotated, no targetRole navigation
    dbRels = []
    def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
        # depth-first; visited guards against cycles along the current path
        for rel in rels:
            if rel not in visited and isinstance(rel.toModelObject, ModelObject):
                visited.add(rel)
                dbRels.append((rel, seq, depth, relSetId))
                seq += 1
                seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
                visited.remove(rel)
        return seq
    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
        if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
            and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
            relSetId = self.relSetId[(ELR,
                                      arcrole,
                                      linkqname.clarkNotation,
                                      arcqname.clarkNotation)]
            relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
            seq = 1
            for rootConcept in relationshipSet.rootConcepts:
                seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
    def resourceResourceId(resource):
        # NOTE(review): this helper is never called in this method, and its
        # key (docId, sourceline, objectIndex) does not match the
        # (docId, xml_child_seq) keys self.resourceId is built with in
        # insertResources — confirm before relying on it.
        if isinstance(resource, ModelResource):
            return self.resourceId.get((self.documentIds[resource.modelDocument],
                                        resource.sourceline,
                                        resource.objectIndex))
        else:
            return None
    table = self.getTable('relationship', 'relationship_id',
                          ('document_id', 'xml_id', 'xml_child_seq',
                           'relationship_set_id', 'reln_order',
                           'from_id', 'to_id', 'calculation_weight',
                           'tree_sequence', 'tree_depth', 'preferred_label_role'),
                          ('relationship_set_id', 'document_id', 'xml_child_seq'),
                          tuple((self.documentIds[rel.modelDocument],
                                 rel.id,
                                 elementChildSequence(rel.arcElement),
                                 relSetId,
                                 self.dbNum(rel.order),
                                 self.modelObjectId(rel.fromModelObject),
                                 self.modelObjectId(rel.toModelObject),
                                 self.dbNum(rel.weight), # none if no weight
                                 sequence,
                                 depth,
                                 rel.preferredLabel)
                                for rel, sequence, depth, relSetId in dbRels
                                if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
    self.relationshipId = dict(((docId,xml_child_seq), relationshipId)
                               for relationshipId, relSetId, docId, xml_child_seq in table)
    # record the depth-1 relationships of each set as tree roots
    table = self.getTable('root', None,
                          ('relationship_set_id', 'relationship_id'),
                          ('relationship_set_id', 'relationship_id'),
                          tuple((relSetId,
                                 self.relationshipId[self.documentIds[rel.modelDocument],
                                                     elementChildSequence(rel.arcElement)])
                                for rel, sequence, depth, relSetId in dbRels
                                if depth == 1 and
                                isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
    del dbRels[:] # dereference
def insertDataPoints(self):
    """Insert the report's facts and supporting rows.

    Order matters: prior rows for a re-loaded report are deleted first,
    then units + measures, entity identifiers, periods, aspect-value
    (dimension) selection sets, then the data_point rows themselves
    (recursing into tuple facts), and finally table_data_points links.
    Populates self.unitId, self.entityIdentifierId, self.periodId and
    self.factDataPointId for use by later insertion steps.
    """
    reportId = self.reportId
    if self.filingPreviouslyInDB:
        self.showStatus("deleting prior data points of this report")
        # remove prior facts
        self.lockTables(("data_point", "entity_identifier", "period", "aspect_value_selection",
                         "aspect_value_selection_set", "unit_measure", "unit",
                         "table_data_points"))
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("data_point"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("entity_identifier"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("period"), reportId),
                     close=False, fetch=False)
        # child tables without a report_id column are joined via DELETE ... USING
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {0}.aspect_value_selection_id = {1}.aspect_value_selection_id"
                     .format( self.dbTableName("aspect_value_selection"),
                              self.dbTableName("aspect_value_selection_set"),
                              reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1};"
                     .format( self.dbTableName("aspect_value_selection_set"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {0}.unit_id = {1}.unit_id"
                     .format( self.dbTableName("unit_measure"),
                              self.dbTableName("unit"),
                              reportId),
                     close=False, fetch=False)
        self.execute("DELETE from {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("unit"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("table_data_points"), reportId),
                     close=False, fetch=False)
    self.showStatus("insert data points")
    # units
    table = self.getTable('unit', 'unit_id',
                          ('report_id', 'xml_id', 'xml_child_seq', 'measures_hash'),
                          ('report_id', 'measures_hash'),
                          tuple((reportId,
                                 unit.id,
                                 elementChildSequence(unit),
                                 unit.md5hash)
                                for unit in dict((unit.md5hash,unit) # deduplicate by md5hash
                                                 for unit in self.modelXbrl.units.values()).values()))
    self.unitId = dict(((_reportId, measuresHash), id)
                       for id, _reportId, measuresHash in table)
    # measures: i == 0 is the multiplicand (numerator) measure list
    table = self.getTable('unit_measure',
                          None,
                          ('unit_id', 'qname', 'is_multiplicand'),
                          ('unit_id', 'qname', 'is_multiplicand'),
                          tuple((self.unitId[(reportId,unit.md5hash)],
                                 measure.clarkNotation,
                                 i == 0)
                                for unit in self.modelXbrl.units.values()
                                for i in range(2)
                                for measure in unit.measures[i]))
    table = self.getTable('entity_identifier', 'entity_identifier_id',
                          ('report_id', 'scheme', 'identifier'),
                          ('report_id', 'scheme', 'identifier'),
                          set((reportId,
                               cntx.entityIdentifier[0],
                               cntx.entityIdentifier[1])
                              for cntx in self.modelXbrl.contexts.values()),
                          checkIfExisting=True) # entities shared across multiple instance/inline docs
    self.entityIdentifierId = dict(((_reportId, entScheme, entIdent), id)
                                   for id, _reportId, entScheme, entIdent in table)
    table = self.getTable('period', 'period_id',
                          ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                          ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                          set((reportId,
                               cntx.startDatetime if cntx.isStartEndPeriod else None,
                               cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                               cntx.isInstantPeriod,
                               cntx.isForeverPeriod)
                              for cntx in self.modelXbrl.contexts.values()),
                          checkIfExisting=True) # periods shared across multiple instance/inline docs
    self.periodId = dict(((_reportId, start, end, isInstant, isForever), id)
                         for id, _reportId, start, end, isInstant, isForever in table)
    def cntxDimsSet(cntx):
        # frozenset of (dimension aspect id, member aspect id or None,
        # isTyped, typed value text or None) for each known dimension
        return frozenset((self.aspectQnameId[modelDimValue.dimensionQname],
                          self.aspectQnameId.get(modelDimValue.memberQname),
                          modelDimValue.isTyped,
                          modelDimValue.stringValue if modelDimValue.isTyped else None)
                         for modelDimValue in cntx.qnameDims.values()
                         if modelDimValue.dimensionQname in self.aspectQnameId)
    cntxAspectValueSelectionSet = dict((cntx, cntxDimsSet(cntx))
                                       for cntx in self.modelXbrl.contexts.values())
    aspectValueSelections = set(aspectValueSelectionSet
                                for cntx, aspectValueSelectionSet in cntxAspectValueSelectionSet.items()
                                if aspectValueSelectionSet)
    self.lockTables(("aspect_value_selection_set",))
    self.execute("DELETE FROM {0} WHERE report_id = {1}"
                 .format(self.dbTableName("aspect_value_selection_set"), reportId),
                 close=False, fetch=False)
    # one (otherwise empty) set row per distinct dimension combination
    table = self.getTable('aspect_value_selection_set', 'aspect_value_selection_id',
                          ('report_id', ),
                          ('report_id', ),
                          tuple((reportId,)
                                for aspectValueSelection in aspectValueSelections)
                          )
    # assure we only get single entry per result (above gives cross product)
    table = self.execute("SELECT aspect_value_selection_id, report_id from {0} "
                         "WHERE report_id = {1}"
                         .format(self.dbTableName("aspect_value_selection_set"), reportId))
    # pairing of selection sets to fresh ids is arbitrary (set.pop order)
    # since the rows are indistinguishable apart from their new ids
    aspectValueSelectionSets = dict((aspectValueSelections.pop(), id)
                                    for id, _reportId in table)
    cntxAspectValueSelectionSetId = dict((cntx, aspectValueSelectionSets[_cntxDimsSet])
                                         for cntx, _cntxDimsSet in cntxAspectValueSelectionSet.items()
                                         if _cntxDimsSet)
    table = self.getTable('aspect_value_selection',
                          None,
                          ('aspect_value_selection_id', 'aspect_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
                          ('aspect_value_selection_id', ),
                          tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
                                for aspectValueSelection, aspectValueSetId in aspectValueSelectionSets.items()
                                for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
                          )
    # facts
    def insertFactSet(modelFacts, parentDatapointId):
        # insert one level of facts; recurses for tuple children with the
        # parent's datapoint id
        facts = []
        for fact in modelFacts:
            if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
                cntx = fact.context
                documentId = self.documentIds[fact.modelDocument]
                facts.append((reportId,
                              documentId,
                              fact.id,
                              elementChildSequence(fact),
                              fact.sourceline,
                              parentDatapointId, # parent ID
                              self.aspectQnameId.get(fact.qname),
                              fact.contextID,
                              self.entityIdentifierId.get((reportId, cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
                              if cntx is not None else None,
                              self.periodId.get((reportId,
                                                 cntx.startDatetime if cntx.isStartEndPeriod else None,
                                                 cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                                 cntx.isInstantPeriod,
                                                 cntx.isForeverPeriod)) if cntx is not None else None,
                              cntxAspectValueSelectionSetId.get(cntx) if cntx is not None else None,
                              self.unitId.get((reportId,fact.unit.md5hash)) if fact.unit is not None else None,
                              fact.isNil,
                              fact.precision,
                              fact.decimals,
                              roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
                              fact.value
                              ))
        table = self.getTable('data_point', 'datapoint_id',
                              ('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
                               'parent_datapoint_id', # tuple
                               'aspect_id',
                               'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_selection_id', 'unit_id',
                               'is_nil', 'precision_value', 'decimals_value', 'effective_value', 'value'),
                              ('document_id', 'xml_child_seq'),
                              facts)
        xmlIdDataPointId = dict(((docId, xml_child_seq), datapointId)
                                for datapointId, docId, xml_child_seq in table)
        self.factDataPointId.update(xmlIdDataPointId)
        for fact in modelFacts:
            if fact.isTuple:
                try:
                    insertFactSet(fact.modelTupleFacts,
                                  xmlIdDataPointId[(self.documentIds[fact.modelDocument],
                                                    elementChildSequence(fact))])
                except KeyError:
                    self.modelXbrl.info("xpDB:warning",
                                        _("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
                                        modelObject=fact, tuple=fact.qname)
    self.factDataPointId = {}
    insertFactSet(self.modelXbrl.facts, None)
    # table_data_points links (if tableFacts were identified)
    if self.tableFacts: # if any entries
        tableDataPoints = []
        for roleType, tableCode, fact in self.tableFacts:
            try:
                tableDataPoints.append((reportId,
                                        self.roleTypeIds[(self.documentIds[roleType.modelDocument],
                                                          roleType.roleURI)],
                                        tableCode,
                                        self.factDataPointId[(self.documentIds[fact.modelDocument],
                                                              elementChildSequence(fact))]))
            except KeyError:
                # silently skip entries whose role or data point was not inserted
                # print ("missing table data points role or data point")
                pass
        table = self.getTable('table_data_points', None,
                              ('report_id', 'object_id', 'table_code', 'datapoint_id'),
                              ('report_id', 'object_id', 'datapoint_id'),
                              tableDataPoints)
def insertValidationResults(self):
    """Insert captured validation/log messages and their object references.

    Prior messages of a re-loaded report are deleted first.  Each log entry
    becomes a message row (sequenced within the report); each of its refs is
    resolved to a DB object id (fact, relationship set, concept, report, or
    document) and stored as a message_reference row.
    """
    reportId = self.reportId
    if self.filingPreviouslyInDB:
        self.showStatus("deleting prior messages of this report")
        # remove prior messages for this report (references first, via join)
        self.lockTables(("message", "message_reference"))
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
                     .format(self.dbTableName("message_reference"),
                             self.dbTableName("message"),
                             reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {} WHERE message.report_id = {}"
                     .format(self.dbTableName("message"),reportId),
                     close=False, fetch=False)
    messages = []
    messageRefs = defaultdict(set) # direct link to objects
    for i, logEntry in enumerate(self.loggingEntries):
        sequenceInReport = i+1
        for ref in logEntry['refs']:
            modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
            # for now just find a concept
            objectId = None
            if isinstance(modelObject, ModelFact):
                objectId = self.factDataPointId.get((self.documentIds.get(modelObject.modelDocument),
                                                     elementChildSequence(modelObject)))
            elif isinstance(modelObject, ModelRelationship):
                objectId = self.relSetId.get((modelObject.linkrole,
                                              modelObject.arcrole,
                                              modelObject.linkQname.clarkNotation,
                                              modelObject.arcElement.qname.clarkNotation))
            elif isinstance(modelObject, ModelConcept):
                objectId = self.aspectQnameId.get(modelObject.qname)
            elif isinstance(modelObject, ModelXbrl):
                objectId = reportId
            elif hasattr(modelObject, "modelDocument"):
                objectId = self.documentIds.get(modelObject.modelDocument)
            if objectId is not None:
                messageRefs[sequenceInReport].add(objectId)
        messages.append((reportId,
                         sequenceInReport,
                         logEntry['code'],
                         logEntry['level'],
                         logEntry['message']['text']))
    if messages:
        self.showStatus("insert validation messages")
        table = self.getTable('message', 'message_id',
                              ('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
                              ('report_id', 'sequence_in_report'),
                              messages)
        messageIds = dict((sequenceInReport, messageId)
                          for messageId, _reportId, sequenceInReport in table)
        table = self.getTable('message_reference', None,
                              ('message_id', 'object_id'),
                              ('message_id', 'object_id'),
                              tuple((messageId,
                                     objectId)
                                    for sequenceInReport, objectIds in messageRefs.items()
                                    for objectId in objectIds
                                    for messageId in (messageIds[sequenceInReport],)))
# Map of US state/territory and Canadian province/territory postal
# abbreviations to country code ("US" or "CA"); used by insertFiling to
# derive the country for business and mail addresses.
countryOfState = {
    "AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
    "FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
    "KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
    "MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
    "NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
    "SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
    "WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
    "AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
    "QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| 58.5382 | 144 | 0.517068 |
import os, time, datetime, logging
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence
from arelle import XbrlConst
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl,
                 user=None, password=None, host=None, port=None, database=None, timeout=None,
                 product=None, entrypoint=None, rssItem=None, **kwargs):
    """Open an XBRL SQL database connection and store modelXbrl in it.

    If kwargs contains "rssObject", only batch initialization is performed
    (flagging RSS items already loaded); otherwise the schema is verified
    (and created when absent) and the whole filing is inserted.

    On failure the connection is rolled back and closed best-effort, then
    the original exception is re-raised to the caller.
    """
    xbrlDbConn = None
    try:
        xbrlDbConn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
        if "rssObject" in kwargs:
            xbrlDbConn.initializeBatch(kwargs["rssObject"])
        else:
            xbrlDbConn.verifyTables()
            xbrlDbConn.insertXbrl(entrypoint, rssItem)
        xbrlDbConn.close()
    except Exception:
        if xbrlDbConn is not None:
            try:
                xbrlDbConn.close(rollback=True)
            except Exception:
                # deliberately best-effort: the original error is re-raised below
                pass
        raise
def isDBPort(host, port, timeout=10, product="postgres"):
    """Return whether a SQL server answers on host:port within timeout seconds.

    NOTE(review): the product argument is accepted but not forwarded to
    isSqlConnection — confirm whether the connectivity probe should be
    product-specific.
    """
    return isSqlConnection(host, port, timeout)
# Table names expected in the semantic-model schema; verifyTables compares
# these against the connected database and bootstraps the product's DDL
# when none of them exist.  (The DDL may create further tables not listed
# here — verify against the .ddl/.sql files if this set is changed.)
XBRLDBTABLES = {
    "filing", "report",
    "document", "referenced_documents",
    "aspect", "data_type", "role_type", "arcrole_type",
    "resource", "relationship_set", "root", "relationship",
    "data_point", "entity", "period", "unit", "unit_measure", "aspect_value_selection",
    "message", "message_reference",
    "industry", "industry_level", "industry_structure",
}
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
    """Verify the semantic-model tables exist, creating the schema when the
    database is completely empty of them.

    Raises XPDBException when tables are still missing afterwards (a lone
    leftover "sequences" table is tolerated).
    """
    ddlFileByProduct = {"mssql": "xbrlSemanticMSSqlDB.sql",
                        "mysql": "xbrlSemanticMySqlDB.ddl",
                        "sqlite": "xbrlSemanticSQLiteDB.ddl",
                        "orcl": "xbrlSemanticOracleDB.sql",
                        "postgres": "xbrlSemanticPostgresDB.ddl"}
    missingTables = XBRLDBTABLES - self.tablesInDB()
    if missingTables == XBRLDBTABLES:
        # no expected table exists: bootstrap the full DDL for this product
        self.create(os.path.join("sql", "semantic", ddlFileByProduct[self.product]))
        missingTables = XBRLDBTABLES - self.tablesInDB()
    if missingTables and missingTables != {"sequences"}:
        raise XPDBException("sqlDB:MissingTables",
                            _("The following tables are missing: %(missingTableNames)s"),
                            missingTableNames=', '.join(t for t in sorted(missingTables)))
def insertXbrl(self, entrypoint, rssItem):
    """Orchestrate insertion of the loaded model into the database.

    Loads dimension defaults and captured log entries, identifies the
    taxonomy rel-sets owner, pre-existing documents and used aspects, then
    runs the insertion steps in dependency order (filing, documents,
    aspects, role types, resources, data points, relationships, validation
    results) and commits, recording a profile stat for each step.
    """
    try:
        from arelle import ValidateXbrlDimensions
        ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
        # collect log entries captured by a DB-aware logging handler, if any
        self.loggingEntries = []
        for handler in logging.getLogger("arelle").handlers:
            if hasattr(handler, "dbHandlerLogEntries"):
                self.loggingEntries = handler.dbHandlerLogEntries()
                break
        if self.modelXbrl.modelDocument is None:
            raise XPDBException("xpgDB:MissingXbrlDocument",
                                _("No XBRL instance or schema loaded for this filing."))
        self.entityInformation = loadEntityInformation(self.modelXbrl, entrypoint, rssItem)
        self.tableFacts = tableFacts(self.modelXbrl)
        loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation)
        self.identifyTaxonomyRelSetsOwner()
        # requires locking most of the table structure
        self.lockTables(('entity', 'filing', 'report', 'document', 'referenced_documents'),
                        isSessionTransaction=True) # lock for whole transaction
        # find pre-existing documents in server database
        self.identifyPreexistingDocuments()
        self.identifyAspectsUsed()
        self.dropTemporaryTable()
        startedAt = time.time()
        self.syncSequences = True # for data base types that don't explicity handle sequences
        self.insertFiling(rssItem)
        self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertDocuments()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertAspects()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Aspects insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertArcroleTypes()
        self.insertRoleTypes()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertResources()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
        startedAt = time.time()
        startedAt = time.time()  # NOTE(review): duplicated reset — harmless but redundant
        self.insertDataPoints()
        self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertRelationships()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertValidationResults()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.showStatus("Committing entries")
        self.commit()
        self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
        self.showStatus("DB insertion completed", clearAfter=5000)
    except Exception as ex:
        self.showStatus("DB insertion failed due to exception", clearAfter=5000)
        raise
def identifyTaxonomyRelSetsOwner(self):
    """Determine which document owns taxonomy-level relationship sets, and
    classify each arcrole's relationships.

    Sets self.taxonomyRelSetsOwner to the single referenced taxonomy
    document when unambiguous, else to the instance / inline document set /
    entry document.  Also builds self.arcroleInInstance (whether any
    relationship of the arcrole lives in an instance or inline document)
    and self.arcroleHasResource (whether any end of a relationship is a
    label/reference resource).
    """
    instanceReferencedDocuments = set()
    instanceDocuments = set()
    inlineXbrlDocSet = None
    for mdlDoc in self.modelXbrl.urlDocs.values():
        if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
            instanceDocuments.add(mdlDoc)
            for refDoc, ref in mdlDoc.referencesDocument.items():
                if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
                    instanceReferencedDocuments.add(refDoc)
        elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
            inlineXbrlDocSet = mdlDoc
    if len(instanceReferencedDocuments) > 1:
        # multiple referenced taxonomies: fall back to the instance itself
        if len(instanceDocuments) == 1:
            self.taxonomyRelSetsOwner = instanceDocuments.pop()
        elif inlineXbrlDocSet is not None:
            self.taxonomyRelSetsOwner = inlineXbrlDocSet
        else:
            self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    elif len(instanceReferencedDocuments) == 1:
        self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
    elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
        self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    else:
        self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    instanceReferencedDocuments.clear()
    instanceDocuments.clear()
    # classify each dimensionless base-set arcrole
    self.arcroleInInstance = {}
    self.arcroleHasResource = {}
    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
        if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
            inInstance = False
            hasResource = False
            for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
                if (not inInstance and
                    rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
                    any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
                        for tgtObj in (rel.fromModelObject, rel.toModelObject))):
                    inInstance = True
                if not hasResource and any(isinstance(resource, ModelResource)
                                           for resource in (rel.fromModelObject, rel.toModelObject)):
                    hasResource = True
                if inInstance and hasResource:
                    break; # both flags determined; no need to scan further
            self.arcroleInInstance[arcrole] = inInstance
            self.arcroleHasResource[arcrole] = hasResource
def initializeBatch(self, rssObject):
    """Flag RSS items already loaded into the database so they are skipped.

    An item is skipped when its accession number exists as a filing and the
    accepted timestamp matches the item's acceptance datetime.
    """
    rows = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
    acceptedByFiling = {filingNumber: timestamp for filingNumber, timestamp in rows}
    for rssItem in rssObject.rssItems:
        if (rssItem.accessionNumber in acceptedByFiling
                and acceptedByFiling[rssItem.accessionNumber] == rssItem.acceptanceDatetime):
            rssItem.skipRssItem = True
def insertFiling(self, rssItem):
    """Insert (or find) the entity, filing and report rows for this load.

    Field values are taken preferentially from the RSS item, then from the
    previously loaded entity information, then fall back to generated
    values (e.g. a time-based number).  Sets self.entityId, self.filingId,
    self.reportId and the corresponding *PreviouslyInDB flags.
    """
    now = datetime.datetime.now()
    entityInfo = self.entityInformation
    def rssItemGet(propertyName):
        # safe accessor: None when there is no RSS item or no such property
        if rssItem is not None:
            return getattr(rssItem, propertyName, None)
        return None
    self.showStatus("insert entity")
    # LEI lookup not implemented here; comparator degrades to file_number only
    LEI = None
    entity_comparator = ('legal_entity_number', 'file_number') if LEI else ('file_number',)
    table = self.getTable('entity', 'entity_id',
                          ('legal_entity_number',
                           'file_number',
                           'reference_number',
                           'tax_number',
                           'standard_industry_code',
                           'name',
                           'legal_state',
                           'phone',
                           'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
                           'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
                           'fiscal_year_end',
                           'filer_category',
                           'public_float',
                           'trading_symbol'),
                          entity_comparator,
                          ((LEI,
                            rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
                            rssItemGet("cikNumber") or entityInfo.get("cik"),
                            entityInfo.get("irs-number"),
                            rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
                            rssItemGet("companyName") or entityInfo.get("conformed-name"),
                            entityInfo.get("state-of-incorporation"),
                            entityInfo.get("business-address.phone"),
                            entityInfo.get("business-address.street1"),
                            entityInfo.get("business-address.street2"),
                            entityInfo.get("business-address.city"),
                            entityInfo.get("business-address.state"),
                            entityInfo.get("business-address.zip"),
                            countryOfState.get(entityInfo.get("business-address.state")),
                            entityInfo.get("mail-address.street1"),
                            entityInfo.get("mail-address.street2"),
                            entityInfo.get("mail-address.city"),
                            entityInfo.get("mail-address.state"),
                            entityInfo.get("mail-address.zip"),
                            countryOfState.get(entityInfo.get("mail-address.state")),
                            rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end"),
                            entityInfo.get("filer-category"),
                            entityInfo.get("public-float"),
                            entityInfo.get("trading-symbol")
                            ),),
                          checkIfExisting=True,
                          returnExistenceStatus=True)
    # result row shape differs depending on whether LEI was in the comparator
    if LEI:
        for id, _LEI, filing_number, existenceStatus in table:
            self.entityId = id
            self.entityPreviouslyInDB = existenceStatus
            break
    else:
        for id, filing_number, existenceStatus in table:
            self.entityId = id
            self.entityPreviouslyInDB = existenceStatus
            break
    # record prior entity names, if any (keys like "<n>.former-conformed-name")
    if any ('former-conformed-name' in key for key in entityInfo.keys()):
        self.getTable('former_entity', None,
                      ('entity_id', 'former_name', 'date_changed'),
                      ('entity_id', 'former_name', 'date_changed'),
                      ((self.entityId,
                        entityInfo.get(keyPrefix + '.former-conformed-name'),
                        entityInfo.get(keyPrefix + '.date-changed'))
                       for key in entityInfo.keys() if 'former-conformed-name' in key
                       for keyPrefix in (key.partition('.')[0],)),
                      checkIfExisting=True)
    self.showStatus("insert filing")
    table = self.getTable('filing', 'filing_id',
                          ('filing_number', 'form_type', 'entity_id', 'reference_number',
                           'accepted_timestamp', 'is_most_current', 'filing_date',
                           'creation_software',
                           'authority_html_url', 'entry_url', ),
                          ('filing_number',),
                          ((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())),
                            rssItemGet("formType") or entityInfo.get("form-type"),
                            self.entityId,
                            rssItemGet("cikNumber") or entityInfo.get("cik"),
                            rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
                            True,
                            rssItemGet("filingDate") or entityInfo.get("filing-date") or now,
                            self.modelXbrl.modelDocument.creationSoftware,
                            rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
                            rssItemGet("url") or entityInfo.get("instance-url")
                            ),),
                          checkIfExisting=True,
                          returnExistenceStatus=True)
    for id, filing_number, existenceStatus in table:
        self.filingId = id
        self.filingPreviouslyInDB = existenceStatus
        break
    self.showStatus("insert report")
    table = self.getTable('report', 'report_id',
                          ('filing_id', ),
                          ('filing_id',),
                          ((self.filingId,
                            ),),
                          checkIfExisting=True,
                          returnExistenceStatus=True)
    for id, foundFilingId, existenceStatus in table:
        self.reportId = id
        self.filingPreviouslyInDB = existenceStatus
        break
def isSemanticDocument(self, modelDocument):
    """Return True when the document contributes semantic content to store."""
    docType = modelDocument.type
    if docType == Type.SCHEMA:
        # non-DTS schemas are excluded except the arelle documentation schema
        return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
    return docType in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
def identifyPreexistingDocuments(self):
    """Find which of the model's documents already exist in the database.

    Builds self.urlDocs (normalized url -> model document), queries the
    document table for matching urls, populates self.existingDocumentIds
    (model document -> document_id), and derives
    self.isExistingTaxonomyRelSetsOwner (the rel-sets owner is a taxonomy
    document that is already stored).
    """
    self.existingDocumentIds = {}
    self.urlDocs = {}
    docUris = set()
    for modelDocument in self.modelXbrl.urlDocs.values():
        url = ensureUrl(modelDocument.uri)
        self.urlDocs[url] = modelDocument
        if self.isSemanticDocument(modelDocument):
            docUris.add(self.dbStr(url))
    if docUris:
        # docUris are already db-quoted strings, so joined directly into IN (...)
        results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
                               .format(self.dbTableName("document"),
                                       ', '.join(docUris)))
        self.existingDocumentIds = dict((self.urlDocs[self.pyStrFromDbStr(docUrl)],docId)
                                        for docId, docUrl in results)
    self.isExistingTaxonomyRelSetsOwner = (
        self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
        self.taxonomyRelSetsOwner in self.existingDocumentIds)
def identifyAspectsUsed(self):
    """Collect the concepts ("aspects") and types actually used by this filing.

    Gathers concepts from facts, context dimensions (explicit members and
    typed-member declarations), dimension defaults, relationship ends,
    built-in xbrli concepts, role/arcrole usedOn qnames, and concepts
    referenced by captured log entries; then closes over substitution
    groups and the derivation chains of their types.  Sets
    self.relationshipSets, self.aspectsUsed and self.typesUsed.
    """
    self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
                             for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                             if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
    aspectsUsed = set(f.concept
                      for f in self.modelXbrl.factsInInstance)
    for cntx in self.modelXbrl.contexts.values():
        for dim in cntx.qnameDims.values():
            aspectsUsed.add(dim.dimension)
            if dim.isExplicit:
                aspectsUsed.add(dim.member)
            else:
                aspectsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
    for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
        aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
        aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
    for relationshipSetKey in self.relationshipSets:
        relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
        for rel in relationshipSet.modelRelationships:
            if isinstance(rel.fromModelObject, ModelConcept):
                aspectsUsed.add(rel.fromModelObject)
            if isinstance(rel.toModelObject, ModelConcept):
                aspectsUsed.add(rel.toModelObject)
    try:
        # built-in xbrli concepts; absent in some non-instance DTSes
        for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
            aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
    except KeyError:
        pass
    for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
        for roleUriTypes in roleTypes:
            for roleType in roleUriTypes:
                for qn in roleType.usedOns:
                    if qn in self.modelXbrl.qnameConcepts:
                        aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
    # concepts referenced by captured validation/log messages
    for logEntry in self.loggingEntries:
        for ref in logEntry['refs']:
            modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
            if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
                aspectsUsed.add(modelObject)
    # include substitution-group heads, then drop any None placeholders
    aspectsUsed |= set(aspect.substitutionGroup
                       for aspect in aspectsUsed
                       if aspect is not None)
    aspectsUsed -= {None}
    self.aspectsUsed = aspectsUsed
    typesUsed = set()
    def typeUsed(modelType):
        # recursively add a type and its derivation ancestors (DTS-only)
        if modelType is not None and modelType.modelDocument.inDTS:
            typesUsed.add(modelType)
            typesDerivedFrom = modelType.typeDerivedFrom
            if isinstance(typesDerivedFrom, list):
                for typeDerivedFrom in typesDerivedFrom:
                    if typeDerivedFrom not in typesUsed:
                        typeUsed(typeDerivedFrom)
            else:
                if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
                    typeUsed(typesDerivedFrom)
    for aspect in aspectsUsed:
        modelType = aspect.type
        if modelType is not None:
            if modelType not in typesUsed:
                typeUsed(modelType)
    self.typesUsed = typesUsed
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[self.pyStrFromDbStr(url)], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None,
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
instDocSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
)
def insertAspects(self):
self.showStatus("insert aspects")
filingDocumentAspects = set()
existingDocumentUsedAspects = set()
for concept in self.modelXbrl.qnameConcepts.values():
if concept.modelDocument not in self.existingDocumentIds:
filingDocumentAspects.add(concept)
filingDocumentAspectType = concept.type
if filingDocumentAspectType is not None and filingDocumentAspectType not in self.typesUsed:
self.typesUsed.add(filingDocumentAspectType)
elif concept in self.aspectsUsed:
existingDocumentUsedAspects.add(concept)
filingDocumentTypes = set()
existingDocumentUsedTypes = set()
for modelType in self.modelXbrl.qnameTypes.values():
if modelType.modelDocument not in self.existingDocumentIds:
filingDocumentTypes.add(modelType)
elif modelType in self.typesUsed:
existingDocumentUsedTypes.add(modelType)
self.typeQnameId = {}
if existingDocumentUsedTypes:
typeQnameIds = []
table = self.getTable('data_type', 'data_type_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.qname.clarkNotation)
for modelType in existingDocumentUsedTypes
if modelType.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
table = self.getTable('data_type', 'data_type_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'base_type', 'derived_from_type_id'),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.id,
elementChildSequence(modelType),
modelType.qname.clarkNotation,
modelType.name,
modelType.baseXsdType,
self.typeQnameId.get(modelType.typeDerivedFrom)
if isinstance(modelType.typeDerivedFrom, ModelType) else None)
for modelType in filingDocumentTypes
if modelType.modelDocument in self.documentIds)
)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
updatesToDerivedFrom = set()
for modelType in filingDocumentTypes:
if isinstance(modelType.typeDerivedFrom, ModelType):
typeDerivedFrom = modelType.typeDerivedFrom
if (typeDerivedFrom in filingDocumentTypes and
modelType.qname in self.typeQnameId and
typeDerivedFrom.qname in self.typeQnameId):
updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
self.typeQnameId[typeDerivedFrom.qname]) )
if updatesToDerivedFrom:
self.updateTable('data_type',
('data_type_id', 'derived_from_type_id'),
updatesToDerivedFrom)
existingDocumentUsedTypes.clear() # dereference
filingDocumentTypes.clear() # dereference
self.aspectQnameId = {}
# get existing element IDs
if existingDocumentUsedAspects:
table = self.getTable('aspect', 'aspect_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[concept.modelDocument],
concept.qname.clarkNotation)
for concept in existingDocumentUsedAspects
if concept.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for aspectId, docId, qn in table:
self.aspectQnameId[qname(qn)] = aspectId
aspects = []
for concept in filingDocumentAspects:
niceType = concept.niceType
if niceType is not None and len(niceType) > 128:
niceType = niceType[:128]
if concept.modelDocument in self.documentIds:
aspects.append((self.documentIds[concept.modelDocument],
concept.id,
elementChildSequence(concept),
concept.qname.clarkNotation,
concept.name,
self.typeQnameId.get(concept.typeQname),
niceType[:128] if niceType is not None else None,
self.aspectQnameId.get(concept.substitutionGroupQname),
concept.balance,
concept.periodType,
concept.isAbstract,
concept.isNillable,
concept.isNumeric,
concept.isMonetary,
concept.isTextBlock))
table = self.getTable('aspect', 'aspect_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_aspect_id',
'balance', 'period_type', 'abstract', 'nillable',
'is_numeric', 'is_monetary', 'is_text_block'),
('document_id', 'qname'),
aspects
)
for aspectId, docId, qn in table:
self.aspectQnameId[qname(qn)] = aspectId
updatesToSubstitutionGroup = set()
for concept in filingDocumentAspects:
if concept.substitutionGroup in filingDocumentAspects and concept.modelDocument in self.documentIds:
updatesToSubstitutionGroup.add( (self.aspectQnameId[concept.qname],
self.aspectQnameId.get(concept.substitutionGroupQname)) )
# update derivedFrom's of newly added types
if updatesToSubstitutionGroup:
self.updateTable('aspect',
('aspect_id', 'substitution_group_aspect_id'),
updatesToSubstitutionGroup)
filingDocumentAspects.clear()
existingDocumentUsedAspects.clear()
def insertArcroleTypes(self):
self.showStatus("insert arcrole types")
arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
arcroleType.roleURI)
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'arcrole_uri'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0],
arcroleTypeIDs[1]
)
for arcroleTypeIDs in arcroleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.arcroleTypeIds = {}
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
arcroleType.arcroleURI),
arcroleType)
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0],
arcroleType.id,
elementChildSequence(arcroleType),
arcroleType.arcroleURI,
arcroleType.cyclesAllowed,
arcroleType.definition)
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
table = self.getTable('used_on',
None,
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
self.aspectQnameId[usedOnQn])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
for usedOnQn in arcroleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
def insertRoleTypes(self):
self.showStatus("insert role types")
roleTypesByIds = set((self.documentIds[roleType.modelDocument],
roleType.roleURI)
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'role_uri'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0],
roleTypeIDs[1]
)
for roleTypeIDs in roleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.roleTypeIds = {}
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
roleType.roleURI),
roleType)
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0],
roleType.id,
elementChildSequence(roleType),
roleTypeIDs[1],
roleType.definition)
for roleTypeIDs, roleType in roleTypesByIds.items()))
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
table = self.getTable('used_on',
None,
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
self.aspectQnameId[usedOnQn])
for roleTypeIDs, roleType in roleTypesByIds.items()
for usedOnQn in roleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
def insertResources(self):
self.showStatus("insert resources")
arcroles = [arcrole
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
and self.arcroleHasResource[arcrole]
and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
uniqueResources = dict(((self.documentIds[resource.modelDocument],
resource.objectIndex), resource)
for arcrole in arcroles
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
for resource in (rel.fromModelObject, rel.toModelObject)
if isinstance(resource, ModelResource))
table = self.getTable('resource', 'resource_id',
('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
('document_id', 'xml_child_seq'),
tuple((self.documentIds[resource.modelDocument],
resource.id,
elementChildSequence(resource),
resource.qname.clarkNotation,
resource.role,
resource.textValue,
resource.xmlLang)
for resource in uniqueResources.values()),
checkIfExisting=True)
self.resourceId = dict(((docId, xml_child_seq), id)
for id, docId, xml_child_seq in table)
uniqueResources.clear()
def modelObjectId(self, modelObject):
if isinstance(modelObject, ModelConcept):
return self.aspectQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelType):
return self.aspectTypeIds.get(modelObject.qname)
elif isinstance(modelObject, ModelResource):
return self.resourceId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelFact):
return self.factDataPointId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
else:
return None
    def insertRelationships(self):
        """Insert relationship_set, relationship and root rows.

        Covers every standard (non "XBRL-" prefixed) base set of the DTS,
        walking each relationship tree depth-first to record per-relationship
        tree sequence and depth.
        """
        self.showStatus("insert relationship sets")
        # one relationship_set row per (ELR, arcrole, link qname, arc qname)
        table = self.getTable('relationship_set', 'relationship_set_id',
                              ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                              ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                              tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
                                                      else self.taxonomyRelSetsOwner],
                                     ELR,
                                     arcrole,
                                     linkqname.clarkNotation,
                                     arcqname.clarkNotation)
                                    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                    if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                                    and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
        self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
                             for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
        # collect (rel, tree sequence, tree depth, relationship set id) tuples
        dbRels = []
        # depth-first walk; visited guards against directed cycles
        def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
            for rel in rels:
                if rel not in visited and isinstance(rel.toModelObject, ModelObject):
                    visited.add(rel)
                    dbRels.append((rel, seq, depth, relSetId))
                    seq += 1
                    seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
                    visited.remove(rel)
            return seq
        for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
            if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
                relSetId = self.relSetId[(ELR,
                                          arcrole,
                                          linkqname.clarkNotation,
                                          arcqname.clarkNotation)]
                relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
                seq = 1
                for rootConcept in relationshipSet.rootConcepts:
                    seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
        # NOTE(review): resourceResourceId is never called here, and its
        # 3-part key does not match self.resourceId's (docId, xml_child_seq)
        # keys built by insertResources - appears to be stale dead code.
        def resourceResourceId(resource):
            if isinstance(resource, ModelResource):
                return self.resourceId.get((self.documentIds[resource.modelDocument],
                                            resource.sourceline,
                                            resource.objectIndex))
            else:
                return None
        table = self.getTable('relationship', 'relationship_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'relationship_set_id', 'reln_order',
                               'from_id', 'to_id', 'calculation_weight',
                               'tree_sequence', 'tree_depth', 'preferred_label_role'),
                              ('relationship_set_id', 'document_id', 'xml_child_seq'),
                              tuple((self.documentIds[rel.modelDocument],
                                     rel.id,
                                     elementChildSequence(rel.arcElement),
                                     relSetId,
                                     self.dbNum(rel.order),
                                     self.modelObjectId(rel.fromModelObject),
                                     self.modelObjectId(rel.toModelObject),
                                     self.dbNum(rel.weight),
                                     sequence,
                                     depth,
                                     rel.preferredLabel)
                                    for rel, sequence, depth, relSetId in dbRels
                                    if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        self.relationshipId = dict(((docId,xml_child_seq), relationshipId)
                                   for relationshipId, relSetId, docId, xml_child_seq in table)
        # root rows mark the depth-1 relationships of each relationship set
        table = self.getTable('root', None,
                              ('relationship_set_id', 'relationship_id'),
                              ('relationship_set_id', 'relationship_id'),
                              tuple((relSetId,
                                     self.relationshipId[self.documentIds[rel.modelDocument],
                                                         elementChildSequence(rel.arcElement)])
                                    for rel, sequence, depth, relSetId in dbRels
                                    if depth == 1 and
                                    isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        del dbRels[:]  # dereference
    def insertDataPoints(self):
        """Insert facts (data points) with their units, entity identifiers,
        periods and dimensional aspect-value selections for this report.

        When the filing was previously stored, all prior per-report rows are
        deleted first so the operation is idempotent per report.  Tuples are
        inserted recursively with parent_datapoint_id linking children.
        """
        reportId = self.reportId
        if self.filingPreviouslyInDB:
            self.showStatus("deleting prior data points of this report")
            # delete per-report rows; join tables are removed via USING joins
            self.lockTables(("data_point", "entity_identifier", "period", "aspect_value_selection",
                             "aspect_value_selection_set", "unit_measure", "unit",
                             "table_data_points"))
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("data_point"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("entity_identifier"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("period"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE from {0} "
                         "USING {1} "
                         "WHERE {1}.report_id = {2} AND {0}.aspect_value_selection_id = {1}.aspect_value_selection_id"
                         .format( self.dbTableName("aspect_value_selection"),
                                  self.dbTableName("aspect_value_selection_set"),
                                  reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1};"
                         .format( self.dbTableName("aspect_value_selection_set"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE from {0} "
                         "USING {1} "
                         "WHERE {1}.report_id = {2} AND {0}.unit_id = {1}.unit_id"
                         .format( self.dbTableName("unit_measure"),
                                  self.dbTableName("unit"),
                                  reportId),
                         close=False, fetch=False)
            self.execute("DELETE from {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("unit"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("table_data_points"), reportId),
                         close=False, fetch=False)
        self.showStatus("insert data points")
        # units, de-duplicated by measures hash
        table = self.getTable('unit', 'unit_id',
                              ('report_id', 'xml_id', 'xml_child_seq', 'measures_hash'),
                              ('report_id', 'measures_hash'),
                              tuple((reportId,
                                     unit.id,
                                     elementChildSequence(unit),
                                     unit.md5hash)
                                    for unit in dict((unit.md5hash,unit)
                                                     for unit in self.modelXbrl.units.values()).values()))
        self.unitId = dict(((_reportId, measuresHash), id)
                           for id, _reportId, measuresHash in table)
        # measures of each unit; index 0 holds multiplicands, 1 divisors
        table = self.getTable('unit_measure',
                              None,
                              ('unit_id', 'qname', 'is_multiplicand'),
                              ('unit_id', 'qname', 'is_multiplicand'),
                              tuple((self.unitId[(reportId,unit.md5hash)],
                                     measure.clarkNotation,
                                     i == 0)
                                    for unit in self.modelXbrl.units.values()
                                    for i in range(2)
                                    for measure in unit.measures[i]))
        # distinct (scheme, identifier) pairs of all contexts
        table = self.getTable('entity_identifier', 'entity_identifier_id',
                              ('report_id', 'scheme', 'identifier'),
                              ('report_id', 'scheme', 'identifier'),
                              set((reportId,
                                   cntx.entityIdentifier[0],
                                   cntx.entityIdentifier[1])
                                  for cntx in self.modelXbrl.contexts.values()),
                              checkIfExisting=True)
        self.entityIdentifierId = dict(((_reportId, entScheme, entIdent), id)
                                       for id, _reportId, entScheme, entIdent in table)
        # distinct periods of all contexts
        table = self.getTable('period', 'period_id',
                              ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                              ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                              set((reportId,
                                   cntx.startDatetime if cntx.isStartEndPeriod else None,
                                   cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                   cntx.isInstantPeriod,
                                   cntx.isForeverPeriod)
                                  for cntx in self.modelXbrl.contexts.values()),
                              checkIfExisting=True)
        self.periodId = dict(((_reportId, start, end, isInstant, isForever), id)
                             for id, _reportId, start, end, isInstant, isForever in table)
        # frozenset of a context's dimension-value 4-tuples, usable as dict key
        def cntxDimsSet(cntx):
            return frozenset((self.aspectQnameId[modelDimValue.dimensionQname],
                              self.aspectQnameId.get(modelDimValue.memberQname),
                              modelDimValue.isTyped,
                              modelDimValue.stringValue if modelDimValue.isTyped else None)
                             for modelDimValue in cntx.qnameDims.values()
                             if modelDimValue.dimensionQname in self.aspectQnameId)
        cntxAspectValueSelectionSet = dict((cntx, cntxDimsSet(cntx))
                                           for cntx in self.modelXbrl.contexts.values())
        # distinct non-empty dimension sets across all contexts
        aspectValueSelections = set(aspectValueSelectionSet
                                    for cntx, aspectValueSelectionSet in cntxAspectValueSelectionSet.items()
                                    if aspectValueSelectionSet)
        self.lockTables(("aspect_value_selection_set",))
        self.execute("DELETE FROM {0} WHERE report_id = {1}"
                     .format(self.dbTableName("aspect_value_selection_set"), reportId),
                     close=False, fetch=False)
        # one selection-set row per distinct dimension set (rows carry only
        # report_id, so the ids are interchangeable among sets)
        table = self.getTable('aspect_value_selection_set', 'aspect_value_selection_id',
                              ('report_id', ),
                              ('report_id', ),
                              tuple((reportId,)
                                    for aspectValueSelection in aspectValueSelections)
                              )
        table = self.execute("SELECT aspect_value_selection_id, report_id from {0} "
                             "WHERE report_id = {1}"
                             .format(self.dbTableName("aspect_value_selection_set"), reportId))
        # pair each distinct dimension set with one of the fresh ids
        # (arbitrary pairing is fine - the rows are identical)
        aspectValueSelectionSets = dict((aspectValueSelections.pop(), id)
                                        for id, _reportId in table)
        cntxAspectValueSelectionSetId = dict((cntx, aspectValueSelectionSets[_cntxDimsSet])
                                             for cntx, _cntxDimsSet in cntxAspectValueSelectionSet.items()
                                             if _cntxDimsSet)
        table = self.getTable('aspect_value_selection',
                              None,
                              ('aspect_value_selection_id', 'aspect_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
                              ('aspect_value_selection_id', ),
                              tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
                                    for aspectValueSelection, aspectValueSetId in aspectValueSelectionSets.items()
                                    for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
                              )
        # recursively insert facts; a tuple's children get its datapoint id
        # as parent_datapoint_id
        def insertFactSet(modelFacts, parentDatapointId):
            facts = []
            for fact in modelFacts:
                # skip invalid facts and those without a concept/qname
                if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
                    cntx = fact.context
                    documentId = self.documentIds[fact.modelDocument]
                    facts.append((reportId,
                                  documentId,
                                  fact.id,
                                  elementChildSequence(fact),
                                  fact.sourceline,
                                  parentDatapointId,
                                  self.aspectQnameId.get(fact.qname),
                                  fact.contextID,
                                  self.entityIdentifierId.get((reportId, cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
                                  if cntx is not None else None,
                                  self.periodId.get((reportId,
                                                     cntx.startDatetime if cntx.isStartEndPeriod else None,
                                                     cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                                     cntx.isInstantPeriod,
                                                     cntx.isForeverPeriod)) if cntx is not None else None,
                                  cntxAspectValueSelectionSetId.get(cntx) if cntx is not None else None,
                                  self.unitId.get((reportId,fact.unit.md5hash)) if fact.unit is not None else None,
                                  fact.isNil,
                                  fact.precision,
                                  fact.decimals,
                                  roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
                                  fact.value
                                  ))
            table = self.getTable('data_point', 'datapoint_id',
                                  ('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
                                   'parent_datapoint_id',
                                   'aspect_id',
                                   'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_selection_id', 'unit_id',
                                   'is_nil', 'precision_value', 'decimals_value', 'effective_value', 'value'),
                                  ('document_id', 'xml_child_seq'),
                                  facts)
            xmlIdDataPointId = dict(((docId, xml_child_seq), datapointId)
                                    for datapointId, docId, xml_child_seq in table)
            self.factDataPointId.update(xmlIdDataPointId)
            # recurse into tuples using the just-assigned datapoint ids
            for fact in modelFacts:
                if fact.isTuple:
                    try:
                        insertFactSet(fact.modelTupleFacts,
                                      xmlIdDataPointId[(self.documentIds[fact.modelDocument],
                                                        elementChildSequence(fact))])
                    except KeyError:
                        self.modelXbrl.info("xpDB:warning",
                                            _("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
                                            modelObject=fact, tuple=fact.qname)
        self.factDataPointId = {}
        insertFactSet(self.modelXbrl.facts, None)
        # table_data_points: facts presented in (rendered) tables, if any
        if self.tableFacts: # if any entries
            tableDataPoints = []
            for roleType, tableCode, fact in self.tableFacts:
                try:
                    tableDataPoints.append((reportId,
                                            self.roleTypeIds[(self.documentIds[roleType.modelDocument],
                                                              roleType.roleURI)],
                                            tableCode,
                                            self.factDataPointId[(self.documentIds[fact.modelDocument],
                                                                  elementChildSequence(fact))]))
                except KeyError:
                    # skip entries whose role or data point was not stored
                    pass
            table = self.getTable('table_data_points', None,
                                  ('report_id', 'object_id', 'table_code', 'datapoint_id'),
                                  ('report_id', 'object_id', 'datapoint_id'),
                                  tableDataPoints)
def insertValidationResults(self):
reportId = self.reportId
if self.filingPreviouslyInDB:
self.showStatus("deleting prior messages of this report")
# remove prior messages for this report
self.lockTables(("message", "message_reference"))
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
.format(self.dbTableName("message_reference"),
self.dbTableName("message"),
reportId),
close=False, fetch=False)
self.execute("DELETE FROM {} WHERE message.report_id = {}"
.format(self.dbTableName("message"),reportId),
close=False, fetch=False)
messages = []
messageRefs = defaultdict(set) # direct link to objects
for i, logEntry in enumerate(self.loggingEntries):
sequenceInReport = i+1
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
# for now just find a concept
objectId = None
if isinstance(modelObject, ModelFact):
objectId = self.factDataPointId.get((self.documentIds.get(modelObject.modelDocument),
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelRelationship):
objectId = self.relSetId.get((modelObject.linkrole,
modelObject.arcrole,
modelObject.linkQname.clarkNotation,
modelObject.arcElement.qname.clarkNotation))
elif isinstance(modelObject, ModelConcept):
objectId = self.aspectQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelXbrl):
objectId = reportId
elif hasattr(modelObject, "modelDocument"):
objectId = self.documentIds.get(modelObject.modelDocument)
if objectId is not None:
messageRefs[sequenceInReport].add(objectId)
messages.append((reportId,
sequenceInReport,
logEntry['code'],
logEntry['level'],
logEntry['message']['text']))
if messages:
self.showStatus("insert validation messages")
table = self.getTable('message', 'message_id',
('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
('report_id', 'sequence_in_report'),
messages)
messageIds = dict((sequenceInReport, messageId)
for messageId, _reportId, sequenceInReport in table)
table = self.getTable('message_reference', None,
('message_id', 'object_id'),
('message_id', 'object_id'),
tuple((messageId,
objectId)
for sequenceInReport, objectIds in messageRefs.items()
for objectId in objectIds
for messageId in (messageIds[sequenceInReport],)))
# Maps US state/territory and Canadian province/territory postal
# abbreviations to their country code, for resolving a filer's country
# from its state code.
countryOfState = {
    "AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
    "FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
    "KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
    "MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
    "NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
    "SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
    "WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
    "AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
    "QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| true | true |
f72b45704b6b738dc155c0160abe96949099d9a7 | 13,758 | py | Python | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | """Data structure for CAtlas."""
import argparse
import cProfile
import os
import sys
import tempfile
import gzip
import copy
from .rdomset import rdomset, domination_graph
from .graph_io import read_from_gxt, write_to_gxt
from .graph import Graph
from spacegraphcats.utils.logging import log_command
from io import TextIOWrapper
from collections import defaultdict
from typing import List, Dict, Set
UPPER_RADIUS = 1
class Project(object):
    """Methods for coordinating whole CAtlas-building projects.

    Holds the intermediate state of the hierarchical build (current graph,
    node index, level, and the partially built root) so the computation can
    be checkpointed and resumed.
    """

    def __init__(self, directory: str, r: int, checkpoint: bool = True):
        """
        Make a project in directory at radius r.

        This object stores the intermediate variables for the CAtlas building
        so that they can be checkpointed as necessary.
        """
        self.dir = directory            # project directory holding all files
        self.r = r                      # domination radius for the base level
        self.checkpoint = checkpoint    # whether to write/read checkpoints
        self.graph = None               # graph being dominated at the current level
        self.idx = 0                    # next CAtlas node id to assign
        self.level = 1                  # current level being built (leaves = 1)
        self.level_nodes = None         # vertex -> CAtlas node of previous level
        self.root = CAtlas(self.idx, -1, self.level, list())
        # project file names
        self.domfilename = os.path.join(self.dir, "first_doms.txt")
        self.graphfilename = os.path.join(self.dir, "cdbg.gxt")
        self.catlasfilename = os.path.join(self.dir, "catlas.csv")

    def existing_checkpoints(self):
        """Get the (sorted) levels of existing checkpoint files for this radius."""
        files = []
        for f in os.listdir(self.dir):
            name, ext = os.path.splitext(f)
            if ext == ".checkpoint":
                # checkpoint file names have the form "<r>_<level>.checkpoint"
                r, level = map(int, name.split("_"))
                if r == self.r:
                    files.append(level)
        return list(sorted(files))

    def cp_name(self, level: int) -> str:
        """Return the name of the checkpoint file after level level."""
        return os.path.join(self.dir,
                            "{}_{}.checkpoint".format(self.r, level))

    def load_furthest_checkpoint(self):
        """Load the checkpoint that is furthest along."""
        existing = self.existing_checkpoints()
        # if there are no checkpoints or we don't want to load from one,
        # just read G from the graph file
        if len(existing) == 0 or not self.checkpoint:
            print("Loading graph from {}".format(self.graphfilename))
            # we only need to set the graph variable since index, level, and
            # previous nodes have the proper values by default
            with open(self.graphfilename, 'r') as graph_file:
                self.graph = read_from_gxt(graph_file, self.r, False)
        else:
            self.load_checkpoint(existing[-1])

    def load_checkpoint(self, level: int):
        """Read cached information from a partial catlas computation."""
        if not self.checkpoint:
            raise IOError("I told you I didn't want to load from checkpoint!")
        print("Loading results of building level {}".format(level))
        # the temp file contains catlas and graph information. To use the
        # readers for catlas and graph, we need to temporarily split them into
        # separate files
        tmpf = tempfile.TemporaryFile(mode='r+')
        infile = self.cp_name(level)
        with gzip.open(infile, 'rt') as f:
            # read until the end of the catlas (sections are separated by "###")
            for line in f:
                if line == "###\n":
                    break
                tmpf.write(line)
            # once we are at the graph section, start reading from there
            self.graph = read_from_gxt(f, radius=UPPER_RADIUS, directed=False,
                                       sequential=False)
        # move back to the beginning of the temporary file and read the
        # catlas
        tmpf.seek(0)
        root = CAtlas.read(tmpf)
        tmpf.close()
        # the checkpointed CAtlas has a dummy root. The nodes in the
        # current level need to be removed from the root because we haven't
        # finished constructing their parents.
        unfinished_idx = -1*len(self.graph)
        unfinished = root.children[unfinished_idx:]
        root.children = root.children[:unfinished_idx]
        self.level_nodes = {node.vertex: node for node in unfinished}
        self.idx = root.idx
        self.level = root.level - 1
        self.root = root

    def _save(self):
        """Write the current partial computation to the checkpoint file."""
        outfile = self.cp_name(self.level - 1)
        print("Writing to file {}".format(outfile))
        with gzip.open(outfile, 'wt') as f:
            # make a dummy root to write the catlas using catlas.write method
            # we add all current level nodes as children of the root
            root = CAtlas(self.idx, -1, self.level,
                          copy.copy(self.root.children))
            root.children.extend(self.level_nodes.values())
            root.write(f)
            f.write("###\n")  # section separator expected by load_checkpoint
            write_to_gxt(f, self.graph)

    def save_checkpoint(self):
        """Write out a partial computation (no-op when checkpointing is off)."""
        if not self.checkpoint:
            return
        else:
            self._save()
class CAtlas(object):
"""Hierarchical atlas for querying graphs."""
LEVEL_THRESHOLD = 10
def __init__(self, idx, vertex, level, children):
"""
Construct a CAtlas node.
Arguments:
idx: Integer identifier of the node. A CAtlas with n nodes will
have ids 0,1,...,n-1. The root will always have id n-1.
vertex: Name of vertex in the cDBG
level: The height of the node in the hierarchy. The leaves are at
level 1, their parents at level 2, etc.
children: the CAtlas nodes for which this is a parent
"""
self.idx = idx
self.vertex = vertex
self.children = children
self.level = level
    @staticmethod
    def build(proj, benchmark_only=False):
        """Build a CAtlas at a given radius.

        Repeatedly dominates proj.graph, stacking levels until the domination
        graph is small enough or stops shrinking, then attaches everything
        remaining to proj.root.  Returns the root CAtlas node, or None when
        benchmark_only is set or no nodes remain.
        """
        # keep creating progressively smaller graphs until we hit the level
        # threshold or steady state
        while True:
            print()
            # the base level should have a large radius, others are just 1
            if proj.level == 1:
                r = proj.r
            else:
                r = UPPER_RADIUS
            # build the current level
            nodes, domgraph, dominated = CAtlas._build_level(proj.graph,
                                                             r,
                                                             proj.level,
                                                             proj.idx,
                                                             proj.level_nodes)
            print("Catlas level {} complete".format(proj.level))
            # at the bottom level we need to write out the domination
            # assignment ("first_doms"): one line per dominator listing the
            # vertices it dominates
            if proj.level == 1 and not benchmark_only:
                with open(proj.domfilename, 'w') as domfile:
                    for v, shadow in dominated.items():
                        domstr = str(v)
                        for u in shadow:
                            domstr += " {}".format(u)
                        domstr += "\n"
                        domfile.write(domstr)
            # increment the index and level now so they are correctly adjusted
            # if we happen to return
            proj.idx += len(nodes)
            proj.level += 1
            # Keeping isolated vertices as parents of themselves blows up the
            # CAtlas size unnecessarily. We need to immediately make them
            # children of the root.
            for v in dominated:
                if v not in domgraph:
                    proj.root.children.append(nodes.pop(v))
            # quit if our level is sufficiently small
            if len(domgraph) <= CAtlas.LEVEL_THRESHOLD or \
                    len(domgraph) == len(proj.graph):
                break
            # prep for the next iteration
            proj.graph = domgraph
            proj.level_nodes = nodes
            # write level results to the checkpoint file if applicable
            proj.save_checkpoint()
        if benchmark_only:
            return None
        if not nodes:
            return None
        # place all remaining nodes as children of the root
        proj.root.children.extend(nodes.values())
        proj.root.level = proj.level
        proj.root.vertex = list(nodes.keys())[0]
        proj.root.idx = proj.idx
        return proj.root
@staticmethod
def _build_level(graph: Graph, radius: int, level: int, min_id: int=0,
prev_nodes: List[int]=None):
# find the domgraph of the current domgraph
domset = rdomset(graph, radius)
# dominated maps dominating vertices to a list of the vertices they
# optimally dominate
domgraph, dominated = domination_graph(graph, domset, radius)
# create the CAtlas nodes
nodes = {} # type: Dict[int, CAtlas]
for idx, v in enumerate(domset):
# if no previous nodes were supplied, we assume we are on the
# bottom level and thus the children field is empty
if prev_nodes is None:
children = [] # type: List[int]
else:
children = [prev_nodes[u] for u in dominated[v]]
nodes[v] = CAtlas(min_id+idx, v, level, children)
return nodes, domgraph, dominated
def leaves(self, visited: Set[object]=None) -> Set[object]:
"""Find the descendants of this node with no children."""
# this function is recursive so we need to keep track of nodes we
# already visited
if visited is None:
visited = set([self])
# base case is level 0
if self.level == 1:
return set([self])
# otherwise gather the leaves of the children
res = set() # type: Set[object]
for c in self.children:
if c not in visited:
visited.add(c)
res |= c.leaves(visited)
return res
def write(self, outfile: TextIOWrapper):
"""Write the connectivity of the CAtlas to file."""
# doesn't matter how we traverse the graph, so we use DFS for ease of
# implementation
stack = [self]
seen = set()
while len(stack) > 0:
# remove from the stack
curr = stack.pop()
# write node information
child_str = " ".join(str(child.idx) for child in curr.children)
outfile.write("{},{},{},{}\n".format(curr.idx,
curr.vertex,
curr.level,
child_str))
# all nodes already seen don't get re-added
seen.add(curr)
stack.extend(filter(lambda x: x not in seen, curr.children))
@classmethod
def read(cls, catlas_file):
"""Load the catlas Directed Acyclic Graph."""
children = []
nodes = []
# load everything from the catlas file
for line in catlas_file:
catlas_node, cdbg_node, level, beneath = line.strip().split(',')
level = int(level)
catlas_node = int(catlas_node)
cdbg_node = int(cdbg_node)
# extend arrays as necessary
if len(children) <= catlas_node:
for i in range(catlas_node - len(children) + 1):
children.append([])
nodes.append(None)
# parse out the children
beneath = beneath.strip()
if beneath:
beneath = beneath.split(' ')
children[catlas_node].extend(map(int, beneath))
# make the new node with empty children
node = cls(catlas_node, cdbg_node, level, [])
nodes[catlas_node] = node
# update the nodes with pointers to their children
for i, n in enumerate(nodes):
for child in children[n.idx]:
n.children.append(nodes[child])
return nodes[-1]
def main(args):
    """Build a CAtlas for the provided input graph.

    Returns 0 on success, -1 when the resulting catlas is empty.
    """
    project = Project(args.project, args.radius, not args.no_checkpoint)
    print("reading graph")
    if args.level:
        # resume from an explicitly requested checkpoint level
        print("Loading checkpoint at level {}".format(args.level))
        project.load_checkpoint(args.level)
    else:
        # resume from the furthest checkpoint, or the raw graph if none exists
        print("Loading checkpoint")
        project.load_furthest_checkpoint()
    print("reading complete")
    print("building catlas")
    catlas = CAtlas.build(project)
    if catlas is None:
        print("ERROR: catlas is empty!? exiting.")
        return -1
    print("catlas built")
    print("writing graph")
    with open(project.catlasfilename, 'w') as cfile:
        catlas.write(cfile)
    return 0
if __name__ == "__main__":
    # Command-line entry point: parse arguments, build the catlas, log the
    # invocation into the project directory, and exit with main's status.
    parser = argparse.ArgumentParser()
    parser.add_argument("project", help="Project directory",
                        type=str)
    parser.add_argument("radius", help="Catlas radius", type=int)
    parser.add_argument("-n", "--no_checkpoint", action='store_true',
                        help="Do not read or write checkpoints")
    parser.add_argument("-l", "--level", type=int,
                        help="Level at which to load the checkpoint."
                        "Defaults to highest level saved when not invoked.")
    args = parser.parse_args()
    exit_val = main(args)
    # prof = cProfile.Profile()
    # prof.run("main(args)")
    # prof.print_stats('tottime')
    log_command(args.project, sys.argv)
    sys.exit(exit_val)
| 36.590426 | 79 | 0.559674 |
import argparse
import cProfile
import os
import sys
import tempfile
import gzip
import copy
from .rdomset import rdomset, domination_graph
from .graph_io import read_from_gxt, write_to_gxt
from .graph import Graph
from spacegraphcats.utils.logging import log_command
from io import TextIOWrapper
from collections import defaultdict
from typing import List, Dict, Set
UPPER_RADIUS = 1
class Project(object):
    """State of a catlas build for one project directory.

    Tracks the working graph, the node-id counter, the current level, and the
    file names used for input, output, and checkpointing.
    """
    def __init__(self, directory, r, checkpoint=True):
        self.dir = directory          # project directory path
        self.r = r                    # domination radius for level 1
        self.checkpoint = checkpoint  # whether to read/write checkpoints
        self.graph = None             # current working graph
        self.idx = 0                  # next free CAtlas node id
        self.level = 1                # level currently being built
        self.level_nodes = None       # CAtlas nodes of the previous level
        # dummy root; children are accumulated as levels complete
        self.root = CAtlas(self.idx, -1, self.level, list())
        self.domfilename = os.path.join(self.dir, "first_doms.txt")
        self.graphfilename = os.path.join(self.dir, "cdbg.gxt")
        self.catlasfilename = os.path.join(self.dir, "catlas.csv")
    def existing_checkpoints(self):
        """Return the sorted levels of checkpoint files matching this radius."""
        files = []
        for f in os.listdir(self.dir):
            name, ext = os.path.splitext(f)
            if ext == ".checkpoint":
                # checkpoint files are named "<radius>_<level>.checkpoint"
                r, level = map(int, name.split("_"))
                if r == self.r:
                    files.append(level)
        return list(sorted(files))
    def cp_name(self, level):
        """Return the checkpoint file path for the given level."""
        return os.path.join(self.dir,
                            "{}_{}.checkpoint".format(self.r, level))
    def load_furthest_checkpoint(self):
        """Resume from the highest-level checkpoint, or read the raw graph."""
        existing = self.existing_checkpoints()
        # just read G from the graph file
        if len(existing) == 0 or not self.checkpoint:
            print("Loading graph from {}".format(self.graphfilename))
            # we only need to set the graph variable since index, level, and
            # previous nodes have the proper values by default
            with open(self.graphfilename, 'r') as graph_file:
                self.graph = read_from_gxt(graph_file, self.r, False)
        else:
            self.load_checkpoint(existing[-1])
    def load_checkpoint(self, level):
        """Resume the build from the checkpoint written after `level`."""
        if not self.checkpoint:
            raise IOError("I told you I didn't want to load from checkpoint!")
        print("Loading results of building level {}".format(level))
        tmpf = tempfile.TemporaryFile(mode='r+')
        infile = self.cp_name(level)
        with gzip.open(infile, 'rt') as f:
            # copy the catlas section (up to the "###" separator) aside
            for line in f:
                if line == "###\n":
                    break
                tmpf.write(line)
            # the rest of the file is the graph in gxt format
            self.graph = read_from_gxt(f, radius=UPPER_RADIUS, directed=False,
                                       sequential=False)
        # rewind the temporary file and read the catlas out of it
        tmpf.seek(0)
        root = CAtlas.read(tmpf)
        tmpf.close()
        # the checkpointed CAtlas has a dummy root. The nodes of the current
        # level must be detached from the root because we haven't
        # finished constructing their parents.
        unfinished_idx = -1*len(self.graph)
        unfinished = root.children[unfinished_idx:]
        root.children = root.children[:unfinished_idx]
        self.level_nodes = {node.vertex: node for node in unfinished}
        self.idx = root.idx
        self.level = root.level - 1
        self.root = root
    def _save(self):
        """Write the current partial computation to a gzipped checkpoint."""
        outfile = self.cp_name(self.level - 1)
        print("Writing to file {}".format(outfile))
        with gzip.open(outfile, 'wt') as f:
            # make a dummy root to write the catlas using catlas.write method
            # we add all current level nodes as children of the root
            root = CAtlas(self.idx, -1, self.level,
                          copy.copy(self.root.children))
            root.children.extend(self.level_nodes.values())
            root.write(f)
            f.write("###\n")
            write_to_gxt(f, self.graph)
    def save_checkpoint(self):
        """Write out a partial computation (no-op when checkpointing is off)."""
        if not self.checkpoint:
            return
        else:
            self._save()
class CAtlas(object):
    """Hierarchical atlas for querying graphs.

    Nodes form a DAG: level-1 leaves correspond to cDBG vertices; each higher
    level is built from a dominating set of the contracted graph below it.
    """
    # stop contracting once a level has at most this many dominators
    LEVEL_THRESHOLD = 10
    def __init__(self, idx, vertex, level, children):
        # idx: integer id; the root always has the largest id
        # vertex: name of the corresponding cDBG vertex
        # level: height in the hierarchy (leaves at level 1)
        # children: CAtlas nodes for which this node is a parent
        self.idx = idx
        self.vertex = vertex
        self.children = children
        self.level = level
    @staticmethod
    def build(proj, benchmark_only=False):
        """Build a CAtlas for the given Project, mutating `proj` as it goes."""
        # keep creating progressively smaller graphs until we hit the level
        # threshold or steady state
        while True:
            print()
            # the base level should have a large radius, others are just 1
            if proj.level == 1:
                r = proj.r
            else:
                r = UPPER_RADIUS
            # build the current level
            nodes, domgraph, dominated = CAtlas._build_level(proj.graph,
                                                             r,
                                                             proj.level,
                                                             proj.idx,
                                                             proj.level_nodes)
            print("Catlas level {} complete".format(proj.level))
            # at the bottom level we need to write out the domination
            # assignment
            if proj.level == 1 and not benchmark_only:
                with open(proj.domfilename, 'w') as domfile:
                    for v, shadow in dominated.items():
                        domstr = str(v)
                        for u in shadow:
                            domstr += " {}".format(u)
                        domstr += "\n"
                        domfile.write(domstr)
            # increment the index and level now so they are correctly adjusted
            # if we happen to return
            proj.idx += len(nodes)
            proj.level += 1
            # Keeping isolated vertices as parents of themselves blows up the
            # CAtlas size unnecessarily. We need to immediately make them
            # children of the root.
            for v in dominated:
                if v not in domgraph:
                    proj.root.children.append(nodes.pop(v))
            # quit if our level is sufficiently small (or reached steady state)
            if len(domgraph) <= CAtlas.LEVEL_THRESHOLD or \
                    len(domgraph) == len(proj.graph):
                break
            # prep for the next iteration
            proj.graph = domgraph
            proj.level_nodes = nodes
            # write level results to the checkpoint file if applicable
            proj.save_checkpoint()
        if benchmark_only:
            return None
        if not nodes:
            return None
        # place all remaining nodes as children of the root
        proj.root.children.extend(nodes.values())
        proj.root.level = proj.level
        proj.root.vertex = list(nodes.keys())[0]
        proj.root.idx = proj.idx
        return proj.root
    @staticmethod
    def _build_level(graph: Graph, radius: int, level: int, min_id: int=0,
                     prev_nodes: List[int]=None):
        """Build one dominating-set level; return (nodes, domgraph, dominated)."""
        # find the dominating set of the current graph
        domset = rdomset(graph, radius)
        # dominated maps dominating vertices to a list of the vertices they
        # optimally dominate
        domgraph, dominated = domination_graph(graph, domset, radius)
        # create the CAtlas nodes
        nodes = {}  # type: Dict[int, CAtlas]
        for idx, v in enumerate(domset):
            # if no previous nodes were supplied, we assume we are on the
            # bottom level and thus the children field is empty
            if prev_nodes is None:
                children = []  # type: List[int]
            else:
                children = [prev_nodes[u] for u in dominated[v]]
            nodes[v] = CAtlas(min_id+idx, v, level, children)
        return nodes, domgraph, dominated
    def leaves(self, visited: Set[object]=None) -> Set[object]:
        """Find the descendants of this node with no children."""
        # this function is recursive so we track nodes we already visited
        if visited is None:
            visited = set([self])
        # base case: leaves live at level 1
        if self.level == 1:
            return set([self])
        # otherwise gather the leaves of the children
        res = set()  # type: Set[object]
        for c in self.children:
            if c not in visited:
                visited.add(c)
                res |= c.leaves(visited)
        return res
    def write(self, outfile: TextIOWrapper):
        """Write the connectivity of the CAtlas to file (one CSV row per node)."""
        # traversal order does not matter; DFS is easiest to implement
        stack = [self]
        seen = set()
        while len(stack) > 0:
            curr = stack.pop()
            # row format: idx,vertex,level,space-separated child ids
            child_str = " ".join(str(child.idx) for child in curr.children)
            outfile.write("{},{},{},{}\n".format(curr.idx,
                                                 curr.vertex,
                                                 curr.level,
                                                 child_str))
            # nodes already seen don't get re-added to the stack
            seen.add(curr)
            stack.extend(filter(lambda x: x not in seen, curr.children))
    @classmethod
    def read(cls, catlas_file):
        """Load the catlas Directed Acyclic Graph; return the root node."""
        children = []
        nodes = []
        # load everything from the catlas file
        for line in catlas_file:
            catlas_node, cdbg_node, level, beneath = line.strip().split(',')
            level = int(level)
            catlas_node = int(catlas_node)
            cdbg_node = int(cdbg_node)
            # extend arrays as necessary
            if len(children) <= catlas_node:
                for i in range(catlas_node - len(children) + 1):
                    children.append([])
                    nodes.append(None)
            # parse out the children
            beneath = beneath.strip()
            if beneath:
                beneath = beneath.split(' ')
                children[catlas_node].extend(map(int, beneath))
            # make the new node with empty children
            node = cls(catlas_node, cdbg_node, level, [])
            nodes[catlas_node] = node
        # update the nodes with pointers to their children
        for i, n in enumerate(nodes):
            for child in children[n.idx]:
                n.children.append(nodes[child])
        # the root is always the last (highest-id) node
        return nodes[-1]
def main(args):
    """Build a CAtlas for the provided input graph.

    Returns 0 on success, -1 when the resulting catlas is empty.
    """
    # unpack command line arguments
    r = args.radius
    proj_dir = args.project
    checkpoint = not args.no_checkpoint
    level = args.level
    # set up project state (file names, counters, optional checkpointing)
    proj = Project(proj_dir, r, checkpoint)
    print("reading graph")
    if level:
        # resume from an explicitly requested checkpoint level
        print("Loading checkpoint at level {}".format(level))
        proj.load_checkpoint(level)
    else:
        # resume from the furthest checkpoint, or the raw graph if none exists
        print("Loading checkpoint")
        proj.load_furthest_checkpoint()
    print("reading complete")
    print("building catlas")
    cat = CAtlas.build(proj)
    if cat is None:
        print("ERROR: catlas is empty!? exiting.")
        return -1
    print("catlas built")
    print("writing graph")
    with open(proj.catlasfilename, 'w') as cfile:
        cat.write(cfile)
    return 0
if __name__ == "__main__":
    # Command-line entry point: parse arguments, build the catlas, log the
    # invocation into the project directory, and exit with main's status.
    parser = argparse.ArgumentParser()
    parser.add_argument("project", help="Project directory",
                        type=str)
    parser.add_argument("radius", help="Catlas radius", type=int)
    parser.add_argument("-n", "--no_checkpoint", action='store_true',
                        help="Do not read or write checkpoints")
    parser.add_argument("-l", "--level", type=int,
                        help="Level at which to load the checkpoint."
                        "Defaults to highest level saved when not invoked.")
    args = parser.parse_args()
    exit_val = main(args)
    # prof = cProfile.Profile()
    # prof.run("main(args)")
    # prof.print_stats('tottime')
    log_command(args.project, sys.argv)
    sys.exit(exit_val)
| true | true |
f72b457385a4ebe5715f4121fb45c8d4b9cc7073 | 4,175 | py | Python | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | 1 | 2018-01-17T02:17:55.000Z | 2018-01-17T02:17:55.000Z | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | 16 | 2017-02-03T06:31:39.000Z | 2020-03-03T15:15:09.000Z | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABCMeta, abstractmethod
class Component(metaclass=ABCMeta):
    """
    Interface definition for a frontier component

    The :class:`Component <frontera.core.components.Component>` object is the base class for frontier
    :class:`Middleware <frontera.core.components.Middleware>` and
    :class:`Backend <frontera.core.components.Backend>` objects.

    :class:`FrontierManager <frontera.core.manager.FrontierManager>` communicates with the active components
    using the hook methods listed below.

    Implementations are different for :class:`Middleware <frontera.core.components.Middleware>` and
    :class:`Backend <frontera.core.components.Backend>` objects, therefore methods are not fully described here
    but in their corresponding section.
    """
    # The metaclass is declared via the class keyword: the old
    # ``__metaclass__ = ABCMeta`` attribute is a Python 2 idiom that Python 3
    # silently ignores, which left @abstractmethod unenforced.
    component_name = 'Base Component'

    @abstractmethod
    def frontier_start(self):
        """
        Called when the frontier starts, see :ref:`starting/stopping the frontier <frontier-start-stop>`.
        """
        pass

    @abstractmethod
    def frontier_stop(self):
        """
        Called when the frontier stops, see :ref:`starting/stopping the frontier <frontier-start-stop>`.
        """
        pass

    @abstractmethod
    def add_seeds(self, seeds):
        """
        This method is called when new seeds are added to the frontier.

        :param list seeds: A list of :class:`Request <frontera.core.models.Request>` objects.
        """
        pass

    @abstractmethod
    def page_crawled(self, response):
        """
        This method is called each time a page has been crawled.

        :param object response: The :class:`Response <frontera.core.models.Response>` object for the crawled page.
        """
        pass

    @abstractmethod
    def links_extracted(self, request, links):
        """
        This method is called each time links are extracted from a crawled page.

        :param object request: The :class:`Request <frontera.core.models.Request>` object.
        :param list links: A list of :class:`Request <frontera.core.models.Request>` objects generated from \
        the links extracted for the crawled page.
        """
        pass

    @abstractmethod
    def request_error(self, page, error):
        """
        This method is called each time an error occurs when crawling a page.

        :param object page: The :class:`Request <frontera.core.models.Request>` object that failed.
        :param string error: A string identifier for the error.
        """
        pass

    @property
    def name(self):
        """
        The component name
        """
        return self.component_name

    @classmethod
    def from_manager(cls, manager):
        """
        Class method called from :class:`FrontierManager <frontera.core.manager.FrontierManager>` passing the
        manager itself.

        Example of usage::

            def from_manager(cls, manager):
                return cls(settings=manager.settings)
        """
        return cls()
class Backend(Component, metaclass=ABCMeta):
    """Interface definition for a Frontier Backend"""
    # metaclass= replaces the Python 2-only ``__metaclass__`` attribute so
    # @abstractmethod is enforced on Python 3.
    component_name = 'Base Backend'

    @abstractmethod
    def get_next_requests(self, max_n_requests, **kwargs):
        """
        Returns a list of next requests to be crawled.

        :param int max_n_requests: Maximum number of requests to be returned by this method.
        :param dict kwargs: Parameters from the downloader component.

        :return: list of :class:`Request <frontera.core.models.Request>` objects.
        """
        raise NotImplementedError
class Middleware(Component, metaclass=ABCMeta):
    """Interface definition for a Frontier Middleware"""
    # metaclass= replaces the Python 2-only ``__metaclass__`` attribute so
    # abstract-method enforcement works on Python 3.
    component_name = 'Base Middleware'
class CanonicalSolver(Component, metaclass=ABCMeta):
    """Interface definition for a Frontera Canonical Solver"""
    # metaclass= replaces the Python 2-only ``__metaclass__`` attribute so
    # @abstractmethod is enforced on Python 3.
    component_name = 'Base CanonicalSolver'

    @abstractmethod
    def get_canonical_url(self, response):
        """
        Returns canonical URL string for response.

        :param object response: The :class:`Response <frontera.core.models.Response>` object for the crawled page.

        :return: str
        """
        raise NotImplementedError
| 31.390977 | 114 | 0.662275 | from abc import ABCMeta, abstractmethod
class Component(object):
    """Interface definition for a frontier component: the base class for
    frontier Middleware and Backend objects. The FrontierManager drives the
    hook methods declared below."""
    # NOTE(review): __metaclass__ is a Python 2 idiom; Python 3 ignores this
    # attribute, so @abstractmethod is not actually enforced here.
    __metaclass__ = ABCMeta
    component_name = 'Base Component'
    @abstractmethod
    def frontier_start(self):
        """Called when the frontier starts."""
        pass
    @abstractmethod
    def frontier_stop(self):
        """Called when the frontier stops."""
        pass
    @abstractmethod
    def add_seeds(self, seeds):
        """Called when new seed Requests are added to the frontier."""
        pass
    @abstractmethod
    def page_crawled(self, response):
        """Called each time a page has been crawled (receives the Response)."""
        pass
    @abstractmethod
    def links_extracted(self, request, links):
        """Called with the Request and the Requests built from its extracted links."""
        pass
    @abstractmethod
    def request_error(self, page, error):
        """Called each time crawling a page fails; `error` is a string identifier."""
        pass
    @property
    def name(self):
        """The component name."""
        return self.component_name
    @classmethod
    def from_manager(cls, manager):
        """Build the component from a FrontierManager instance."""
        return cls()
class Backend(Component):
    """Interface definition for a Frontier Backend."""
    # NOTE(review): __metaclass__ has no effect on Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base Backend'
    @abstractmethod
    def get_next_requests(self, max_n_requests, **kwargs):
        """Return up to max_n_requests Request objects to crawl next;
        kwargs carries parameters from the downloader component."""
        raise NotImplementedError
class Middleware(Component):
    """Interface definition for a Frontier Middleware."""
    # NOTE(review): __metaclass__ has no effect on Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base Middleware'
class CanonicalSolver(Component):
    """Interface definition for a Frontera Canonical Solver."""
    # NOTE(review): __metaclass__ has no effect on Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base CanonicalSolver'
    @abstractmethod
    def get_canonical_url(self, response):
        """Return the canonical URL string for the given Response."""
        raise NotImplementedError
| true | true |
f72b4575023d53c977d16e195686c5c67ffc5f9f | 3,076 | py | Python | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | 5 | 2019-08-31T11:00:40.000Z | 2021-04-15T10:05:35.000Z | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | null | null | null | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | 1 | 2020-05-27T08:00:53.000Z | 2020-05-27T08:00:53.000Z | import numpy as np
from tqdm import tqdm
class Hypergraph(object):
    """In-memory hypergraph with weighted (multi-)hyperedges.

    graph_type '0' marks a homogeneous graph; '1' marks a heterogeneous one,
    in which case nums_type gives the number of vertices of each node type
    and node ids are int-like values partitioned by the cumulative sums.
    """

    def __init__(self, graph_type='0', nums_type=None):
        self._nodes = {}  # node -> {'neighbors': set(, 'type': int)}
        self._edges = {}  # sorted node tuple -> multiplicity (weight)
        self.graph_type = graph_type
        self.nums_type = nums_type
        # cumulative type boundaries; only meaningful for heterogeneous graphs
        self.cumsum = np.cumsum(self.nums_type) if self.graph_type == '1' else None

    def add_edge(self, edge_name, e):
        """Insert hyperedge `e` (a node list); re-adding raises its weight.

        `edge_name` is accepted for interface compatibility but unused.
        """
        edge = tuple(sorted(e))
        self._edges[edge] = self._edges.get(edge, 0) + 1
        for node in edge:
            record = self._nodes.setdefault(node, {})
            nbrs = record.setdefault('neighbors', set())
            nbrs.update(other for other in edge if other != node)
            if self.graph_type == '1':
                # type of a node = index of the first cumulative boundary
                # strictly greater than its integer id
                for type_idx, bound in enumerate(self.cumsum):
                    if int(node) < bound:
                        break
                record['type'] = type_idx

    def edge_weight(self, e):
        """Multiplicity of hyperedge e (0 when absent)."""
        return self._edges.get(e, 0)

    def nodes(self):
        """View of all nodes."""
        return self._nodes.keys()

    def edges(self):
        """View of all hyperedges (sorted node tuples)."""
        return self._edges.keys()

    def neighbors(self, n):
        """Set of nodes sharing at least one hyperedge with n."""
        return self._nodes[n]['neighbors']

    def node_type(self, n):
        """Type index of node n (only set for heterogeneous graphs)."""
        return self._nodes[n]['type']
def get_indecom_factor(G, r):
    """
    Get the indecomposable factor of heterogeneous hyper-network G.

    For each node type i, compare how often a hyperedge's (k-1)-subedge (the
    edge with its type-i node removed) is shared among real edges versus among
    r*m randomly sampled candidate edges.

    G: heterogeneous Hypergraph (uses G.edges(), G.nums_type, G.cumsum)
    r: number of random samples drawn per real hyperedge
    """
    edges = list(G.edges())
    # set for O(1) membership tests in the sampling loop (the original list
    # scan made sampling O(m^2 * r))
    edge_set = set(edges)
    k = len(G.nums_type)
    m = len(edges)
    # dcnt[i][subedge]: number of real edges containing this (k-1)-subedge
    dcnt = [{} for _ in range(k)]
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            dcnt[i][subedge] = dcnt[i].get(subedge, 0)+1
    # fraction of real edges whose type-i subedge is shared with another edge
    factors = [0]*k
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            if dcnt[i].get(subedge, 0) > 1:
                factors[i] += 1
    factors = [factor/m for factor in factors]
    cumsum = [0]+list(G.cumsum)
    ps = [0]*k
    neg_num = m*r  # sample enough random edges
    for _ in tqdm(range(neg_num), ascii=True):
        # draw one node id uniformly from each type's id range
        random_edge = [np.random.randint(cumsum[i], cumsum[i+1])
                       for i in range(k)]
        for i in range(k):
            subedge = tuple(sorted(random_edge[:i]+random_edge[i+1:]))
            # count the sample when its subedge is shared by >1 real edges, or
            # by exactly one real edge that is not the sample itself
            if dcnt[i].get(subedge, 0) > 1 or \
                    (dcnt[i].get(subedge, 0) > 0 and tuple(random_edge) not in edge_set):
                ps[i] += 1
    ps = [p/neg_num for p in ps]
    # NOTE(review): raises ZeroDivisionError when factors[i] == 0; presumably
    # real inputs always share some subedges -- confirm with callers.
    indecom_factors = [ps[i]/factors[i] for i in range(k)]
    return indecom_factors
| 28.747664 | 106 | 0.545189 | import numpy as np
from tqdm import tqdm
class Hypergraph(object):
    """In-memory hypergraph with weighted (multi-)hyperedges.

    graph_type: '0' homogeneous, '1' heterogeneous. For heterogeneous graphs,
    nums_type is the node count per type; node ids are int-like values
    partitioned by the cumulative sums.
    """
    def __init__(self,graph_type='0',nums_type=None):
        self._nodes = {}  # node -> {'neighbors': set(, 'type': int)}
        self._edges = {}  # sorted node tuple -> multiplicity (edge weight)
        self.graph_type = graph_type
        self.nums_type = nums_type
        # cumulative type boundaries; None for homogeneous graphs
        self.cumsum = np.cumsum(self.nums_type) if self.graph_type=='1' else None
    def add_edge(self, edge_name, e):
        """Add hyperedge e (a node list); re-adding an existing edge raises
        its weight. edge_name is unused."""
        edge = tuple(sorted(e))
        self._edges[edge] = self._edges.get(edge,0)+1
        for v in edge:
            node_dict = self._nodes.get(v, {})
            neighbors = node_dict.get('neighbors', set())
            for v0 in edge:
                if v0!=v:
                    neighbors.add(v0)
            node_dict['neighbors'] = neighbors
            if self.graph_type=='1':
                # type of v = index of first cumulative boundary above int(v)
                for i,k in enumerate(self.cumsum):
                    if int(v) < k:
                        break
                node_dict['type'] = i
            self._nodes[v] = node_dict
    def edge_weight(self, e):
        """Multiplicity of hyperedge e (0 when absent)."""
        return self._edges.get(e,0)
    def nodes(self):
        """View of all nodes."""
        return self._nodes.keys()
    def edges(self):
        """View of all hyperedges (sorted node tuples)."""
        return self._edges.keys()
    def neighbors(self, n):
        """Set of nodes sharing at least one hyperedge with n."""
        return self._nodes[n]['neighbors']
    def node_type(self, n):
        """Type index of node n (only set for heterogeneous graphs)."""
        return self._nodes[n]['type']
def get_indecom_factor(G, r):
    """Estimate the per-type indecomposable factors of heterogeneous
    hyper-network G by comparing subedge sharing among real edges with
    r*m randomly sampled candidate edges."""
    edges = list(G.edges())
    k = len(G.nums_type)
    m = len(edges)
    dcnt = []
    for i in range(k):
        dcnt.append({})
    # dcnt[i][subedge]: number of real edges containing this (k-1)-subedge
    # (the edge with its type-i node removed)
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            dcnt[i][subedge] = dcnt[i].get(subedge,0)+1
    # fraction of real edges whose type-i subedge is shared with another edge
    factors = [0]*k
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            if dcnt[i].get(subedge,0)>1:
                factors[i]+=1
    factors = [factor/m for factor in factors]
    cumsum = [0]+list(G.cumsum)
    ps = [0]*k
    neg_num = m*r  # number of random candidate edges to sample
    # NOTE(review): the loop variable i is immediately shadowed by the inner
    # loops below; the membership test on the `edges` list is O(m) per sample.
    for i in tqdm(range(neg_num),ascii=True):
        # draw one node id uniformly from each type's id range
        random_edge = []
        for i in range(k):
            random_edge.append(np.random.randint(cumsum[i],cumsum[i+1]))
        for i in range(k):
            subedge = tuple(sorted(random_edge[:i]+random_edge[i+1:]))
            # count the sample when its subedge is shared by >1 real edges, or
            # by exactly one real edge that is not the sample itself
            if dcnt[i].get(subedge,0)>1 or (dcnt[i].get(subedge,0)>0 and tuple(random_edge) not in edges):
                ps[i]+=1
    ps = [p/neg_num for p in ps]
    indecom_factors = [ps[i]/factors[i] for i in range(k)]
    return indecom_factors
| true | true |
f72b45b62702b41808f02cf163bf8b5b63de5b30 | 5,644 | py | Python | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | import configparser
from pathlib import Path
import unittest
import uuid
from TM1py import Element, Hierarchy, Dimension
from TM1py.Objects import Cube
from TM1py.Objects import Rules
from TM1py.Services import TM1Service
# TM1 connection settings are read from config.ini next to this test file.
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
# Prefix for every object these tests create, so they are easy to identify.
PREFIX = "TM1py_Tests_Cube_"
class TestCubeMethods(unittest.TestCase):
    """Integration tests for the TM1py cube service.

    These tests require a live TM1 server reachable through the [tm1srv01]
    section of config.ini; setUpClass builds three 1000-element dimensions
    and a test cube on that server.
    """
    tm1 = None
    cube_name = PREFIX + "some_name"
    dimension_names = [
        PREFIX + "dimension1",
        PREFIX + "dimension2",
        PREFIX + "dimension3"]

    @classmethod
    def setUpClass(cls):
        """Connect to TM1 and create the test dimensions and cube."""
        cls.tm1 = TM1Service(**config['tm1srv01'])
        # Build Dimensions
        for dimension_name in cls.dimension_names:
            elements = [Element('Element {}'.format(str(j)), 'Numeric') for j in range(1, 1001)]
            hierarchy = Hierarchy(dimension_name=dimension_name,
                                  name=dimension_name,
                                  elements=elements)
            dimension = Dimension(dimension_name, [hierarchy])
            if not cls.tm1.dimensions.exists(dimension.name):
                cls.tm1.dimensions.create(dimension)
        # Build Cube
        cube = Cube(cls.cube_name, cls.dimension_names)
        if not cls.tm1.cubes.exists(cls.cube_name):
            cls.tm1.cubes.create(cube)
        # NOTE(review): the cube is then dropped and recreated with empty
        # rules -- presumably to guarantee a clean rules state; confirm.
        c = Cube(cls.cube_name, dimensions=cls.dimension_names, rules=Rules(''))
        if cls.tm1.cubes.exists(c.name):
            cls.tm1.cubes.delete(c.name)
        cls.tm1.cubes.create(c)

    def test_get_cube(self):
        """get returns a Cube with the right dimensions; all = control + model."""
        c = self.tm1.cubes.get(self.cube_name)
        self.assertIsInstance(c, Cube)
        self.assertEqual(c.dimensions, self.dimension_names)
        cubes = self.tm1.cubes.get_all()
        control_cubes = self.tm1.cubes.get_control_cubes()
        model_cubes = self.tm1.cubes.get_model_cubes()
        self.assertEqual(len(cubes), len(control_cubes + model_cubes))

    def test_update_cube(self):
        """Updating a cube persists its rules on the server."""
        c = self.tm1.cubes.get(self.cube_name)
        c.rules = Rules("SKIPCHECK;\nFEEDERS;")
        self.tm1.cubes.update(c)
        # test if rule was actually updated
        c = self.tm1.cubes.get(self.cube_name)
        self.assertEqual(c.rules.text, "SKIPCHECK;\nFEEDERS;")
        self.assertTrue(c.skipcheck)

    def test_get_control_cubes(self):
        """Control cubes all carry the '}' name prefix."""
        control_cubes = self.tm1.cubes.get_control_cubes()
        self.assertGreater(len(control_cubes), 0)
        for cube in control_cubes:
            self.assertTrue(cube.name.startswith("}"))

    def test_get_model_cubes(self):
        """Model cubes never carry the '}' name prefix."""
        model_cubes = self.tm1.cubes.get_model_cubes()
        self.assertGreater(len(model_cubes), 0)
        for cube in model_cubes:
            self.assertFalse(cube.name.startswith("}"))

    def test_get_dimension_names(self):
        """Dimension names round-trip through the server unchanged."""
        dimension_names = self.tm1.cubes.get_dimension_names(self.cube_name)
        self.assertEqual(dimension_names, self.dimension_names)

    def test_get_random_intersection(self):
        """Two random intersections differ (with and without unique names)."""
        intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
        intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
        self.assertNotEqual(intersection1, intersection2)
        intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
        intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
        self.assertNotEqual(intersection1, intersection2)

    def test_exists(self):
        """exists is True for the test cube, False for a random name."""
        self.assertTrue(self.tm1.cubes.exists(self.cube_name))
        self.assertFalse(self.tm1.cubes.exists(uuid.uuid4()))

    def test_create_delete_cube(self):
        """Creating/deleting a cube changes the total cube count by one."""
        cube_name = PREFIX + "Some_Other_Name"
        # element with index 0 is Sandboxes
        dimension_names = self.tm1.dimensions.get_all_names()[1:3]
        cube = Cube(cube_name, dimension_names)
        all_cubes_before = self.tm1.cubes.get_all_names()
        self.tm1.cubes.create(cube)
        all_cubes_after = self.tm1.cubes.get_all_names()
        self.assertEqual(
            len(all_cubes_before) + 1,
            len(all_cubes_after))
        self.assertEqual(
            self.tm1.cubes.get_dimension_names(cube_name),
            dimension_names)
        all_cubes_before = self.tm1.cubes.get_all_names()
        self.tm1.cubes.delete(cube_name)
        all_cubes_after = self.tm1.cubes.get_all_names()
        self.assertEqual(len(all_cubes_before) - 1, len(all_cubes_after))

    def test_get_storage_dimension_order(self):
        """Storage order defaults to the creation order."""
        dimensions = self.tm1.cubes.get_storage_dimension_order(cube_name=self.cube_name)
        self.assertEqual(dimensions, self.dimension_names)

    def test_update_storage_dimension_order(self):
        """Reversing the storage dimension order is reflected by the server."""
        self.tm1.cubes.update_storage_dimension_order(
            cube_name=self.cube_name,
            dimension_names=reversed(self.dimension_names))
        dimensions = self.tm1.cubes.get_storage_dimension_order(self.cube_name)
        self.assertEqual(
            list(reversed(dimensions)),
            self.dimension_names)

    def test_load(self):
        """Loading the cube into server memory succeeds."""
        response = self.tm1.cubes.load(cube_name=self.cube_name)
        self.assertTrue(response.ok)

    def test_unload(self):
        """Unloading the cube from server memory succeeds."""
        response = self.tm1.cubes.unload(cube_name=self.cube_name)
        self.assertTrue(response.ok)

    @classmethod
    def tearDownClass(cls):
        """Remove the test cube and dimensions, then log out."""
        cls.tm1.cubes.delete(cls.cube_name)
        for dimension in cls.dimension_names:
            cls.tm1.dimensions.delete(dimension)
        cls.tm1.logout()
if __name__ == '__main__':
    # Run the integration tests directly (requires a reachable TM1 server).
    unittest.main()
| 38.394558 | 108 | 0.677711 | import configparser
from pathlib import Path
import unittest
import uuid
from TM1py import Element, Hierarchy, Dimension
from TM1py.Objects import Cube
from TM1py.Objects import Rules
from TM1py.Services import TM1Service
# TM1 connection settings are read from config.ini next to this test file.
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
# Prefix for every object these tests create, so they are easy to identify.
PREFIX = "TM1py_Tests_Cube_"
class TestCubeMethods(unittest.TestCase):
tm1 = None
cube_name = PREFIX + "some_name"
dimension_names = [
PREFIX + "dimension1",
PREFIX + "dimension2",
PREFIX + "dimension3"]
@classmethod
def setUpClass(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
for dimension_name in cls.dimension_names:
elements = [Element('Element {}'.format(str(j)), 'Numeric') for j in range(1, 1001)]
hierarchy = Hierarchy(dimension_name=dimension_name,
name=dimension_name,
elements=elements)
dimension = Dimension(dimension_name, [hierarchy])
if not cls.tm1.dimensions.exists(dimension.name):
cls.tm1.dimensions.create(dimension)
cube = Cube(cls.cube_name, cls.dimension_names)
if not cls.tm1.cubes.exists(cls.cube_name):
cls.tm1.cubes.create(cube)
c = Cube(cls.cube_name, dimensions=cls.dimension_names, rules=Rules(''))
if cls.tm1.cubes.exists(c.name):
cls.tm1.cubes.delete(c.name)
cls.tm1.cubes.create(c)
def test_get_cube(self):
c = self.tm1.cubes.get(self.cube_name)
self.assertIsInstance(c, Cube)
self.assertEqual(c.dimensions, self.dimension_names)
cubes = self.tm1.cubes.get_all()
control_cubes = self.tm1.cubes.get_control_cubes()
model_cubes = self.tm1.cubes.get_model_cubes()
self.assertEqual(len(cubes), len(control_cubes + model_cubes))
def test_update_cube(self):
c = self.tm1.cubes.get(self.cube_name)
c.rules = Rules("SKIPCHECK;\nFEEDERS;")
self.tm1.cubes.update(c)
c = self.tm1.cubes.get(self.cube_name)
self.assertEqual(c.rules.text, "SKIPCHECK;\nFEEDERS;")
self.assertTrue(c.skipcheck)
def test_get_control_cubes(self):
control_cubes = self.tm1.cubes.get_control_cubes()
self.assertGreater(len(control_cubes), 0)
for cube in control_cubes:
self.assertTrue(cube.name.startswith("}"))
def test_get_model_cubes(self):
model_cubes = self.tm1.cubes.get_model_cubes()
self.assertGreater(len(model_cubes), 0)
for cube in model_cubes:
self.assertFalse(cube.name.startswith("}"))
def test_get_dimension_names(self):
dimension_names = self.tm1.cubes.get_dimension_names(self.cube_name)
self.assertEqual(dimension_names, self.dimension_names)
def test_get_random_intersection(self):
intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
self.assertNotEqual(intersection1, intersection2)
intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
self.assertNotEqual(intersection1, intersection2)
def test_exists(self):
    """exists() is True for the fixture cube and False for a random name."""
    self.assertTrue(self.tm1.cubes.exists(self.cube_name))
    # Fix: uuid.uuid4() returns a UUID object, but the service API expects
    # the cube name as a string -- convert explicitly.
    self.assertFalse(self.tm1.cubes.exists(str(uuid.uuid4())))
def test_create_delete_cube(self):
    # Build a throwaway cube from two dimensions that already exist.
    cube_name = PREFIX + "Some_Other_Name"
    dimension_names = self.tm1.dimensions.get_all_names()[1:3]
    cube = Cube(cube_name, dimension_names)
    # Creating the cube must grow the cube count by exactly one and
    # preserve the dimension order it was created with.
    all_cubes_before = self.tm1.cubes.get_all_names()
    self.tm1.cubes.create(cube)
    all_cubes_after = self.tm1.cubes.get_all_names()
    self.assertEqual(
        len(all_cubes_before) + 1,
        len(all_cubes_after))
    self.assertEqual(
        self.tm1.cubes.get_dimension_names(cube_name),
        dimension_names)
    # Deleting it must bring the count back down by one.
    all_cubes_before = self.tm1.cubes.get_all_names()
    self.tm1.cubes.delete(cube_name)
    all_cubes_after = self.tm1.cubes.get_all_names()
    self.assertEqual(len(all_cubes_before) - 1, len(all_cubes_after))
def test_get_storage_dimension_order(self):
    # On a freshly created cube the storage order matches the logical order.
    dimensions = self.tm1.cubes.get_storage_dimension_order(cube_name=self.cube_name)
    self.assertEqual(dimensions, self.dimension_names)
def test_update_storage_dimension_order(self):
    # Store the cube with its dimensions reversed; reading the storage
    # order back and reversing again must reproduce the original order.
    self.tm1.cubes.update_storage_dimension_order(
        cube_name=self.cube_name,
        dimension_names=reversed(self.dimension_names))
    dimensions = self.tm1.cubes.get_storage_dimension_order(self.cube_name)
    self.assertEqual(
        list(reversed(dimensions)),
        self.dimension_names)
def test_load(self):
    # The server must acknowledge the load request with a success status.
    response = self.tm1.cubes.load(cube_name=self.cube_name)
    self.assertTrue(response.ok)
def test_unload(self):
    # The server must acknowledge the unload request with a success status.
    response = self.tm1.cubes.unload(cube_name=self.cube_name)
    self.assertTrue(response.ok)
@classmethod
def tearDownClass(cls):
    # Remove everything the class-level setup created, then end the session.
    cls.tm1.cubes.delete(cls.cube_name)
    for dimension in cls.dimension_names:
        cls.tm1.dimensions.delete(dimension)
    cls.tm1.logout()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f72b4610811bde97505c92b313e84557e3fe0425 | 809 | py | Python | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from timeit import default_timer
class TimeThis:
    """Context manager that measures and prints the wall-clock time of a block.

    Usage::

        with TimeThis("label"):
            do_work()
    """

    def __init__(self, title="TimeThis"):
        # title: label included in the printed timing line.
        self.title = title
        self.start_time = None

    def __enter__(self):
        # default_timer is the highest-resolution clock available for
        # measuring short intervals.
        self.start_time = default_timer()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = default_timer() - self.start_time
        print('[{}] total time: {} sec'.format(self.title, elapsed))
if __name__ == '__main__':
    import time

    # Sanity check: should report roughly one second.
    with TimeThis():
        time.sleep(1)

    # Quadratic approach: repeated string concatenation.
    with TimeThis("Test"):
        text = ''
        for i in range(10 ** 6):
            text += str(i)

    # Linear approach: accumulate parts and join once at the end.
    with TimeThis("Test"):
        items = []
        for i in range(10 ** 6):
            items.append(str(i))
        text = ''.join(items)
| 19.731707 | 94 | 0.566131 |
__author__ = 'ipetrash'
from timeit import default_timer
class TimeThis:
    """Context manager that prints how long the managed block took."""

    def __init__(self, title="TimeThis"):
        # title: label included in the printed timing line.
        self.title = title
        self.start_time = None

    def __enter__(self):
        # Record the start of the block with a high-resolution timer.
        self.start_time = default_timer()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Report elapsed wall-clock time, even if the block raised.
        print('[{}] total time: {} sec'.format(self.title, default_timer() - self.start_time))
if __name__ == '__main__':
    import time

    # Sanity check: should report roughly one second.
    with TimeThis():
        time.sleep(1)

    # Quadratic approach: repeated string concatenation.
    with TimeThis("Test"):
        text = ''
        for i in range(10 ** 6):
            text += str(i)

    # Linear approach: accumulate parts and join once at the end.
    with TimeThis("Test"):
        items = []
        for i in range(10 ** 6):
            items.append(str(i))
        text = ''.join(items)
| true | true |
f72b46c25c47804afbeb964bc15146379d6938f8 | 49,448 | py | Python | lib/sqlalchemy/ext/associationproxy.py | mattkohl/sqlalchemy | edf8e782cf5011cd43a0ee281b9e0b1d1becef1f | [
"MIT"
] | 2 | 2020-02-19T17:50:50.000Z | 2021-02-10T02:52:41.000Z | lib/sqlalchemy/ext/associationproxy.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | null | null | null | lib/sqlalchemy/ext/associationproxy.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | 1 | 2019-08-27T06:57:57.000Z | 2019-08-27T06:57:57.000Z | # ext/associationproxy.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import operator
from .. import exc
from .. import inspect
from .. import orm
from .. import util
from ..orm import collections
from ..orm import interfaces
from ..sql import or_
from ..sql.operators import ColumnOperators
def association_proxy(target_collection, attr, **kw):
    r"""Create an :class:`.AssociationProxy` property.

    The returned descriptor presents a relationship-mapped collection (or a
    scalar relationship) as a collection (or scalar) of simpler values, by
    reading and writing the attribute named *attr* on each member of the
    relationship named *target_collection*.  The proxy mimics the collection
    type of the target -- list, dict or set -- or acts as a plain scalar for
    a one-to-one / ``uselist=False`` relationship.

    :param target_collection: name of the relationship attribute to proxy
     through.  Typically configured with :func:`~sqlalchemy.orm.relationship`;
     may also be a many-to-one or other non-collection relationship.

    :param attr: name of the attribute on the associated instance(s) that
     the proxy exposes.  For a target collection of ``[obj1, obj2]`` the
     proxied list reads as ``[getattr(obj1, attr), getattr(obj2, attr)]``;
     for a scalar relationship it is simply ``getattr(obj, attr)``.

    :param creator: optional callable used to construct new intermediary
     objects when values are added through the proxy.  List- and set-style
     proxies invoke it with the new value; dict-style proxies invoke it with
     ``(key, value)``.  For scalar relationships it is invoked only when the
     target is currently ``None``; otherwise, assignment proxies through to
     ``setattr()`` on the existing associated object.  Defaults to the
     target class constructor.  Multiple proxies mapping to different
     attributes of the same associated class may each supply their own
     creator to build the scalar relationship on demand.

    :param \*\*kw: all remaining keyword arguments are passed through to the
     :class:`.AssociationProxy` constructor.

    """
    return AssociationProxy(target_collection, attr, **kw)
ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces.InspectionAttrInfo):
    """A descriptor that presents a read/write view of an object attribute."""

    is_attribute = True
    extension_type = ASSOCIATION_PROXY

    def __init__(
        self,
        target_collection,
        attr,
        creator=None,
        getset_factory=None,
        proxy_factory=None,
        proxy_bulk_set=None,
        info=None,
        cascade_scalar_deletes=False,
    ):
        """Construct a new :class:`.AssociationProxy`.

        The :func:`.association_proxy` function is provided as the usual
        entrypoint here, though :class:`.AssociationProxy` can be instantiated
        and/or subclassed directly.

        :param target_collection: Name of the collection we'll proxy to,
          usually created with :func:`.relationship`.

        :param attr: Attribute on the collected instances we'll proxy
          for.  For example, given a target collection of [obj1, obj2], a
          list created by this proxy property would look like
          [getattr(obj1, attr), getattr(obj2, attr)]

        :param creator: Optional. When new items are added to this proxied
          collection, new instances of the class collected by the target
          collection will be created.  For list and set collections, the
          target class constructor will be called with the 'value' for the
          new instance.  For dict types, two arguments are passed:
          key and value.

          If you want to construct instances differently, supply a 'creator'
          function that takes arguments as above and returns instances.

        :param cascade_scalar_deletes: when True, indicates that setting
          the proxied value to ``None``, or deleting it via ``del``, should
          also remove the source object.  Only applies to scalar attributes.
          Normally, removing the proxied target will not remove the proxy
          source, as this object may have other state that is still to be
          kept.

          .. versionadded:: 1.3

          .. seealso::

              :ref:`cascade_scalar_deletes` - complete usage example

        :param getset_factory: Optional.  Proxied attribute access is
          automatically handled by routines that get and set values based on
          the `attr` argument for this proxy.

          If you would like to customize this behavior, you may supply a
          `getset_factory` callable that produces a tuple of `getter` and
          `setter` functions.  The factory is called with two arguments, the
          abstract type of the underlying collection and this proxy instance.

        :param proxy_factory: Optional.  The type of collection to emulate is
          determined by sniffing the target collection.  If your collection
          type can't be determined by duck typing or you'd like to use a
          different collection implementation, you may supply a factory
          function to produce those collections.  Only applicable to
          non-scalar relationships.

        :param proxy_bulk_set: Optional, use with proxy_factory.  See
          the _set() method for details.

        :param info: optional, will be assigned to
          :attr:`.AssociationProxy.info` if present.

          .. versionadded:: 1.0.9

        """
        self.target_collection = target_collection
        self.value_attr = attr
        self.creator = creator
        self.getset_factory = getset_factory
        self.proxy_factory = proxy_factory
        self.proxy_bulk_set = proxy_bulk_set
        self.cascade_scalar_deletes = cascade_scalar_deletes

        # Unique attribute name under which per-class
        # AssociationProxyInstance objects (and per-instance collection
        # proxies) are cached; id(self) keeps it distinct per descriptor.
        self.key = "_%s_%s_%s" % (
            type(self).__name__,
            target_collection,
            id(self),
        )
        if info:
            self.info = info

    def __get__(self, obj, class_):
        # Descriptor protocol entry point: delegate to the per-class
        # AssociationProxyInstance; fall back to the bare proxy when no
        # mapped owner can be determined yet.
        if class_ is None:
            return self
        inst = self._as_instance(class_, obj)
        if inst:
            return inst.get(obj)

        # obj has to be None here
        # assert obj is None

        return self

    def __set__(self, obj, values):
        class_ = type(obj)
        return self._as_instance(class_, obj).set(obj, values)

    def __delete__(self, obj):
        class_ = type(obj)
        return self._as_instance(class_, obj).delete(obj)

    def for_class(self, class_, obj=None):
        r"""Return the internal state local to a specific mapped class.

        E.g., given a class ``User``::

            class User(Base):
                # ...

                keywords = association_proxy('kws', 'keyword')

        If we access this :class:`.AssociationProxy` from
        :attr:`.Mapper.all_orm_descriptors`, and we want to view the
        target class for this proxy as mapped by ``User``::

            inspect(User).all_orm_descriptors["keywords"].for_class(User).target_class

        This returns an instance of :class:`.AssociationProxyInstance` that
        is specific to the ``User`` class.   The :class:`.AssociationProxy`
        object remains agnostic of its parent class.

        :param class\_: the class that we are returning state for.

        :param obj: optional, an instance of the class that is required
         if the attribute refers to a polymorphic target, e.g. where we have
         to look at the type of the actual destination object to get the
         complete path.

        .. versionadded:: 1.3 - :class:`.AssociationProxy` no longer stores
           any state specific to a particular parent class; the state is now
           stored in per-class :class:`.AssociationProxyInstance` objects.

        """
        return self._as_instance(class_, obj)

    def _as_instance(self, class_, obj):
        # Fetch (or lazily build and cache) the AssociationProxyInstance
        # for this class; the cache lives on the class under self.key.
        try:
            inst = class_.__dict__[self.key + "_inst"]
        except KeyError:
            owner = self._calc_owner(class_)
            if owner is not None:
                inst = AssociationProxyInstance.for_proxy(self, owner, obj)
                setattr(class_, self.key + "_inst", inst)
            else:
                inst = None

        if inst is not None and not inst._is_canonical:
            # the AssociationProxyInstance can't be generalized
            # since the proxied attribute is not on the targeted
            # class, only on subclasses of it, which might be
            # different.  only return for the specific
            # object's current value
            return inst._non_canonical_get_for_object(obj)
        else:
            return inst

    def _calc_owner(self, target_cls):
        # we might be getting invoked for a subclass
        # that is not mapped yet, in some declarative situations.
        # save until we are mapped
        try:
            insp = inspect(target_cls)
        except exc.NoInspectionAvailable:
            # can't find a mapper, don't set owner. if we are a not-yet-mapped
            # subclass, we can also scan through __mro__ to find a mapped
            # class, but instead just wait for us to be called again against a
            # mapped class normally.
            return None
        else:
            return insp.mapper.class_manager.class_

    def _default_getset(self, collection_class):
        # Build (getter, setter) closures over the proxied attribute name;
        # dict-style collections receive an (obj, key, value) setter.
        attr = self.value_attr
        _getter = operator.attrgetter(attr)

        def getter(target):
            return _getter(target) if target is not None else None

        if collection_class is dict:

            def setter(o, k, v):
                setattr(o, attr, v)

        else:

            def setter(o, v):
                setattr(o, attr, v)

        return getter, setter

    def __repr__(self):
        return "AssociationProxy(%r, %r)" % (
            self.target_collection,
            self.value_attr,
        )
class AssociationProxyInstance(object):
    """A per-class object that serves class- and object-specific results.

    This is used by :class:`.AssociationProxy` when it is invoked
    in terms of a specific class or instance of a class, i.e. when it is
    used as a regular Python descriptor.

    When referring to the :class:`.AssociationProxy` as a normal Python
    descriptor, the :class:`.AssociationProxyInstance` is the object that
    actually serves the information.   Under normal circumstances, its presence
    is transparent::

        >>> User.keywords.scalar
        False

    In the special case that the :class:`.AssociationProxy` object is being
    accessed directly, in order to get an explicit handle to the
    :class:`.AssociationProxyInstance`, use the
    :meth:`.AssociationProxy.for_class` method::

        proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User)

        # view if proxy object is scalar or not
        >>> proxy_state.scalar
        False

    .. versionadded:: 1.3

    """  # noqa

    def __init__(self, parent, owning_class, target_class, value_attr):
        self.parent = parent
        self.key = parent.key
        self.owning_class = owning_class
        self.target_collection = parent.target_collection
        self.collection_class = None
        self.target_class = target_class
        self.value_attr = value_attr

    target_class = None
    """The intermediary class handled by this
    :class:`.AssociationProxyInstance`.

    Intercepted append/set/assignment events will result
    in the generation of new instances of this class.

    """

    @classmethod
    def for_proxy(cls, parent, owning_class, parent_instance):
        target_collection = parent.target_collection
        value_attr = parent.value_attr
        prop = orm.class_mapper(owning_class).get_property(target_collection)

        # this was never asserted before but this should be made clear.
        if not isinstance(prop, orm.RelationshipProperty):
            raise NotImplementedError(
                "association proxy to a non-relationship "
                "intermediary is not supported"
            )

        target_class = prop.mapper.class_

        try:
            target_assoc = cls._cls_unwrap_target_assoc_proxy(
                target_class, value_attr
            )
        except AttributeError:
            # the proxied attribute doesn't exist on the target class;
            # return an "ambiguous" instance that will work on a per-object
            # basis
            return AmbiguousAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        else:
            return cls._construct_for_assoc(
                target_assoc, parent, owning_class, target_class, value_attr
            )

    @classmethod
    def _construct_for_assoc(
        cls, target_assoc, parent, owning_class, target_class, value_attr
    ):
        # A proxy-to-proxy chain is always handled as an object-style
        # instance; otherwise choose the object vs. column flavor from the
        # instrumented attribute's implementation.
        if target_assoc is not None:
            return ObjectAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )

        attr = getattr(target_class, value_attr)

        if not hasattr(attr, "_is_internal_proxy"):
            return AmbiguousAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        is_object = attr._impl_uses_objects
        if is_object:
            return ObjectAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        else:
            return ColumnAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )

    def _get_property(self):
        return orm.class_mapper(self.owning_class).get_property(
            self.target_collection
        )

    @property
    def _comparator(self):
        return self._get_property().comparator

    @classmethod
    def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr):
        # Return the nested proxy when the proxied attribute is itself an
        # association proxy, else None.  Raises AttributeError when the
        # attribute doesn't exist at all (handled by callers).
        attr = getattr(target_class, value_attr)
        if isinstance(attr, (AssociationProxy, AssociationProxyInstance)):
            return attr
        return None

    @util.memoized_property
    def _unwrap_target_assoc_proxy(self):
        return self._cls_unwrap_target_assoc_proxy(
            self.target_class, self.value_attr
        )

    @property
    def remote_attr(self):
        """The 'remote' class attribute referenced by this
        :class:`.AssociationProxyInstance`.

        .. seealso::

            :attr:`.AssociationProxyInstance.attr`

            :attr:`.AssociationProxyInstance.local_attr`

        """
        return getattr(self.target_class, self.value_attr)

    @property
    def local_attr(self):
        """The 'local' class attribute referenced by this
        :class:`.AssociationProxyInstance`.

        .. seealso::

            :attr:`.AssociationProxyInstance.attr`

            :attr:`.AssociationProxyInstance.remote_attr`

        """
        return getattr(self.owning_class, self.target_collection)

    @property
    def attr(self):
        """Return a tuple of ``(local_attr, remote_attr)``.

        This attribute is convenient when specifying a join
        using :meth:`.Query.join` across two relationships::

            sess.query(Parent).join(*Parent.proxied.attr)

        .. seealso::

            :attr:`.AssociationProxyInstance.local_attr`

            :attr:`.AssociationProxyInstance.remote_attr`

        """
        return (self.local_attr, self.remote_attr)

    @util.memoized_property
    def scalar(self):
        """Return ``True`` if this :class:`.AssociationProxyInstance`
        proxies a scalar relationship on the local side."""

        scalar = not self._get_property().uselist
        if scalar:
            # set up the scalar getter/setter once; collection accessors
            # are built lazily in _new() instead.
            self._initialize_scalar_accessors()
        return scalar

    @util.memoized_property
    def _value_is_scalar(self):
        return (
            not self._get_property()
            .mapper.get_property(self.value_attr)
            .uselist
        )

    @property
    def _target_is_object(self):
        # Overridden as a plain class attribute by the Object/Column
        # subclasses.
        raise NotImplementedError()

    def _initialize_scalar_accessors(self):
        if self.parent.getset_factory:
            get, set_ = self.parent.getset_factory(None, self)
        else:
            get, set_ = self.parent._default_getset(None)
        self._scalar_get, self._scalar_set = get, set_

    def _default_getset(self, collection_class):
        # Mirror of AssociationProxy._default_getset; dict collections get
        # an (obj, key, value) setter.
        attr = self.value_attr
        _getter = operator.attrgetter(attr)

        def getter(target):
            return _getter(target) if target is not None else None

        if collection_class is dict:

            def setter(o, k, v):
                return setattr(o, attr, v)

        else:

            def setter(o, v):
                return setattr(o, attr, v)

        return getter, setter

    @property
    def info(self):
        return self.parent.info

    def get(self, obj):
        if obj is None:
            return self

        if self.scalar:
            target = getattr(obj, self.target_collection)
            return self._scalar_get(target)
        else:
            try:
                # If the owning instance is reborn (orm session resurrect,
                # etc.), refresh the proxy cache.
                creator_id, self_id, proxy = getattr(obj, self.key)
            except AttributeError:
                pass
            else:
                if id(obj) == creator_id and id(self) == self_id:
                    assert self.collection_class is not None
                    return proxy
            self.collection_class, proxy = self._new(
                _lazy_collection(obj, self.target_collection)
            )
            setattr(obj, self.key, (id(obj), id(self), proxy))
            return proxy

    def set(self, obj, values):
        if self.scalar:
            creator = (
                self.parent.creator
                if self.parent.creator
                else self.target_class
            )
            target = getattr(obj, self.target_collection)
            if target is None:
                if values is None:
                    # nothing to do: no intermediary and nothing to set.
                    return
                setattr(obj, self.target_collection, creator(values))
            else:
                self._scalar_set(target, values)
                if values is None and self.parent.cascade_scalar_deletes:
                    setattr(obj, self.target_collection, None)
        else:
            proxy = self.get(obj)
            assert self.collection_class is not None
            if proxy is not values:
                proxy._bulk_replace(self, values)

    def delete(self, obj):
        # NOTE(review): owning_class is always assigned in __init__, so this
        # branch looks unreachable; _calc_owner is also defined on
        # AssociationProxy with a single argument -- confirm before relying
        # on it.
        if self.owning_class is None:
            self._calc_owner(obj, None)

        if self.scalar:
            target = getattr(obj, self.target_collection)
            if target is not None:
                delattr(target, self.value_attr)
        delattr(obj, self.target_collection)

    def _new(self, lazy_collection):
        creator = (
            self.parent.creator if self.parent.creator else self.target_class
        )
        collection_class = util.duck_type_collection(lazy_collection())

        if self.parent.proxy_factory:
            return (
                collection_class,
                self.parent.proxy_factory(
                    lazy_collection, creator, self.value_attr, self
                ),
            )

        if self.parent.getset_factory:
            getter, setter = self.parent.getset_factory(collection_class, self)
        else:
            getter, setter = self.parent._default_getset(collection_class)

        if collection_class is list:
            return (
                collection_class,
                _AssociationList(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        elif collection_class is dict:
            return (
                collection_class,
                _AssociationDict(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        elif collection_class is set:
            return (
                collection_class,
                _AssociationSet(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        else:
            # NOTE(review): self.collection_class may still be None when
            # this error path is reached (it is assigned by the caller from
            # the return value), so the message formatting itself could
            # raise -- confirm.
            raise exc.ArgumentError(
                "could not guess which interface to use for "
                'collection_class "%s" backing "%s"; specify a '
                "proxy_factory and proxy_bulk_set manually"
                % (self.collection_class.__name__, self.target_collection)
            )

    def _set(self, proxy, values):
        # Bulk-populate a freshly cleared proxy from ``values``.
        if self.parent.proxy_bulk_set:
            self.parent.proxy_bulk_set(proxy, values)
        elif self.collection_class is list:
            proxy.extend(values)
        elif self.collection_class is dict:
            proxy.update(values)
        elif self.collection_class is set:
            proxy.update(values)
        else:
            raise exc.ArgumentError(
                "no proxy_bulk_set supplied for custom "
                "collection_class implementation"
            )

    def _inflate(self, proxy):
        # Rebuild the creator/getter/setter closures on an unpickled proxy;
        # the function objects themselves are not pickled.
        creator = (
            self.parent.creator and self.parent.creator or self.target_class
        )

        if self.parent.getset_factory:
            getter, setter = self.parent.getset_factory(
                self.collection_class, self
            )
        else:
            getter, setter = self.parent._default_getset(self.collection_class)

        proxy.creator = creator
        proxy.getter = getter
        proxy.setter = setter

    def _criterion_exists(self, criterion=None, **kwargs):
        is_has = kwargs.pop("is_has", None)

        target_assoc = self._unwrap_target_assoc_proxy
        if target_assoc is not None:
            # proxy-of-a-proxy: recurse so the nested EXISTS is built by
            # the inner proxy first.
            inner = target_assoc._criterion_exists(
                criterion=criterion, **kwargs
            )
            return self._comparator._criterion_exists(inner)

        if self._target_is_object:
            prop = getattr(self.target_class, self.value_attr)
            value_expr = prop._criterion_exists(criterion, **kwargs)
        else:
            if kwargs:
                raise exc.ArgumentError(
                    "Can't apply keyword arguments to column-targeted "
                    "association proxy; use =="
                )
            elif is_has and criterion is not None:
                raise exc.ArgumentError(
                    "Non-empty has() not allowed for "
                    "column-targeted association proxy; use =="
                )

            value_expr = criterion

        return self._comparator._criterion_exists(value_expr)

    def any(self, criterion=None, **kwargs):
        """Produce a proxied 'any' expression using EXISTS.

        This expression will be a composed product
        using the :meth:`.RelationshipProperty.Comparator.any`
        and/or :meth:`.RelationshipProperty.Comparator.has`
        operators of the underlying proxied attributes.

        """
        if self._unwrap_target_assoc_proxy is None and (
            self.scalar
            and (not self._target_is_object or self._value_is_scalar)
        ):
            raise exc.InvalidRequestError(
                "'any()' not implemented for scalar " "attributes. Use has()."
            )
        return self._criterion_exists(
            criterion=criterion, is_has=False, **kwargs
        )

    def has(self, criterion=None, **kwargs):
        """Produce a proxied 'has' expression using EXISTS.

        This expression will be a composed product
        using the :meth:`.RelationshipProperty.Comparator.any`
        and/or :meth:`.RelationshipProperty.Comparator.has`
        operators of the underlying proxied attributes.

        """
        if self._unwrap_target_assoc_proxy is None and (
            not self.scalar
            or (self._target_is_object and not self._value_is_scalar)
        ):
            raise exc.InvalidRequestError(
                "'has()' not implemented for collections. " "Use any()."
            )
        return self._criterion_exists(
            criterion=criterion, is_has=True, **kwargs
        )

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.parent)
class AmbiguousAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` where we cannot determine
    the type of target object.
    """

    _is_canonical = False

    def _ambiguous(self):
        # Comparison / SQL-generation operations fail loudly: we don't know
        # which subclass of the target actually carries the proxied
        # attribute.
        raise AttributeError(
            "Association proxy %s.%s refers to an attribute '%s' that is not "
            "directly mapped on class %s; therefore this operation cannot "
            "proceed since we don't know what type of object is referred "
            "towards"
            % (
                self.owning_class.__name__,
                self.target_collection,
                self.value_attr,
                self.target_class,
            )
        )

    def get(self, obj):
        if obj is None:
            return self
        else:
            return super(AmbiguousAssociationProxyInstance, self).get(obj)

    def __eq__(self, obj):
        self._ambiguous()

    def __ne__(self, obj):
        self._ambiguous()

    def any(self, criterion=None, **kwargs):
        self._ambiguous()

    def has(self, criterion=None, **kwargs):
        self._ambiguous()

    @util.memoized_property
    def _lookup_cache(self):
        # mapping of <subclass>->AssociationProxyInstance.
        # e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist;
        # only B1(B) and B2(B) have "b_attr", keys in here would be B1, B2
        return {}

    def _non_canonical_get_for_object(self, parent_instance):
        # Resolve a concrete AssociationProxyInstance by inspecting the
        # actual type of the related object, caching the result per class.
        if parent_instance is not None:
            actual_obj = getattr(parent_instance, self.target_collection)
            if actual_obj is not None:
                try:
                    insp = inspect(actual_obj)
                except exc.NoInspectionAvailable:
                    pass
                else:
                    mapper = insp.mapper
                    instance_class = mapper.class_
                    if instance_class not in self._lookup_cache:
                        self._populate_cache(instance_class, mapper)

                    try:
                        return self._lookup_cache[instance_class]
                    except KeyError:
                        pass

        # no object or ambiguous object given, so return "self", which
        # is a proxy with generally only instance-level functionality
        return self

    def _populate_cache(self, instance_class, mapper):
        prop = orm.class_mapper(self.owning_class).get_property(
            self.target_collection
        )

        if mapper.isa(prop.mapper):
            target_class = instance_class
            try:
                target_assoc = self._cls_unwrap_target_assoc_proxy(
                    target_class, self.value_attr
                )
            except AttributeError:
                # attribute still missing on this subclass; leave uncached.
                pass
            else:
                self._lookup_cache[instance_class] = self._construct_for_assoc(
                    target_assoc,
                    self.parent,
                    self.owning_class,
                    target_class,
                    self.value_attr,
                )
class ObjectAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` that has an object as a target.
    """

    _target_is_object = True
    _is_canonical = True

    def contains(self, obj):
        """Produce a proxied 'contains' expression using EXISTS.

        This expression will be a composed product
        using the :meth:`.RelationshipProperty.Comparator.any`
        , :meth:`.RelationshipProperty.Comparator.has`,
        and/or :meth:`.RelationshipProperty.Comparator.contains`
        operators of the underlying proxied attributes.
        """

        target_assoc = self._unwrap_target_assoc_proxy
        if target_assoc is not None:
            # proxy-of-a-proxy: delegate to the inner proxy, using equality
            # when the inner side is scalar.
            return self._comparator._criterion_exists(
                target_assoc.contains(obj)
                if not target_assoc.scalar
                else target_assoc == obj
            )
        elif (
            self._target_is_object
            and self.scalar
            and not self._value_is_scalar
        ):
            return self._comparator.has(
                getattr(self.target_class, self.value_attr).contains(obj)
            )
        elif self._target_is_object and self.scalar and self._value_is_scalar:
            raise exc.InvalidRequestError(
                "contains() doesn't apply to a scalar object endpoint; use =="
            )
        else:
            return self._comparator._criterion_exists(**{self.value_attr: obj})

    def __eq__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        if obj is None:
            # "== None" is a deliberate SQL-expression comparison (renders
            # IS NULL), not a Python identity test.
            return or_(
                self._comparator.has(**{self.value_attr: obj}),
                self._comparator == None,
            )
        else:
            return self._comparator.has(**{self.value_attr: obj})

    def __ne__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        return self._comparator.has(
            getattr(self.target_class, self.value_attr) != obj
        )
class ColumnAssociationProxyInstance(
    ColumnOperators, AssociationProxyInstance
):
    """an :class:`.AssociationProxyInstance` that has a database column as a
    target.
    """

    _target_is_object = False
    _is_canonical = True

    def __eq__(self, other):
        # special case "is None" to check for no related row as well
        expr = self._criterion_exists(
            self.remote_attr.operate(operator.eq, other)
        )
        if other is None:
            # "== None" deliberately builds an IS NULL SQL criterion.
            return or_(expr, self._comparator == None)
        else:
            return expr

    def operate(self, op, *other, **kwargs):
        # Generic ColumnOperators hook: apply the operator to the remote
        # column and wrap the result in an EXISTS against the association.
        return self._criterion_exists(
            self.remote_attr.operate(op, *other, **kwargs)
        )
class _lazy_collection(object):
    """Callable that re-fetches the target collection from its parent.

    Storing (parent, attribute-name) rather than the collection object
    keeps the proxy valid if the collection attribute is replaced.
    """

    def __init__(self, obj, target):
        self.parent = obj
        self.target = target

    def __call__(self):
        return getattr(self.parent, self.target)

    def __getstate__(self):
        # "obj" key presumably kept for pickle compatibility with earlier
        # releases -- confirm before renaming.
        return {"obj": self.parent, "target": self.target}

    def __setstate__(self, state):
        self.parent = state["obj"]
        self.target = state["target"]
class _AssociationCollection(object):
    def __init__(self, lazy_collection, creator, getter, setter, parent):
        """Constructs an _AssociationCollection.

        This will always be a subclass of either _AssociationList,
        _AssociationSet, or _AssociationDict.

        lazy_collection
          A callable returning a list-based collection of entities (usually an
          object attribute managed by a SQLAlchemy relationship())

        creator
          A function that creates new target entities.  Given one parameter:
          value.  This assertion is assumed::

            obj = creator(somevalue)
            assert getter(obj) == somevalue

        getter
          A function.  Given an associated object, return the 'value'.

        setter
          A function.  Given an associated object and a value, store that
          value on the object.

        """
        self.lazy_collection = lazy_collection
        self.creator = creator
        self.getter = getter
        self.setter = setter
        self.parent = parent

    # Live view of the backing collection; re-fetched on every access so a
    # replaced relationship collection is always reflected.
    col = property(lambda self: self.lazy_collection())

    def __len__(self):
        return len(self.col)

    def __bool__(self):
        return bool(self.col)

    # Python 2 truthiness hook.
    __nonzero__ = __bool__

    def __getstate__(self):
        # The creator/getter/setter functions are intentionally omitted:
        # they are rebuilt by parent._inflate() on unpickling.
        return {"parent": self.parent, "lazy_collection": self.lazy_collection}

    def __setstate__(self, state):
        self.parent = state["parent"]
        self.lazy_collection = state["lazy_collection"]
        self.parent._inflate(self)

    def _bulk_replace(self, assoc_proxy, values):
        # Wholesale assignment: drop current members, then bulk-populate.
        self.clear()
        assoc_proxy._set(self, values)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
    # Build a new associated object carrying ``value``.
    return self.creator(value)
def _get(self, object_):
    # Extract the proxied value from an associated object.
    return self.getter(object_)
def _set(self, object_, value):
    # Store ``value`` onto an existing associated object.
    return self.setter(object_, value)
def __getitem__(self, index):
    # Single index -> one proxied value; slice -> list of proxied values.
    if not isinstance(index, slice):
        return self._get(self.col[index])
    else:
        return [self._get(member) for member in self.col[index]]
def __setitem__(self, index, value):
    """Assign ``self[index] = value`` for an int index or a slice.

    Plain indexes and extended slices map one-for-one onto the backing
    collection; a contiguous slice (step 1) is replaced by deleting the
    old members and inserting newly created associated objects.
    """
    if not isinstance(index, slice):
        self._set(self.col[index], value)
        return

    # Normalize the slice bounds the way list slicing does, clamping
    # negative and out-of-range values into [0, len(self)].  Previously
    # a negative ``index.start`` was used verbatim, which corrupted the
    # deletion loop below (e.g. ``proxy[-2:] = [...]``).
    size = len(self)
    if index.stop is None:
        stop = size
    elif index.stop < 0:
        stop = max(size + index.stop, 0)
    else:
        stop = min(index.stop, size)
    if index.start is None:
        start = 0
    elif index.start < 0:
        start = max(size + index.start, 0)
    else:
        start = min(index.start, size)
    step = index.step or 1

    if step == 1:
        # Contiguous slice: drop the old members, then insert the new
        # values in order at the anchor position.
        for _ in range(start, stop):
            del self[start]
        i = start
        for item in value:
            self.insert(i, item)
            i += 1
    else:
        # Extended slice: sizes must match, exactly as for a plain list.
        rng = list(range(start, stop, step))
        if len(value) != len(rng):
            raise ValueError(
                "attempt to assign sequence of size %s to "
                "extended slice of size %s" % (len(value), len(rng))
            )
        for i, item in zip(rng, value):
            self._set(self.col[i], item)
def __delitem__(self, index):
    # Deleting through the proxy removes the associated object itself.
    del self.col[index]
def __contains__(self, value):
    """True when any member's proxied value compares equal to ``value``."""
    # testlib.pragma exempt:__eq__
    return any(self._get(candidate) == value for candidate in self.col)
def __getslice__(self, start, end):
    # Python 2 only: legacy slice protocol; Python 3 routes slices
    # through __getitem__.
    return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
    # Python 2 only: replace a slice with newly created associated objects.
    members = [self._create(v) for v in values]
    self.col[start:end] = members
def __delslice__(self, start, end):
    # Python 2 only: delete a contiguous range of associated objects.
    del self.col[start:end]
def __iter__(self):
    """Iterate over proxied values.

    For the actual domain objects, iterate over .col instead or
    just use the underlying collection directly from its property
    on the parent.

    """
    for member in self.col:
        yield self._get(member)
    # explicit end of the generator
    return
def append(self, value):
    """Append ``value`` by creating and storing a new associated object."""
    self.col.append(self._create(value))
def count(self, value):
    """Number of proxied members that compare equal to ``value``."""
    return sum(1 for member in self if member == value)
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0 : len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return util.cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def index(self, item, *args):
return list(self).index(item, *args)
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol("_NotProvided")
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object_):
return self.getter(object_)
def _set(self, object_, key, value):
return self.setter(object_, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return util.cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError(
"update expected at most 1 arguments, got %i" % len(a)
)
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, "keys"):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples"
)
for key, value in kw:
self[key] = value
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
for key, member in values.items() or ():
if key in additions:
self[key] = member
elif key in constants:
self[key] = member
for key in removals:
del self[key]
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(dict, func_name)
):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object_):
return self.getter(object_)
def __len__(self):
return len(self.col)
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
return
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError("pop from an empty set")
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
appender = self.add
remover = self.remove
for member in values or ():
if member in additions:
appender(member)
elif member in constants:
appender(member)
for member in removals:
remover(member)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(set, func_name)
):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
| 31.217172 | 86 | 0.596809 |
import operator
from .. import exc
from .. import inspect
from .. import orm
from .. import util
from ..orm import collections
from ..orm import interfaces
from ..sql import or_
from ..sql.operators import ColumnOperators
def association_proxy(target_collection, attr, **kw):
return AssociationProxy(target_collection, attr, **kw)
ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY")
class AssociationProxy(interfaces.InspectionAttrInfo):
is_attribute = True
extension_type = ASSOCIATION_PROXY
def __init__(
self,
target_collection,
attr,
creator=None,
getset_factory=None,
proxy_factory=None,
proxy_bulk_set=None,
info=None,
cascade_scalar_deletes=False,
):
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.cascade_scalar_deletes = cascade_scalar_deletes
self.key = "_%s_%s_%s" % (
type(self).__name__,
target_collection,
id(self),
)
if info:
self.info = info
def __get__(self, obj, class_):
if class_ is None:
return self
inst = self._as_instance(class_, obj)
if inst:
return inst.get(obj)
return self
def __set__(self, obj, values):
class_ = type(obj)
return self._as_instance(class_, obj).set(obj, values)
def __delete__(self, obj):
class_ = type(obj)
return self._as_instance(class_, obj).delete(obj)
def for_class(self, class_, obj=None):
return self._as_instance(class_, obj)
def _as_instance(self, class_, obj):
try:
inst = class_.__dict__[self.key + "_inst"]
except KeyError:
owner = self._calc_owner(class_)
if owner is not None:
inst = AssociationProxyInstance.for_proxy(self, owner, obj)
setattr(class_, self.key + "_inst", inst)
else:
inst = None
if inst is not None and not inst._is_canonical:
# since the proxied attribute is not on the targeted
# class, only on subclasses of it, which might be
# different. only return for the specific
# object's current value
return inst._non_canonical_get_for_object(obj)
else:
return inst
def _calc_owner(self, target_cls):
try:
insp = inspect(target_cls)
except exc.NoInspectionAvailable:
return None
else:
return insp.mapper.class_manager.class_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
setattr(o, attr, v)
else:
def setter(o, v):
setattr(o, attr, v)
return getter, setter
def __repr__(self):
return "AssociationProxy(%r, %r)" % (
self.target_collection,
self.value_attr,
)
class AssociationProxyInstance(object):
def __init__(self, parent, owning_class, target_class, value_attr):
self.parent = parent
self.key = parent.key
self.owning_class = owning_class
self.target_collection = parent.target_collection
self.collection_class = None
self.target_class = target_class
self.value_attr = value_attr
target_class = None
@classmethod
def for_proxy(cls, parent, owning_class, parent_instance):
target_collection = parent.target_collection
value_attr = parent.value_attr
prop = orm.class_mapper(owning_class).get_property(target_collection)
if not isinstance(prop, orm.RelationshipProperty):
raise NotImplementedError(
"association proxy to a non-relationship "
"intermediary is not supported"
)
target_class = prop.mapper.class_
try:
target_assoc = cls._cls_unwrap_target_assoc_proxy(
target_class, value_attr
)
except AttributeError:
# return an "ambiguous" instance that will work on a per-object
# basis
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return cls._construct_for_assoc(
target_assoc, parent, owning_class, target_class, value_attr
)
@classmethod
def _construct_for_assoc(
cls, target_assoc, parent, owning_class, target_class, value_attr
):
if target_assoc is not None:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
attr = getattr(target_class, value_attr)
if not hasattr(attr, "_is_internal_proxy"):
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
is_object = attr._impl_uses_objects
if is_object:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return ColumnAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
def _get_property(self):
return orm.class_mapper(self.owning_class).get_property(
self.target_collection
)
@property
def _comparator(self):
return self._get_property().comparator
@classmethod
def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr):
attr = getattr(target_class, value_attr)
if isinstance(attr, (AssociationProxy, AssociationProxyInstance)):
return attr
return None
@util.memoized_property
def _unwrap_target_assoc_proxy(self):
return self._cls_unwrap_target_assoc_proxy(
self.target_class, self.value_attr
)
@property
def remote_attr(self):
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
return (self.local_attr, self.remote_attr)
@util.memoized_property
def scalar(self):
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return (
not self._get_property()
.mapper.get_property(self.value_attr)
.uselist
)
@property
def _target_is_object(self):
raise NotImplementedError()
def _initialize_scalar_accessors(self):
if self.parent.getset_factory:
get, set_ = self.parent.getset_factory(None, self)
else:
get, set_ = self.parent._default_getset(None)
self._scalar_get, self._scalar_set = get, set_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
return setattr(o, attr, v)
else:
def setter(o, v):
return setattr(o, attr, v)
return getter, setter
@property
def info(self):
return self.parent.info
def get(self, obj):
if obj is None:
return self
if self.scalar:
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, self_id, proxy = getattr(obj, self.key)
except AttributeError:
pass
else:
if id(obj) == creator_id and id(self) == self_id:
assert self.collection_class is not None
return proxy
self.collection_class, proxy = self._new(
_lazy_collection(obj, self.target_collection)
)
setattr(obj, self.key, (id(obj), id(self), proxy))
return proxy
def set(self, obj, values):
if self.scalar:
creator = (
self.parent.creator
if self.parent.creator
else self.target_class
)
target = getattr(obj, self.target_collection)
if target is None:
if values is None:
return
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
if values is None and self.parent.cascade_scalar_deletes:
setattr(obj, self.target_collection, None)
else:
proxy = self.get(obj)
assert self.collection_class is not None
if proxy is not values:
proxy._bulk_replace(self, values)
def delete(self, obj):
if self.owning_class is None:
self._calc_owner(obj, None)
if self.scalar:
target = getattr(obj, self.target_collection)
if target is not None:
delattr(target, self.value_attr)
delattr(obj, self.target_collection)
def _new(self, lazy_collection):
creator = (
self.parent.creator if self.parent.creator else self.target_class
)
collection_class = util.duck_type_collection(lazy_collection())
if self.parent.proxy_factory:
return (
collection_class,
self.parent.proxy_factory(
lazy_collection, creator, self.value_attr, self
),
)
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(collection_class, self)
else:
getter, setter = self.parent._default_getset(collection_class)
if collection_class is list:
return (
collection_class,
_AssociationList(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is dict:
return (
collection_class,
_AssociationDict(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is set:
return (
collection_class,
_AssociationSet(
lazy_collection, creator, getter, setter, self
),
)
else:
raise exc.ArgumentError(
"could not guess which interface to use for "
'collection_class "%s" backing "%s"; specify a '
"proxy_factory and proxy_bulk_set manually"
% (self.collection_class.__name__, self.target_collection)
)
def _set(self, proxy, values):
if self.parent.proxy_bulk_set:
self.parent.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
"no proxy_bulk_set supplied for custom "
"collection_class implementation"
)
def _inflate(self, proxy):
creator = (
self.parent.creator and self.parent.creator or self.target_class
)
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(
self.collection_class, self
)
else:
getter, setter = self.parent._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _criterion_exists(self, criterion=None, **kwargs):
is_has = kwargs.pop("is_has", None)
target_assoc = self._unwrap_target_assoc_proxy
if target_assoc is not None:
inner = target_assoc._criterion_exists(
criterion=criterion, **kwargs
)
return self._comparator._criterion_exists(inner)
if self._target_is_object:
prop = getattr(self.target_class, self.value_attr)
value_expr = prop._criterion_exists(criterion, **kwargs)
else:
if kwargs:
raise exc.ArgumentError(
"Can't apply keyword arguments to column-targeted "
"association proxy; use =="
)
elif is_has and criterion is not None:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use =="
)
value_expr = criterion
return self._comparator._criterion_exists(value_expr)
def any(self, criterion=None, **kwargs):
if self._unwrap_target_assoc_proxy is None and (
self.scalar
and (not self._target_is_object or self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'any()' not implemented for scalar " "attributes. Use has()."
)
return self._criterion_exists(
criterion=criterion, is_has=False, **kwargs
)
def has(self, criterion=None, **kwargs):
if self._unwrap_target_assoc_proxy is None and (
not self.scalar
or (self._target_is_object and not self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(
criterion=criterion, is_has=True, **kwargs
)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.parent)
class AmbiguousAssociationProxyInstance(AssociationProxyInstance):
_is_canonical = False
def _ambiguous(self):
raise AttributeError(
"Association proxy %s.%s refers to an attribute '%s' that is not "
"directly mapped on class %s; therefore this operation cannot "
"proceed since we don't know what type of object is referred "
"towards"
% (
self.owning_class.__name__,
self.target_collection,
self.value_attr,
self.target_class,
)
)
def get(self, obj):
if obj is None:
return self
else:
return super(AmbiguousAssociationProxyInstance, self).get(obj)
def __eq__(self, obj):
self._ambiguous()
def __ne__(self, obj):
self._ambiguous()
def any(self, criterion=None, **kwargs):
self._ambiguous()
def has(self, criterion=None, **kwargs):
self._ambiguous()
@util.memoized_property
def _lookup_cache(self):
# mapping of <subclass>->AssociationProxyInstance.
# e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist;
return {}
def _non_canonical_get_for_object(self, parent_instance):
if parent_instance is not None:
actual_obj = getattr(parent_instance, self.target_collection)
if actual_obj is not None:
try:
insp = inspect(actual_obj)
except exc.NoInspectionAvailable:
pass
else:
mapper = insp.mapper
instance_class = mapper.class_
if instance_class not in self._lookup_cache:
self._populate_cache(instance_class, mapper)
try:
return self._lookup_cache[instance_class]
except KeyError:
pass
return self
def _populate_cache(self, instance_class, mapper):
prop = orm.class_mapper(self.owning_class).get_property(
self.target_collection
)
if mapper.isa(prop.mapper):
target_class = instance_class
try:
target_assoc = self._cls_unwrap_target_assoc_proxy(
target_class, self.value_attr
)
except AttributeError:
pass
else:
self._lookup_cache[instance_class] = self._construct_for_assoc(
target_assoc,
self.parent,
self.owning_class,
target_class,
self.value_attr,
)
class ObjectAssociationProxyInstance(AssociationProxyInstance):
_target_is_object = True
_is_canonical = True
def contains(self, obj):
target_assoc = self._unwrap_target_assoc_proxy
if target_assoc is not None:
return self._comparator._criterion_exists(
target_assoc.contains(obj)
if not target_assoc.scalar
else target_assoc == obj
)
elif (
self._target_is_object
and self.scalar
and not self._value_is_scalar
):
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
elif self._target_is_object and self.scalar and self._value_is_scalar:
raise exc.InvalidRequestError(
"contains() doesn't apply to a scalar object endpoint; use =="
)
else:
return self._comparator._criterion_exists(**{self.value_attr: obj})
def __eq__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None,
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj
)
class ColumnAssociationProxyInstance(
ColumnOperators, AssociationProxyInstance
):
_target_is_object = False
_is_canonical = True
def __eq__(self, other):
# special case "is None" to check for no related row as well
expr = self._criterion_exists(
self.remote_attr.operate(operator.eq, other)
)
if other is None:
return or_(expr, self._comparator == None)
else:
return expr
def operate(self, op, *other, **kwargs):
return self._criterion_exists(
self.remote_attr.operate(op, *other, **kwargs)
)
class _lazy_collection(object):
def __init__(self, obj, target):
self.parent = obj
self.target = target
def __call__(self):
return getattr(self.parent, self.target)
def __getstate__(self):
return {"obj": self.parent, "target": self.target}
def __setstate__(self, state):
self.parent = state["obj"]
self.target = state["target"]
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {"parent": self.parent, "lazy_collection": self.lazy_collection}
def __setstate__(self, state):
self.parent = state["parent"]
self.lazy_collection = state["lazy_collection"]
self.parent._inflate(self)
def _bulk_replace(self, assoc_proxy, values):
self.clear()
assoc_proxy._set(self, values)
class _AssociationList(_AssociationCollection):
def _create(self, value):
return self.creator(value)
def _get(self, object_):
return self.getter(object_)
def _set(self, object_, value):
return self.setter(object_, value)
def __getitem__(self, index):
if not isinstance(index, slice):
return self._get(self.col[index])
else:
return [self._get(member) for member in self.col[index]]
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
start = index.start or 0
rng = list(range(index.start or 0, stop, step))
if step == 1:
for i in rng:
del self[start]
i = start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value), len(rng))
)
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
for member in self.col:
yield self._get(member)
return
def append(self, value):
col = self.col
item = self._create(value)
col.append(item)
def count(self, value):
return sum(
[
1
for _ in util.itertools_filter(
lambda v: v == value, iter(self)
)
]
)
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
raise NotImplementedError
def sort(self):
raise NotImplementedError
def clear(self):
del self.col[0 : len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return util.cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def index(self, item, *args):
return list(self).index(item, *args)
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol("_NotProvided")
class _AssociationDict(_AssociationCollection):
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object_):
return self.getter(object_)
def _set(self, object_, key, value):
return self.setter(object_, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return util.cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError(
"update expected at most 1 arguments, got %i" % len(a)
)
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, "keys"):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples"
)
for key, value in kw:
self[key] = value
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
for key, member in values.items() or ():
if key in additions:
self[key] = member
elif key in constants:
self[key] = member
for key in removals:
del self[key]
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(dict, func_name)
):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
def _create(self, value):
return self.creator(value)
def _get(self, object_):
return self.getter(object_)
def __len__(self):
return len(self.col)
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
for member in self.col:
yield self._get(member)
return
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError("pop from an empty set")
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
appender = self.add
remover = self.remove
for member in values or ():
if member in additions:
appender(member)
elif member in constants:
appender(member)
for member in removals:
remover(member)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(set, func_name)
):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
| true | true |
f72b47b083e75e7fc1652ef55122a7c099974a7c | 2,333 | py | Python | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | 1 | 2022-01-16T01:15:21.000Z | 2022-01-16T01:15:21.000Z | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | null | null | null | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | null | null | null | """
This module contains all views related to multi-factor authentication
"""
import logging
import time
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import FormView
from ....forms import AuthenticationForm
logger = logging.getLogger(__name__)
@method_decorator(login_required, name="dispatch")
class AuthenticateModifyMfaView(FormView):
"""
View to authenticate a user before changing the mfa settings
"""
#: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)
template_name = "settings/mfa/authenticate.html"
#: The form class for this form view (see :class:`~django.views.generic.edit.FormMixin`)
form_class = AuthenticationForm
#: The URL to redirect to when the form is successfully processed (see :class:`~django.views.generic.edit.FormMixin`)
success_url = reverse_lazy("register_new_mfa_key")
def form_valid(self, form):
"""
This function overwrites :meth:`~django.views.generic.edit.FormMixin.form_valid` which is called if the
:class:`~integreat_cms.cms.forms.users.authentication_form.AuthenticationForm` is valid. In case the user provided correct credentials,
the current time is saved in a session variable so a timeout of the authentication can be implemented.
:param form: Authentication form
:type form: ~integreat_cms.cms.forms.users.authentication_form.AuthenticationForm
:return: Redirect user to mfa login view or to :attr:`~integreat_cms.core.settings.LOGIN_REDIRECT_URL`
:rtype: ~django.http.HttpResponseRedirect
"""
if check_password(form.cleaned_data["password"], self.request.user.password):
self.request.session["modify_mfa_authentication_time"] = time.time()
if "mfa_redirect_url" in self.request.session:
return redirect(self.request.session["mfa_redirect_url"])
return super().form_valid(form)
form.add_error("password", _("The provided password is not correct"))
return super().form_invalid(form)
| 44.865385 | 143 | 0.743678 | import logging
import time
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import FormView
from ....forms import AuthenticationForm
logger = logging.getLogger(__name__)
@method_decorator(login_required, name="dispatch")
class AuthenticateModifyMfaView(FormView):
template_name = "settings/mfa/authenticate.html"
form_class = AuthenticationForm
success_url = reverse_lazy("register_new_mfa_key")
def form_valid(self, form):
if check_password(form.cleaned_data["password"], self.request.user.password):
self.request.session["modify_mfa_authentication_time"] = time.time()
if "mfa_redirect_url" in self.request.session:
return redirect(self.request.session["mfa_redirect_url"])
return super().form_valid(form)
form.add_error("password", _("The provided password is not correct"))
return super().form_invalid(form)
| true | true |
f72b482966239f05aa2cebf8fa85221da508dbeb | 44 | py | Python | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | # 영문자 1개 입력받아 10진수로 변환하기
print(ord(input())) | 22 | 24 | 0.704545 |
print(ord(input())) | true | true |
f72b491d57302f379d5a1bd917da7dd51854de51 | 11,032 | py | Python | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | 1 | 2020-02-24T10:21:37.000Z | 2020-02-24T10:21:37.000Z | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | null | null | null | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | null | null | null | import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
def test_overrides(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = (
'{ "train_data_path": "FOO", "model": { "type": "BAR" },'
'"model.text_field_embedder.tokens.type": "BAZ",'
'"iterator.sorting_keys.0.0": "question"}'
)
params = Params.from_file(filename, overrides)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["iterator"]["sorting_keys"][0][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
# should do nothing to a non-flat dictionary
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
# incompatibility is ok
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
# goes deep
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
# Check roundtripping
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
# raises without environment variable set
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
# Our configs use environment variable substitution, and the _jsonnet parser
# will fail if we don't pass it correct environment variables.
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
assert params.pop_choice("model", "choices") == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
| 35.818182 | 100 | 0.562817 | import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
def test_overrides(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = (
'{ "train_data_path": "FOO", "model": { "type": "BAR" },'
'"model.text_field_embedder.tokens.type": "BAZ",'
'"iterator.sorting_keys.0.0": "question"}'
)
params = Params.from_file(filename, overrides)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["iterator"]["sorting_keys"][0][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
assert params.pop_choice("model", "choices") == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
| true | true |
f72b4933b2a73e756ab6c71f5af4eb81142adabf | 978 | py | Python | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 5 | 2017-04-07T06:23:04.000Z | 2019-11-19T00:52:34.000Z | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 16 | 2018-09-12T11:14:40.000Z | 2021-04-19T09:02:44.000Z | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 14 | 2017-05-11T14:26:26.000Z | 2021-07-14T14:00:06.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ecl.tests.functional import base
class TestStock(base.BaseFunctionalTest):
def test_01_get_stock(self):
stock = self.conn.baremetal.get_stock(
"44d4ce9e-cf3d-4853-bdc9-95680bf95668", "groupa"
)
self.assertIsInstance(stock.flavor_id, six.string_types)
self.assertIsInstance(stock.availability_zone, six.string_types)
self.assertIsInstance(stock.stock, bool)
| 37.615385 | 75 | 0.742331 |
import six
from ecl.tests.functional import base
class TestStock(base.BaseFunctionalTest):
def test_01_get_stock(self):
stock = self.conn.baremetal.get_stock(
"44d4ce9e-cf3d-4853-bdc9-95680bf95668", "groupa"
)
self.assertIsInstance(stock.flavor_id, six.string_types)
self.assertIsInstance(stock.availability_zone, six.string_types)
self.assertIsInstance(stock.stock, bool)
| true | true |
f72b4949f2249b81afca92d0ad7c2bcf75710a96 | 9,579 | py | Python | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""Distribution class."""
from PoPs import IDs as IDsPoPsModule
from fudge import abstractClasses as abstractClassesModule
from xData import standards as standardsModule
from . import angular as angularModule
from . import energy as energyModule
from . import energyAngular as energyAngularModule
from . import energyAngularMC as energyAngularMCModule
from . import angularEnergyMC as angularEnergyMCModule
from . import KalbachMann as KalbachMannModule
from . import angularEnergy as angularEnergyModule
from . import LLNL_angularEnergy as LLNL_angularEnergyModule
from . import uncorrelated as uncorrelatedModule
from . import Legendre as LegendreModule
from . import photonScattering as photonScatteringModule
from . import reference as referenceModule
from . import multiGroup as multiGroupModule
from . import unspecified as unspecifiedModule
from . import branching3d as branching3dModule
# TODO: probably missing some imports from photonScattering.py.
# Python 2 compatibility: make every class defined in this module new-style.
# Harmless under Python 3, where all classes are already new-style.
__metaclass__ = type
class component( abstractClassesModule.component ) :
    """Distribution component: the set of distribution forms for one outgoing product.

    Holds one form per style label (angular, energy-angular, Kalbach-Mann,
    Legendre, uncorrelated, reference, multi-group, unspecified, ...) and
    provides spectrum evaluation, checking, diffing and lookup helpers.
    """
    moniker = 'distribution'
    def __init__( self ) :
        # Register every form class that is allowed inside this component.
        abstractClassesModule.component.__init__( self, ( angularModule.form, angularModule.twoBodyForm,
                KalbachMannModule.form,
                energyAngularModule.form, energyAngularMCModule.form,
                angularEnergyModule.form, angularEnergyMCModule.form,
                LLNL_angularEnergyModule.LLNLAngularEnergyForm,
                uncorrelatedModule.form, LegendreModule.form, referenceModule.form,
                referenceModule.CoulombPlusNuclearElastic, referenceModule.thermalNeutronScatteringLaw,
                photonScatteringModule.coherentPhotonScattering.form, photonScatteringModule.incoherentPhotonScattering.form,
                multiGroupModule.form, unspecifiedModule.form, branching3dModule.form ) )
    def energySpectrumAtEnergy( self, energyIn, frame, **kwargs ) :
        """Returns the energy spectrum in the lab frame for the specified incident energy.

        kwargs may contain 'styleLabel' (defaults to the evaluated style's label).
        Falls back to an empty XYs1d, printing a warning, when the selected form
        cannot produce a spectrum in the requested frame.
        """
        styleLabel = kwargs.get( 'styleLabel', self.evaluated.label )
        form = self[styleLabel]
        if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
            if( frame == standardsModule.frames.centerOfMassToken ) :
                # A center-of-mass spectrum can only come from a form stored in the
                # center-of-mass frame; lab-frame data is not converted here.
                if( form.productFrame == standardsModule.frames.labToken ) : form = None
        else :
            form = None
        if( form is not None ) :
            return( form.energySpectrumAtEnergy( energyIn, frame ) )
        else :
            form = self[styleLabel]
            if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
                print( '    WARNING: lab to center-of-mass translation not supported.' )
            else :
                print( '    WARNING: distribution "%s" does not have energySpectrumAtEnergy method.' % form.moniker )
                print( '        %s' % self.toXLink( ) )
            # Fall back to an empty spectrum carrying the correct axes/units.
            return( energyModule.XYs1d( axes = energyModule.defaultAxes( form.domainUnit ) ) )
    def getSpectrumAtEnergy( self, energy ) :
        """This method is deprecated, use energySpectrumAtEnergy instead. Returns the lab-frame energy spectrum for self at projectile energy."""
        return( self.energySpectrumAtEnergy( energy, standardsModule.frames.labToken ) )
    def calculateAverageProductData( self, style, indent = '', **kwargs ) :
        """Calculates average product data using the form matching *style*; raises if no form matches."""
        form = style.findFormMatchingDerivedStyle( self )
        if( form is None ) : raise Exception( 'No matching style' )
        return( form.calculateAverageProductData( style, indent = indent, **kwargs ) )
    def check( self, info ):
        """Check all distribution forms, returning a list of warnings.

        info is a dict of checker options; the keys used here are 'isTwoBody',
        'crossSectionDomain' and 'failOnException'.
        """
        from fudge import warning
        warnings = []
        for form in self:
            if info['isTwoBody']:
                if( form.productFrame != standardsModule.frames.centerOfMassToken ) :
                    warnings.append( warning.wrong2BodyFrame( form ) )
                if form.moniker not in (angularModule.twoBodyForm.moniker,
                                        referenceModule.form.moniker,
                                        referenceModule.CoulombPlusNuclearElastic.moniker,
                                        unspecifiedModule.form.moniker):
                    warnings.append( warning.wrongDistributionComponent( form.moniker, '2-body' ) )
            else:
                if form.moniker in (angularModule.twoBodyForm.moniker,
                                    angularModule.form.moniker,
                                    energyModule.form.moniker):
                    warnings.append( warning.wrongDistributionComponent( form.moniker, 'N-body' ) )
            def checkSubform( subform, contextMessage ):
                # Validate one subform: first domain consistency against the cross
                # section, then delegate to the subform's own check() when present.
                distributionErrors = []
                if hasattr(subform, 'domainMin') and (subform.domainMin, subform.domainMax) != info['crossSectionDomain']:
                    domain = (subform.domainMin, subform.domainMax)
                    # For gamma products, domainMin should be >= cross section start, upper bounds should match.
                    if( self.ancestor.id == IDsPoPsModule.photon ) :
                        startRatio = subform.domainMin / info['crossSectionDomain'][0]
                        endRatio = subform.domainMax / info['crossSectionDomain'][1]
                        if (startRatio < 1-standardsModule.floats.epsilon or endRatio < 1-standardsModule.floats.epsilon
                                or endRatio > 1+standardsModule.floats.epsilon):
                            distributionErrors.append( warning.domain_mismatch(
                                    *(domain + info['crossSectionDomain']), obj=subform ) )
                    # For all other products, check lower and upper edges: only warn if they disagree by > eps
                    else:
                        for e1,e2 in zip(domain, info['crossSectionDomain']):
                            ratio = e1 / e2
                            if (ratio < 1-standardsModule.floats.epsilon or ratio > 1+standardsModule.floats.epsilon):
                                distributionErrors.append( warning.domain_mismatch(
                                        *(domain + info['crossSectionDomain']), obj=subform ) )
                                break
                if not hasattr(subform,'check'):
                    distributionErrors.append( warning.NotImplemented(subform.moniker, subform ) )
                    if info['failOnException']:
                        raise NotImplementedError("Checking distribution form '%s'" % subform.moniker)
                else:
                    distributionErrors += subform.check( info )
                if distributionErrors:
                    warnings.append( warning.context( contextMessage + " - %s:" % subform.moniker, distributionErrors) )
            if isinstance(form, uncorrelatedModule.form):
                # Uncorrelated forms wrap independent angular and energy subforms.
                for subformName in ('angularSubform','energySubform'):
                    subform = getattr(form, subformName ).data
                    checkSubform( subform, 'uncorrelated - ' + subformName.replace('Subform','') )
            elif isinstance(form, KalbachMannModule.form):
                checkSubform( form, form.moniker )
            else:
                for subform in form.subforms:
                    checkSubform( subform, form.moniker )
        return warnings
    def diff( self, other, diffResults ) :
        """Records a difference when exactly one of self/other has specified distribution data."""
        if( self.hasData( ) != other.hasData( ) ) :
            if( self.hasData( ) ) :
                diffResults.append( 'Distribution unspecified - 2', '', self.toXLink( ), other.toXLink( ) )
            else :
                diffResults.append( 'Distribution unspecified - 1', '', self.toXLink( ), other.toXLink( ) )
    def patch( self, other ) :
        # Patching distributions is intentionally a no-op.
        pass
    def findEntity( self, entityName, attribute = None, value = None ):
        """
        Overrides ancestry.findEntity. Need ability to find specific distribution component,
        optionally matching an attribute/value pair; falls back to the base class lookup.
        """
        if attribute is not None:
            for entity in self:
                if entity.moniker == entityName and getattr(entity,attribute) == value:
                    return entity
        else:
            for entity in self:
                if entity.moniker == entityName:
                    return entity
        return abstractClassesModule.component.findEntity( self, entityName, attribute, value )
    def hasData( self ) :
        """
        Returns False if self only has unspecified forms; otherwise, returns True.
        """
        for form in self :
            if( not( isinstance( form, unspecifiedModule.form ) ) ) : return( True )
        return( False )
    def integrate( self, reaction_suite, energyIn, energyOut = None, muOut = None, phiOut = None, frame = standardsModule.frames.productToken, LegendreOrder = 0 ) :
        """Integrates the first form over the requested outgoing variables; returns 0.0 when no form supports integration."""
        if( len( self ) > 0 ) :
            form = self[0]
#            if( form.productFrame == standardsModule.frames.centerOfMassToken ) : return( 0.0 )
            if( hasattr( form, 'integrate' ) ) :
                return( form.integrate( reaction_suite, energyIn, energyOut = energyOut, muOut = muOut, phiOut = phiOut, frame = frame, LegendreOrder = LegendreOrder ) )
            else :
                print( 'missing integrate', type( form ) )
        return( 0.0 )
    def toPointwise_withLinearXYs( self, **kwargs ) :
        """Returns a pointwise, lin-lin representation of the evaluated form."""
        return( self.evaluated.toPointwise_withLinearXYs( **kwargs ) )
| 48.135678 | 169 | 0.614156 |
from PoPs import IDs as IDsPoPsModule
from fudge import abstractClasses as abstractClassesModule
from xData import standards as standardsModule
from . import angular as angularModule
from . import energy as energyModule
from . import energyAngular as energyAngularModule
from . import energyAngularMC as energyAngularMCModule
from . import angularEnergyMC as angularEnergyMCModule
from . import KalbachMann as KalbachMannModule
from . import angularEnergy as angularEnergyModule
from . import LLNL_angularEnergy as LLNL_angularEnergyModule
from . import uncorrelated as uncorrelatedModule
from . import Legendre as LegendreModule
from . import photonScattering as photonScatteringModule
from . import reference as referenceModule
from . import multiGroup as multiGroupModule
from . import unspecified as unspecifiedModule
from . import branching3d as branching3dModule
__metaclass__ = type
class component( abstractClassesModule.component ) :
moniker = 'distribution'
def __init__( self ) :
abstractClassesModule.component.__init__( self, ( angularModule.form, angularModule.twoBodyForm,
KalbachMannModule.form,
energyAngularModule.form, energyAngularMCModule.form,
angularEnergyModule.form, angularEnergyMCModule.form,
LLNL_angularEnergyModule.LLNLAngularEnergyForm,
uncorrelatedModule.form, LegendreModule.form, referenceModule.form,
referenceModule.CoulombPlusNuclearElastic, referenceModule.thermalNeutronScatteringLaw,
photonScatteringModule.coherentPhotonScattering.form, photonScatteringModule.incoherentPhotonScattering.form,
multiGroupModule.form, unspecifiedModule.form, branching3dModule.form ) )
def energySpectrumAtEnergy( self, energyIn, frame, **kwargs ) :
styleLabel = kwargs.get( 'styleLabel', self.evaluated.label )
form = self[styleLabel]
if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
if( frame == standardsModule.frames.centerOfMassToken ) :
if( form.productFrame == standardsModule.frames.labToken ) : form = None
else :
form = None
if( form is not None ) :
return( form.energySpectrumAtEnergy( energyIn, frame ) )
else :
form = self[styleLabel]
if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
print( ' WARNING: lab to center-of-mass translation not supported.' )
else :
print( ' WARNING: distribution "%s" does not have energySpectrumAtEnergy method.' % form.moniker )
print( ' %s' % self.toXLink( ) )
return( energyModule.XYs1d( axes = energyModule.defaultAxes( form.domainUnit ) ) )
def getSpectrumAtEnergy( self, energy ) :
return( self.energySpectrumAtEnergy( energy, standardsModule.frames.labToken ) )
def calculateAverageProductData( self, style, indent = '', **kwargs ) :
form = style.findFormMatchingDerivedStyle( self )
if( form is None ) : raise Exception( 'No matching style' )
return( form.calculateAverageProductData( style, indent = indent, **kwargs ) )
def check( self, info ):
from fudge import warning
warnings = []
for form in self:
if info['isTwoBody']:
if( form.productFrame != standardsModule.frames.centerOfMassToken ) :
warnings.append( warning.wrong2BodyFrame( form ) )
if form.moniker not in (angularModule.twoBodyForm.moniker,
referenceModule.form.moniker,
referenceModule.CoulombPlusNuclearElastic.moniker,
unspecifiedModule.form.moniker):
warnings.append( warning.wrongDistributionComponent( form.moniker, '2-body' ) )
else:
if form.moniker in (angularModule.twoBodyForm.moniker,
angularModule.form.moniker,
energyModule.form.moniker):
warnings.append( warning.wrongDistributionComponent( form.moniker, 'N-body' ) )
def checkSubform( subform, contextMessage ):
distributionErrors = []
if hasattr(subform, 'domainMin') and (subform.domainMin, subform.domainMax) != info['crossSectionDomain']:
domain = (subform.domainMin, subform.domainMax)
if( self.ancestor.id == IDsPoPsModule.photon ) :
startRatio = subform.domainMin / info['crossSectionDomain'][0]
endRatio = subform.domainMax / info['crossSectionDomain'][1]
if (startRatio < 1-standardsModule.floats.epsilon or endRatio < 1-standardsModule.floats.epsilon
or endRatio > 1+standardsModule.floats.epsilon):
distributionErrors.append( warning.domain_mismatch(
*(domain + info['crossSectionDomain']), obj=subform ) )
else:
for e1,e2 in zip(domain, info['crossSectionDomain']):
ratio = e1 / e2
if (ratio < 1-standardsModule.floats.epsilon or ratio > 1+standardsModule.floats.epsilon):
distributionErrors.append( warning.domain_mismatch(
*(domain + info['crossSectionDomain']), obj=subform ) )
break
if not hasattr(subform,'check'):
distributionErrors.append( warning.NotImplemented(subform.moniker, subform ) )
if info['failOnException']:
raise NotImplementedError("Checking distribution form '%s'" % subform.moniker)
else:
distributionErrors += subform.check( info )
if distributionErrors:
warnings.append( warning.context( contextMessage + " - %s:" % subform.moniker, distributionErrors) )
if isinstance(form, uncorrelatedModule.form):
for subformName in ('angularSubform','energySubform'):
subform = getattr(form, subformName ).data
checkSubform( subform, 'uncorrelated - ' + subformName.replace('Subform','') )
elif isinstance(form, KalbachMannModule.form):
checkSubform( form, form.moniker )
else:
for subform in form.subforms:
checkSubform( subform, form.moniker )
return warnings
def diff( self, other, diffResults ) :
if( self.hasData( ) != other.hasData( ) ) :
if( self.hasData( ) ) :
diffResults.append( 'Distribution unspecified - 2', '', self.toXLink( ), other.toXLink( ) )
else :
diffResults.append( 'Distribution unspecified - 1', '', self.toXLink( ), other.toXLink( ) )
def patch( self, other ) :
pass
def findEntity( self, entityName, attribute = None, value = None ):
if attribute is not None:
for entity in self:
if entity.moniker == entityName and getattr(entity,attribute) == value:
return entity
else:
for entity in self:
if entity.moniker == entityName:
return entity
return abstractClassesModule.component.findEntity( self, entityName, attribute, value )
def hasData( self ) :
for form in self :
if( not( isinstance( form, unspecifiedModule.form ) ) ) : return( True )
return( False )
def integrate( self, reaction_suite, energyIn, energyOut = None, muOut = None, phiOut = None, frame = standardsModule.frames.productToken, LegendreOrder = 0 ) :
if( len( self ) > 0 ) :
form = self[0]
if( hasattr( form, 'integrate' ) ) :
return( form.integrate( reaction_suite, energyIn, energyOut = energyOut, muOut = muOut, phiOut = phiOut, frame = frame, LegendreOrder = LegendreOrder ) )
else :
print( 'missing integrate', type( form ) )
return( 0.0 )
def toPointwise_withLinearXYs( self, **kwargs ) :
return( self.evaluated.toPointwise_withLinearXYs( **kwargs ) )
| true | true |
f72b4999422344e122188abc17640c29c420b644 | 5,593 | py | Python | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | 1 | 2020-11-24T16:59:37.000Z | 2020-11-24T16:59:37.000Z | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.base_example_gen_executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(
    pipeline,
    input_dict,  # pylint: disable=unused-argument
    exec_properties,  # pylint: disable=unused-argument
    split_pattern):
  """Mock PTransform emitting a fixed number of tf.train.Examples per split.

  The size depends only on split_pattern: 'single/*' -> 30000,
  'train/*' -> 20000, 'eval/*' -> 10000. Each example carries int64/float/bytes
  features 'i', 'f' and 's'; each feature is independently left empty with
  probability 1/10 to exercise missing-value handling downstream.
  """
  mock_examples = []
  size = 0
  if split_pattern == 'single/*':
    size = 30000
  elif split_pattern == 'train/*':
    size = 20000
  elif split_pattern == 'eval/*':
    size = 10000
  assert size != 0
  for i in range(size):
    feature = {}
    # Each feature is empty ~10% of the time, otherwise holds the index i.
    feature['i'] = tf.train.Feature() if random.randrange(
        10) == 0 else tf.train.Feature(
            int64_list=tf.train.Int64List(value=[i]))
    feature['f'] = tf.train.Feature() if random.randrange(
        10) == 0 else tf.train.Feature(
            float_list=tf.train.FloatList(value=[float(i)]))
    feature['s'] = tf.train.Feature() if random.randrange(
        10) == 0 else tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))
    example_proto = tf.train.Example(
        features=tf.train.Features(feature=feature))
    mock_examples.append(example_proto)
  return pipeline | beam.Create(mock_examples)
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
  """Example gen executor wired to the mock input-to-example PTransform."""
  def GetInputSourceToExamplePTransform(self):
    return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
  """Tests BaseExampleGenExecutor.Do for input-defined and output-defined splits."""
  def setUp(self):
    super(BaseExampleGenExecutorTest, self).setUp()
    output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    # Create output dict.
    examples = standard_artifacts.Examples()
    examples.uri = output_data_dir
    examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
    self._output_dict = {'examples': [examples]}
    # Expected output shard paths for each split.
    self._train_output_file = os.path.join(examples.uri, 'train',
                                           'data_tfrecord-00000-of-00001.gz')
    self._eval_output_file = os.path.join(examples.uri, 'eval',
                                          'data_tfrecord-00000-of-00001.gz')
  def testDoInputSplit(self):
    """Splits declared in the input config pass through without re-splitting."""
    # Create exec properties.
    exec_properties = {
        'input_config':
            json_format.MessageToJson(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='train', pattern='train/*'),
                    example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
                ]),
                preserving_proto_field_name=True),
        'output_config':
            json_format.MessageToJson(
                example_gen_pb2.Output(), preserving_proto_field_name=True)
    }
    # Run executor.
    example_gen = TestExampleGenExecutor()
    example_gen.Do({}, self._output_dict, exec_properties)
    # Check example gen outputs.
    self.assertTrue(tf.io.gfile.exists(self._train_output_file))
    self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
    # Input train split is bigger than eval split.
    self.assertGreater(
        tf.io.gfile.GFile(self._train_output_file).size(),
        tf.io.gfile.GFile(self._eval_output_file).size())
  def testDoOutputSplit(self):
    """A single input split is hash-partitioned per the output split config."""
    # Create exec properties.
    exec_properties = {
        'input_config':
            json_format.MessageToJson(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='single', pattern='single/*'),
                ]),
                preserving_proto_field_name=True),
        'output_config':
            json_format.MessageToJson(
                example_gen_pb2.Output(
                    split_config=example_gen_pb2.SplitConfig(splits=[
                        example_gen_pb2.SplitConfig.Split(
                            name='train', hash_buckets=2),
                        example_gen_pb2.SplitConfig.Split(
                            name='eval', hash_buckets=1)
                    ])))
    }
    # Run executor.
    example_gen = TestExampleGenExecutor()
    example_gen.Do({}, self._output_dict, exec_properties)
    # Check example gen outputs.
    self.assertTrue(tf.io.gfile.exists(self._train_output_file))
    self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
    # Output split ratio: train:eval=2:1.
    self.assertGreater(
        tf.io.gfile.GFile(self._train_output_file).size(),
        tf.io.gfile.GFile(self._eval_output_file).size())
# Run the tests when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 36.555556 | 79 | 0.66619 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(
pipeline,
input_dict,
exec_properties,
split_pattern):
mock_examples = []
size = 0
if split_pattern == 'single/*':
size = 30000
elif split_pattern == 'train/*':
size = 20000
elif split_pattern == 'eval/*':
size = 10000
assert size != 0
for i in range(size):
feature = {}
feature['i'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
int64_list=tf.train.Int64List(value=[i]))
feature['f'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
float_list=tf.train.FloatList(value=[float(i)]))
feature['s'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
mock_examples.append(example_proto)
return pipeline | beam.Create(mock_examples)
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
def GetInputSourceToExamplePTransform(self):
return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
def setUp(self):
super(BaseExampleGenExecutorTest, self).setUp()
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
examples = standard_artifacts.Examples()
examples.uri = output_data_dir
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
self._output_dict = {'examples': [examples]}
self._train_output_file = os.path.join(examples.uri, 'train',
'data_tfrecord-00000-of-00001.gz')
self._eval_output_file = os.path.join(examples.uri, 'eval',
'data_tfrecord-00000-of-00001.gz')
def testDoInputSplit(self):
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(), preserving_proto_field_name=True)
}
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
def testDoOutputSplit(self):
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='single', pattern='single/*'),
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(
name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(
name='eval', hash_buckets=1)
])))
}
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
if __name__ == '__main__':
tf.test.main()
| true | true |
f72b4a5949a2383abf083c3da9e19c3770c8c953 | 6,683 | py | Python | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | 3 | 2022-02-06T09:13:51.000Z | 2022-02-19T15:03:35.000Z | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | 1 | 2022-02-14T23:16:27.000Z | 2022-02-14T23:16:27.000Z | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | null | null | null | import os
import time
import math
import numpy as np
import torch
# torch.multiprocessing.set_start_method('spawn')
# Use 'forkserver' so worker processes do not inherit CUDA/torch state from fork.
torch.multiprocessing.set_start_method('forkserver', force=True)
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import Namespace
from typing import List
from dglt.data.dataset.molecular import MoleculeDataset
from dglt.data.transformer.scaler import StandardScaler
from dglt.data.transformer.collator import MolCollator
from dglt.data.dataset.utils import get_data, get_data_from_smiles
from dglt.utils import load_args, load_checkpoint, load_scalers
from deploy import get_newest_train_args
from third_party.dimorphite_dl.acid_base import mol_cls
class MoleProp(object):
    """Molecular Properties Prediction Service.

    Loads an ensemble of model checkpoints from a directory and averages
    their per-molecule predictions for a given property task.
    """

    def __init__(self, checkpoint_dir, debug=print):
        """Collect all '*.pt' checkpoint paths found under checkpoint_dir.

        :param checkpoint_dir: directory searched recursively for checkpoints.
        :param debug: logging callable (defaults to print).
        """
        self.debug_ = debug
        self.checkpoint_paths_ = []
        for root, _, files in os.walk(checkpoint_dir):
            for fname in files:
                if fname.endswith('.pt'):
                    self.checkpoint_paths_.append(os.path.join(root, fname))

    def load_model(self, args: Namespace):
        """
        Load checkpoints and their scalers, merging saved and newest training args.

        :param args: Arguments; attributes missing on it are filled first from the
            checkpoint's training args, then from the newest training args.
        :return: None. Populates self.scaler_, self.features_scaler_,
            self.train_args, self.args_ and self.models_.
        """
        self.scaler_, self.features_scaler_ = load_scalers(self.checkpoint_paths_[0])
        self.train_args = load_args(self.checkpoint_paths_[0])
        self.args_ = args
        for key, value in vars(self.train_args).items():
            if not hasattr(self.args_, key):
                setattr(self.args_, key, value)
        # Update args with the newest training args.
        newest_train_args = get_newest_train_args()
        for key, value in vars(newest_train_args).items():
            if not hasattr(args, key):
                setattr(args, key, value)
        if args.features_path:
            args.features_path = None
            args.features_generator = ['rdkit_2d_normalized']
        self.models_ = []
        for checkpoint_path in tqdm(self.checkpoint_paths_, total=len(self.checkpoint_paths_)):
            self.models_.append(load_checkpoint(checkpoint_path, cuda=self.args_.cuda, current_args=self.args_))

    def inference(self,
                  model: "nn.Module",
                  data: "MoleculeDataset",
                  args,
                  batch_size: int,
                  shared_dict,
                  scaler: "StandardScaler" = None
                  ) -> List[List[float]]:
        """
        Run one model over a dataset and return raw (descaled) predictions.

        :param model: model to evaluate.
        :param data: input dataset.
        :param args: Arguments (bond_drop_rate is forced to 0 for inference).
        :param batch_size: batch size.
        :param shared_dict: shared dict passed to the collator.
        :param scaler: optional target scaler; when given, predictions are
            inverse-transformed back to the original target scale.
        :return: list of per-molecule prediction lists.
        """
        model.eval()
        args.bond_drop_rate = 0
        preds = []
        mol_collator = MolCollator(args=args, shared_dict=shared_dict)
        mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=mol_collator)
        for item in mol_loader:
            # item = (smiles_batch, batch, features_batch, mask, targets); only
            # the graph batch and extra features are needed here.
            _, batch, features_batch, _, _ = item
            with torch.no_grad():
                batch_preds = model(batch, features_batch)
                batch_preds = batch_preds.data.cpu().numpy()
                if scaler is not None:
                    # Undo the target normalization applied during training.
                    batch_preds = scaler.inverse_transform(batch_preds)
                preds.extend(batch_preds.tolist())
        return preds

    def postprocessing(self, task: str = None, smiles: List[str] = None, preds: np.ndarray = None):
        """Apply task-specific post-processing to the averaged predictions.

        caco2: maps p -> (10**p - 1) / 10 (presumably inverting a
               log10(10*x + 1) target transform -- confirm with training code).
        pka:   masks molecules with no acid/base classification and appends the
               class column.
        ppb:   clips fraction-bound values into [0, 1].
        :return: the post-processed predictions array.
        """
        if task == 'caco2':
            for i in range(preds.shape[0]):
                if preds[i] is not None:
                    for j in range(len(preds[i])):
                        preds[i][j] = (math.pow(10, preds[i][j]) - 1) / 10
        elif task == 'pka':
            # np.asarray guarantees an elementwise None comparison below even if
            # mol_cls returns a plain Python list (list == None is a scalar False,
            # which would wrongly index row 0).
            acid_base = np.asarray(mol_cls(smiles), dtype=object)
            preds[acid_base == None] = np.nan  # noqa: E711 -- elementwise ndarray comparison
            # np.float was removed from NumPy (>=1.24); use the builtin float dtype.
            preds = np.column_stack((preds, np.array(acid_base, dtype=float)))
        elif task == 'ppb':
            preds[preds > 1] = 1
            preds[preds < 0] = 0
        return preds

    def predict(self, task: str = None, smiles: List[str] = None):
        """
        Predict molecular properties with the ensemble, averaging model outputs.

        :param task: task name used for post-processing ('caco2', 'pka', 'ppb', ...).
        :param smiles: input SMILES strings; when None, self.args_.input_file is read.
        :return: {'task': task, 'task_score': {smiles: [properties...]}}, or a
            list of None when every input SMILES is invalid.
        """
        self.debug_('Loading data')
        tic = time.time()
        self.args_.max_workers = 30
        if smiles is not None:
            test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=True, args=self.args_)
        else:
            test_data = get_data(path=self.args_.input_file, args=self.args_,
                                 use_compound_names=self.args_.use_compound_names,
                                 skip_invalid_smiles=True)
        self.debug_('loading data: {}s'.format(time.time() - tic))
        self.debug_('Validating SMILES')
        tic = time.time()
        valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
        full_data = test_data
        test_data = MoleculeDataset([test_data[i] for i in valid_indices])
        # Edge case if an empty list of smiles is provided.
        if len(test_data) == 0:
            return [None] * len(full_data)
        # Normalize features with the scaler saved at training time.
        if self.train_args.features_scaling:
            # Bug fix: load_model stores the scaler as 'features_scaler_' (with a
            # trailing underscore); the old code read a non-existent attribute.
            test_data.normalize_features(self.features_scaler_)
        sum_preds = np.zeros((len(test_data), self.args_.num_tasks))
        self.debug_('validating smiles: {}s'.format(time.time() - tic))
        self.debug_('Predicting...')
        tic = time.time()
        shared_dict = {}
        for model in self.models_:
            model_preds = self.inference(
                model=model,
                data=test_data,
                batch_size=self.args_.batch_size,
                scaler=self.scaler_,
                shared_dict=shared_dict,
                args=self.args_
            )
            sum_preds += np.array(model_preds)
        self.debug_('predicting: {}s'.format(time.time() - tic))
        avg_preds = sum_preds / len(self.checkpoint_paths_)
        avg_preds = self.postprocessing(task=task, smiles=smiles, preds=avg_preds)
        avg_preds = avg_preds.tolist()
        assert len(test_data) == len(avg_preds)
        res = dict(zip(test_data.smiles(), avg_preds))
        return {'task': task, 'task_score': res}
| 38.854651 | 115 | 0.60422 | import os
import time
import math
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True)
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import Namespace
from typing import List
from dglt.data.dataset.molecular import MoleculeDataset
from dglt.data.transformer.scaler import StandardScaler
from dglt.data.transformer.collator import MolCollator
from dglt.data.dataset.utils import get_data, get_data_from_smiles
from dglt.utils import load_args, load_checkpoint, load_scalers
from deploy import get_newest_train_args
from third_party.dimorphite_dl.acid_base import mol_cls
class MoleProp(object):
def __init__(self, checkpoint_dir, debug=print):
self.debug_ = debug
self.checkpoint_paths_ = []
for root, _, files in os.walk(checkpoint_dir):
for fname in files:
if fname.endswith('.pt'):
self.checkpoint_paths_.append(os.path.join(root, fname))
def load_model(self, args: Namespace):
self.scaler_, self.features_scaler_ = load_scalers(self.checkpoint_paths_[0])
self.train_args = load_args(self.checkpoint_paths_[0])
self.args_ = args
for key, value in vars(self.train_args).items():
if not hasattr(self.args_, key):
setattr(self.args_, key, value)
newest_train_args = get_newest_train_args()
for key, value in vars(newest_train_args).items():
if not hasattr(args, key):
setattr(args, key, value)
if args.features_path:
args.features_path = None
args.features_generator = ['rdkit_2d_normalized']
self.models_ = []
for checkpoint_path in tqdm(self.checkpoint_paths_, total=len(self.checkpoint_paths_)):
self.models_.append(load_checkpoint(checkpoint_path, cuda=self.args_.cuda, current_args=self.args_))
def inference(self,
model: nn.Module,
data: MoleculeDataset,
args,
batch_size: int,
shared_dict,
scaler: StandardScaler = None
) -> List[List[float]]:
model.eval()
args.bond_drop_rate = 0
preds = []
iter_count = 0
mol_collator = MolCollator(args=args, shared_dict=shared_dict)
mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=mol_collator)
for i, item in enumerate(mol_loader):
smiles_batch, batch, features_batch, mask, _ = item
with torch.no_grad():
batch_preds = model(batch, features_batch)
iter_count += args.batch_size
batch_preds = batch_preds.data.cpu().numpy()
if scaler is not None:
batch_preds = scaler.inverse_transform(batch_preds)
batch_preds = batch_preds.tolist()
preds.extend(batch_preds)
return preds
def postprocessing(self, task: str = None, smiles: List[str] = None, preds: np.ndarray = None):
if task == 'caco2':
for i in range(preds.shape[0]):
if preds[i] is not None:
for j in range(len(preds[i])):
preds[i][j] = (math.pow(10, preds[i][j]) - 1) / 10
elif task == 'pka':
acid_base = mol_cls(smiles)
preds[acid_base == None] = np.nan
preds = np.column_stack((preds, np.array(acid_base, dtype=np.float)))
elif task == 'ppb':
preds[preds > 1] = 1
preds[preds < 0] = 0
return preds
def predict(self, task: str = None, smiles: List[str] = None):
self.debug_('Loading data')
tic = time.time()
self.args_.max_workers = 30
if smiles is not None:
test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=True, args=self.args_)
else:
test_data = get_data(path=self.args_.input_file, args=self.args_,
use_compound_names=self.args_.use_compound_names,
skip_invalid_smiles=True)
toc = time.time()
self.debug_('loading data: {}s'.format(toc - tic))
self.debug_('Validating SMILES')
tic = time.time()
valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
full_data = test_data
test_data = MoleculeDataset([test_data[i] for i in valid_indices])
if len(test_data) == 0:
return [None] * len(full_data)
if self.train_args.features_scaling:
test_data.normalize_features(self.features_scaler)
sum_preds = np.zeros((len(test_data), self.args_.num_tasks))
toc = time.time()
self.debug_('validating smiles: {}s'.format(toc - tic))
self.debug_(f'Predicting...')
tic = time.time()
shared_dict = {}
for model in self.models_:
model_preds = self.inference(
model=model,
data=test_data,
batch_size=self.args_.batch_size,
scaler=self.scaler_,
shared_dict=shared_dict,
args=self.args_
)
sum_preds += np.array(model_preds)
toc = time.time()
self.debug_('predicting: {}s'.format(toc - tic))
avg_preds = sum_preds / len(self.checkpoint_paths_)
avg_preds = self.postprocessing(task=task, smiles=smiles, preds=avg_preds)
avg_preds = avg_preds.tolist()
assert len(test_data) == len(avg_preds)
test_smiles = test_data.smiles()
res = {}
for i in range(len(avg_preds)):
res[test_smiles[i]] = avg_preds[i]
return {'task': task, 'task_score': res}
| true | true |
f72b4b922b786b9836eb43cd19f849b1ef9f3014 | 2,011 | py | Python | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 5 | 2021-06-15T05:34:01.000Z | 2021-08-17T12:12:34.000Z | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 1 | 2021-06-15T13:24:48.000Z | 2021-06-15T13:24:48.000Z | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 7 | 2021-06-15T05:36:47.000Z | 2021-09-30T08:00:08.000Z | import time
import numpy as np
from compute_overlap import compute_overlap
def compute_overlap_np(a: np.array, b: np.array) -> np.array:
    """
    Compute pairwise IoU (intersection over union) between two box sets.

    Vectorized with NumPy broadcasting instead of the previous O(N*K) Python
    loop; the per-pair arithmetic (min/max widths, product, union, divide) is
    unchanged, so results match the loop version elementwise.

    Args
        a: (N, 4) ndarray of float [xmin, ymin, xmax, ymax]
        b: (K, 4) ndarray of float [xmin, ymin, xmax, ymax]

    Returns
        overlaps: (N, K) ndarray of overlap between boxes a and boxes b
    """
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    # Pairwise intersection widths/heights; negative means no overlap on that
    # axis, so clip to zero before taking the product.
    iw = np.minimum(a[:, None, 2], b[None, :, 2]) - np.maximum(a[:, None, 0], b[None, :, 0])
    ih = np.minimum(a[:, None, 3], b[None, :, 3]) - np.maximum(a[:, None, 1], b[None, :, 1])
    intersection = np.clip(iw, 0, None) * np.clip(ih, 0, None)
    a_area = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    b_area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    union = a_area[:, None] + b_area[None, :] - intersection
    # Degenerate zero-area pairs get IoU 0 instead of a division-by-zero error.
    safe_union = np.where(union > 0, union, 1.0)
    return np.where(union > 0, intersection / safe_union, 0.0)
def test_overlap_1():
    """Partially overlapping unit case: IoU of these two 2x2 boxes is 1/7."""
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[2, 2, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 1. / 7
def test_overlap_0():
    """Boxes that only touch at a corner have zero intersection area, so IoU 0."""
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[3, 3, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 0.
def test_overlap_n(a_len, b_len, box_size=100):
    """Cross-check the numpy implementation against the C extension on random
    square boxes of side ``box_size``, and report the timing of each."""
    a = np.random.randint(0, 3000, (a_len, 4))
    b = np.random.randint(0, 4000, (b_len, 4))
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24.
    a = a.astype(float)
    b = b.astype(float)
    # Force every box to box_size x box_size so boxes are well-formed.
    a[:, 2] = a[:, 0] + box_size
    b[:, 2] = b[:, 0] + box_size
    a[:, 3] = a[:, 1] + box_size
    b[:, 3] = b[:, 1] + box_size
    t1 = time.time()
    o_np = compute_overlap_np(a, b)
    t2 = time.time()
    o_c = compute_overlap(a, b)
    t3 = time.time()
    # The two implementations must agree bit-for-bit.
    assert np.array_equal(o_np, o_c)
    print('Numpy time = ', t2 - t1)
    print('C_ext time = ', t3 - t2)


if __name__ == '__main__':
    test_overlap_1()
    test_overlap_0()
    test_overlap_n(100, 5, 300)
| 29.144928 | 71 | 0.513178 | import time
import numpy as np
from compute_overlap import compute_overlap
def compute_overlap_np(a: np.array, b: np.array) -> np.array:
N, K = len(a), len(b)
overlaps = np.zeros(shape=(N, K))
for n in range(N):
a_area = (a[n, 2] - a[n, 0]) * (a[n, 3] - a[n, 1])
for k in range(K):
dx = min(a[n, 2], b[k, 2]) - max(a[n, 0], b[k, 0])
if dx >= 0:
dy = min(a[n, 3], b[k, 3]) - max(a[n, 1], b[k, 1])
if dy >= 0:
b_area = (b[k, 2] - b[k, 0]) * (b[k, 3] - b[k, 1])
intersection = max(dx, 0) * max(dy, 0)
union = a_area + b_area - intersection
overlaps[n, k] = intersection / union
return overlaps
def test_overlap_1():
a = np.array([[1, 1, 3, 3]], dtype=np.float)
b = np.array([[2, 2, 4, 4]], dtype=np.float)
assert compute_overlap_np(a, b)[0][0] == 1. / 7
def test_overlap_0():
a = np.array([[1, 1, 3, 3]], dtype=np.float)
b = np.array([[3, 3, 4, 4]], dtype=np.float)
assert compute_overlap_np(a, b)[0][0] == 0.
def test_overlap_n(a_len, b_len, box_size=100):
a = np.random.randint(0, 3000, (a_len, 4))
b = np.random.randint(0, 4000, (b_len, 4))
a = a.astype(np.float)
b = b.astype(np.float)
a[:, 2] = a[:, 0] + box_size
b[:, 2] = b[:, 0] + box_size
a[:, 3] = a[:, 1] + box_size
b[:, 3] = b[:, 1] + box_size
t1 = time.time()
o_np = compute_overlap_np(a, b)
t2 = time.time()
o_c = compute_overlap(a, b)
t3 = time.time()
assert np.array_equal(o_np, o_c)
print('Numpy time = ', t2 - t1)
print('C_ext time = ', t3 - t2)
if __name__ == '__main__':
test_overlap_1()
test_overlap_0()
test_overlap_n(100, 5, 300)
| true | true |
f72b4bbd4fb629bc220cd90dcdf55c738b02e203 | 4,032 | py | Python | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | 2 | 2019-01-21T02:54:19.000Z | 2019-01-21T02:54:58.000Z | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | null | null | null | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Implement the policy value network using numpy, so that we can play with the
trained AI model without installing any DL framwork
@author: Junxiao Song
"""
from __future__ import print_function
import numpy as np
# some utility functions
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def relu(X):
    """Element-wise rectified linear unit: max(X, 0)."""
    return np.maximum(X, 0)
def conv_forward(X, W, b, stride=1, padding=1):
    """Forward pass of a 2D convolution implemented via im2col.

    X is (N, C, H, W); W is (n_filters, C, kh, kw); b is the per-filter bias.
    Returns (N, n_filters, h_out, w_out).
    """
    n_filters, _, kernel_h, kernel_w = W.shape
    # theano's conv2d rotates each kernel by 180 degrees before the dot
    # product, so replicate that here to stay weight-compatible.
    W = W[:, :, ::-1, ::-1]
    n_x, _, h_x, w_x = X.shape
    h_out = int((h_x - kernel_h + 2 * padding) / stride + 1)
    w_out = int((w_x - kernel_w + 2 * padding) / stride + 1)
    X_col = im2col_indices(X, kernel_h, kernel_w,
                           padding=padding, stride=stride)
    flat = (np.dot(W.reshape(n_filters, -1), X_col).T + b).T
    return flat.reshape(n_filters, h_out, w_out, n_x).transpose(3, 0, 1, 2)
def fc_forward(X, W, b):
    """Fully-connected (affine) layer: X @ W + b."""
    return np.dot(X, W) + b
def get_im2col_indices(x_shape, field_height,
                       field_width, padding=1, stride=1):
    """Build the (channel, row, col) fancy-index arrays used by im2col.

    Returns int arrays (k, i, j) such that x_padded[:, k, i, j] gathers each
    receptive-field patch as a column of shape (C*field_height*field_width,).
    """
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    # Bug fix: this check used field_height, wrongly rejecting valid
    # configurations with non-square filters (field_width != field_height).
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = int((H + 2 * padding - field_height) / stride + 1)
    out_width = int((W + 2 * padding - field_width) / stride + 1)

    # (i0, j0) index positions inside one patch; (i1, j1) index patch origins.
    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """An implementation of im2col based on fancy indexing.

    Unrolls every receptive-field patch of x (N, C, H, W) into one column of
    the returned (C*field_height*field_width, N*out_h*out_w) matrix.
    """
    pad = padding
    # Zero-pad only the two spatial dimensions.
    x_padded = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)),
                      mode='constant')
    k, i, j = get_im2col_indices(x.shape, field_height, field_width,
                                 padding, stride)
    patches = x_padded[:, k, i, j]
    n_channels = x.shape[1]
    return patches.transpose(1, 2, 0).reshape(
        field_height * field_width * n_channels, -1)
class PolicyValueNet():
    """policy-value network in numpy """

    # self.params is a flat list of weight/bias arrays, indexed positionally:
    #   0-5   : three shared conv layers (W, b pairs)
    #   6-9   : policy head (conv W/b, then fully-connected W/b)
    #   10-15 : value head (conv W/b, then two fully-connected W/b pairs)
    def __init__(self, board_width, board_height, net_params):
        self.board_width = board_width
        self.board_height = board_height
        self.params = net_params

    def policy_value_fn(self, board):
        """
        input: board
        output: a list of (action, probability) tuples for each available
        action and the score of the board state
        """
        # NOTE(review): `board` is assumed to expose `availables` (legal move
        # indices) and `current_state()` returning a 4-plane encoding that
        # reshapes to (1, 4, width, height) -- confirm against the Board class.
        legal_positions = board.availables
        current_state = board.current_state()
        X = current_state.reshape(-1, 4, self.board_width, self.board_height)
        # first 3 conv layers with ReLu nonlinearity
        for i in [0, 2, 4]:
            X = relu(conv_forward(X, self.params[i], self.params[i+1]))
        # policy head: 1x1-style conv then a dense layer over all positions,
        # softmax-normalized into move probabilities
        X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))
        X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])
        act_probs = softmax(X_p)
        # value head: conv, dense, then tanh squashing into [-1, 1]
        X_v = relu(conv_forward(X, self.params[10],
                                self.params[11], padding=0))
        X_v = relu(fc_forward(X_v.flatten(), self.params[12], self.params[13]))
        value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]
        # Only the probabilities of legal moves are returned; note zip() is a
        # lazy iterator on Python 3, so the caller should consume it once.
        act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
        return act_probs, value
from __future__ import print_function
import numpy as np
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
def relu(X):
out = np.maximum(X, 0)
return out
def conv_forward(X, W, b, stride=1, padding=1):
n_filters, d_filter, h_filter, w_filter = W.shape
W = W[:, :, ::-1, ::-1]
n_x, d_x, h_x, w_x = X.shape
h_out = (h_x - h_filter + 2 * padding) / stride + 1
w_out = (w_x - w_filter + 2 * padding) / stride + 1
h_out, w_out = int(h_out), int(w_out)
X_col = im2col_indices(X, h_filter, w_filter,
padding=padding, stride=stride)
W_col = W.reshape(n_filters, -1)
out = (np.dot(W_col, X_col).T + b).T
out = out.reshape(n_filters, h_out, w_out, n_x)
out = out.transpose(3, 0, 1, 2)
return out
def fc_forward(X, W, b):
out = np.dot(X, W) + b
return out
def get_im2col_indices(x_shape, field_height,
field_width, padding=1, stride=1):
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
assert (W + 2 * padding - field_height) % stride == 0
out_height = int((H + 2 * padding - field_height) / stride + 1)
out_width = int((W + 2 * padding - field_width) / stride + 1)
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height,
field_width, padding, stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
class PolicyValueNet():
def __init__(self, board_width, board_height, net_params):
self.board_width = board_width
self.board_height = board_height
self.params = net_params
def policy_value_fn(self, board):
legal_positions = board.availables
current_state = board.current_state()
X = current_state.reshape(-1, 4, self.board_width, self.board_height)
for i in [0, 2, 4]:
X = relu(conv_forward(X, self.params[i], self.params[i+1]))
X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))
X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])
act_probs = softmax(X_p)
X_v = relu(conv_forward(X, self.params[10],
self.params[11], padding=0))
X_v = relu(fc_forward(X_v.flatten(), self.params[12], self.params[13]))
value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]
act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
return act_probs, value
| true | true |
f72b4bf2acff866c3c0619b8bab73f2ba21e89f0 | 581 | py | Python | project euler solutions/Problem_027.py | helq/old_code | a432faf1b340cb379190a2f2b11b997b02d1cd8d | [
"CC0-1.0"
] | null | null | null | project euler solutions/Problem_027.py | helq/old_code | a432faf1b340cb379190a2f2b11b997b02d1cd8d | [
"CC0-1.0"
] | 4 | 2020-03-10T19:20:21.000Z | 2021-06-07T15:39:48.000Z | project euler solutions/Problem_027.py | helq/old_code | a432faf1b340cb379190a2f2b11b997b02d1cd8d | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from math import sqrt
def is_prime(a):
    """Return True iff ``a`` is a prime number.

    Bug fix: the original took ``abs(a)`` and fell through to True when the
    trial-division loop was empty, so -7, 0 and 1 were all reported prime.
    That over-counts consecutive primes in the quadratic search below; any
    value below 2 is now correctly rejected.
    """
    a = int(a)
    if a < 2:
        return False
    for i in range(2, int(sqrt(a)) + 1):
        if a % i == 0:
            return False
    return True
def num_primes(a, b):
    """Count how many consecutive n = 0, 1, 2, ... make n*(n + a) + b prime."""
    count = 0
    while is_prime(count * (count + a) + b):
        count += 1
    return count
# Project Euler 27: scan coefficients a, b of the quadratic n^2 + a*n + b
# (written here as n*(n + a) + b inside num_primes) for the longest run of
# consecutive primes starting at n = 0, then print the product a*b.
# NOTE(review): the problem statement allows |b| <= 1000 but this scan stops
# at 999 -- confirm the bound (the well-known optimum b = 971 is in range).
max_num_primes = 0
for i in range(-999,1000):
    for j in range(-999,1000):
        n = num_primes(i,j)
        if n > max_num_primes:
            max_num_primes = n
            max_mult = i * j
# Python 2 print statement: this script predates Python 3.
print max_mult
from math import sqrt
def is_prime(a):
a = abs(int(a))
for i in range( 2, int(sqrt(a)) + 1 ):
if a % i == 0:
return False
return True
def num_primes(a,b):
i = 0
while True:
if not is_prime( i*(i + a) + b ):
break
else:
i += 1
return i
max_num_primes = 0
for i in range(-999,1000):
for j in range(-999,1000):
n = num_primes(i,j)
if n > max_num_primes:
max_num_primes = n
max_mult = i * j
print max_mult | false | true |
f72b4d5a2a367abecd980532b8d4750d10b4ca89 | 1,961 | py | Python | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 810 | 2018-12-25T15:16:11.000Z | 2020-05-14T09:49:40.000Z | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 701 | 2018-12-21T05:18:43.000Z | 2020-05-16T01:30:21.000Z | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 155 | 2018-12-22T11:05:04.000Z | 2020-05-14T07:33:41.000Z | from datetime import datetime
import boto3
from feast import utils
from feast.infra.online_stores.helpers import compute_entity_id
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
def _create_n_customer_test_samples(n=10):
    """Build ``n`` (entity_key, feature_dict, event_ts, created_ts) test rows,
    one per customer id "0" .. str(n-1), each with the same fixed features."""
    samples = []
    for idx in range(n):
        entity_key = EntityKeyProto(
            join_keys=["customer"], entity_values=[ValueProto(string_val=str(idx))]
        )
        features = {
            "avg_orders_day": ValueProto(float_val=1.0),
            "name": ValueProto(string_val="John"),
            "age": ValueProto(int64_val=3),
        }
        # created_ts is None; the event timestamp is taken per row.
        samples.append((entity_key, features, datetime.utcnow(), None))
    return samples
def _create_test_table(project, tbl_name, region):
    """Create the on-demand DynamoDB table "<project>.<tbl_name>",
    hash-keyed on the string attribute ``entity_id``."""
    dynamodb = boto3.client("dynamodb", region_name=region)
    dynamodb.create_table(
        TableName=f"{project}.{tbl_name}",
        KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "entity_id", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )
def _delete_test_table(project, tbl_name, region):
    """Drop the DynamoDB table created by ``_create_test_table``."""
    boto3.client("dynamodb", region_name=region).delete_table(
        TableName=f"{project}.{tbl_name}"
    )
def _insert_data_test_table(data, project, tbl_name, region):
    """Write rows into the DynamoDB table "<project>.<tbl_name>".

    ``data`` is an iterable of (entity_key, features, timestamp, created_ts)
    tuples as produced by ``_create_n_customer_test_samples``.
    """
    dynamodb_resource = boto3.resource("dynamodb", region_name=region)
    table_instance = dynamodb_resource.Table(f"{project}.{tbl_name}")
    # Bug fix: open a single batch writer for all rows.  The original
    # re-entered the context manager inside the loop, flushing a one-item
    # batch per row and defeating the point of batching.
    with table_instance.batch_writer() as batch:
        for entity_key, features, timestamp, created_ts in data:
            entity_id = compute_entity_id(entity_key)
            batch.put_item(
                Item={
                    "entity_id": entity_id,
                    "event_ts": str(utils.make_tzaware(timestamp)),
                    "values": {k: v.SerializeToString() for k, v in features.items()},
                }
            )
import boto3
from feast import utils
from feast.infra.online_stores.helpers import compute_entity_id
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
def _create_n_customer_test_samples(n=10):
return [
(
EntityKeyProto(
join_keys=["customer"], entity_values=[ValueProto(string_val=str(i))]
),
{
"avg_orders_day": ValueProto(float_val=1.0),
"name": ValueProto(string_val="John"),
"age": ValueProto(int64_val=3),
},
datetime.utcnow(),
None,
)
for i in range(n)
]
def _create_test_table(project, tbl_name, region):
client = boto3.client("dynamodb", region_name=region)
client.create_table(
TableName=f"{project}.{tbl_name}",
KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "entity_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
def _delete_test_table(project, tbl_name, region):
client = boto3.client("dynamodb", region_name=region)
client.delete_table(TableName=f"{project}.{tbl_name}")
def _insert_data_test_table(data, project, tbl_name, region):
dynamodb_resource = boto3.resource("dynamodb", region_name=region)
table_instance = dynamodb_resource.Table(f"{project}.{tbl_name}")
for entity_key, features, timestamp, created_ts in data:
entity_id = compute_entity_id(entity_key)
with table_instance.batch_writer() as batch:
batch.put_item(
Item={
"entity_id": entity_id,
"event_ts": str(utils.make_tzaware(timestamp)),
"values": {k: v.SerializeToString() for k, v in features.items()},
}
)
| true | true |
f72b4fcb126c9560cd993ddf3c97358bf4458c21 | 2,011 | py | Python | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bbcp(Package):
    """Securely and quickly copy data from source to target"""
    homepage = "http://www.slac.stanford.edu/~abh/bbcp/"
    # Upstream publishes no versioned release tarballs, so this package can
    # only track the moving "master" branch of the git repository.
    version('git', git='http://www.slac.stanford.edu/~abh/bbcp/bbcp.git',
            branch="master")
    depends_on('zlib')
    depends_on('openssl')

    def install(self, spec, prefix):
        """Build bbcp with its stock Makefile and copy the binary into prefix/bin."""
        cd("src")
        make()
        # BBCP wants to build the executable in a directory whose name depends
        # on the system type, reported by the bundled MakeSname script.
        makesname = Executable("../MakeSname")
        bbcp_executable_path = "../bin/%s/bbcp" % makesname(
            output=str).rstrip("\n")
        destination_path = "%s/bin/" % prefix
        mkdirp(destination_path)
        # `install` here is Spack's file-copy helper, not this method.
        install(bbcp_executable_path, destination_path)
| 41.040816 | 78 | 0.651914 | true | true | |
f72b4ff25106f6a06c072e4b810373c1bc5e5e95 | 1,964 | py | Python | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | null | null | null | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | 1 | 2020-02-16T10:48:55.000Z | 2020-02-16T11:06:36.000Z | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | 4 | 2019-11-04T15:00:55.000Z | 2020-03-02T13:36:17.000Z | from django.contrib import admin
from django import forms
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
import sdap.tools.forms as tool_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.apps import apps
class ExpressionStudyAdmin(admin.ModelAdmin, DynamicArrayMixin):
    # Admin form for ExpressionStudy.  DynamicArrayMixin renders the
    # ArrayField-backed columns (topics, tissues, ...) as editable widgets.
    fieldsets = [
        (None, {'fields': ['database','article', 'pmid', 'status', 'ome', 'experimental_design', 'topics', 'tissues', 'sex',
            'dev_stage', 'age', 'antibody', 'mutant', 'cell_sorted', 'keywords', 'samples_count', 'read_groups', 'edit_groups',
              ]
            }
        ),
    ]
class ExpressionDataAdmin(admin.ModelAdmin):
    # Admin form for ExpressionData matrices attached to a study.
    fieldsets = [
        (None, {'fields': ['name', 'file','gene_type','gene_number', 'technology', 'species' ,'cell_number', 'study'
              ]
            }
        ),
    ]
    # NOTE(review): 'class_name' must be an attribute/method on the
    # ExpressionData model or this admin -- confirm it exists.
    list_display = ['name', 'class_name']
class GeneAdmin(admin.ModelAdmin):
    # Admin form for Gene records; search_fields on 'symbol' also powers the
    # autocomplete_fields widget used by GeneListAdmin below.
    fieldsets = [
        (None, {'fields': ['gene_id','tax_id','symbol','synonyms','description','homolog_id','ensemble_id'
              ]
            }
        ),
    ]
    list_display = ['symbol', 'gene_id']
    search_fields = ['symbol']
class GeneListAdmin(admin.ModelAdmin):
    # Admin form for user-defined gene lists; the many-to-many 'genes' field
    # uses autocomplete (requires search_fields on GeneAdmin).
    fieldsets = [
        (None, {'fields': ['name','created_by','species','genes'
              ]
            }
        ),
    ]
    autocomplete_fields = ['genes']
# Wire each model to its admin class; Database uses the default ModelAdmin.
admin.site.register(ExpressionStudy, ExpressionStudyAdmin)
admin.site.register(ExpressionData, ExpressionDataAdmin)
admin.site.register(GeneList, GeneListAdmin)
admin.site.register(Gene, GeneAdmin)
admin.site.register(Database)
| 32.733333 | 155 | 0.556517 | from django.contrib import admin
from django import forms
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
import sdap.tools.forms as tool_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.apps import apps
class ExpressionStudyAdmin(admin.ModelAdmin, DynamicArrayMixin):
fieldsets = [
(None, {'fields': ['database','article', 'pmid', 'status', 'ome', 'experimental_design', 'topics', 'tissues', 'sex',
'dev_stage', 'age', 'antibody', 'mutant', 'cell_sorted', 'keywords', 'samples_count', 'read_groups', 'edit_groups',
]
}
),
]
class ExpressionDataAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name', 'file','gene_type','gene_number', 'technology', 'species' ,'cell_number', 'study'
]
}
),
]
list_display = ['name', 'class_name']
class GeneAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['gene_id','tax_id','symbol','synonyms','description','homolog_id','ensemble_id'
]
}
),
]
list_display = ['symbol', 'gene_id']
search_fields = ['symbol']
class GeneListAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name','created_by','species','genes'
]
}
),
]
autocomplete_fields = ['genes']
admin.site.register(ExpressionStudy, ExpressionStudyAdmin)
admin.site.register(ExpressionData, ExpressionDataAdmin)
admin.site.register(GeneList, GeneListAdmin)
admin.site.register(Gene, GeneAdmin)
admin.site.register(Database)
| true | true |
f72b50a50d6a2732a98caa1aae33253e14cfd9d0 | 1,772 | py | Python | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | 10 | 2020-11-09T18:12:43.000Z | 2021-12-17T16:48:54.000Z | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | null | null | null | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | 3 | 2021-01-26T12:41:08.000Z | 2021-12-16T04:33:11.000Z | # Copyright 2020 HQS Quantum Simulations GmbH
# Reza Ghafarian Shirazi, Thilo Mast.
# reza.shirazi@quantumsimulations.de
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf import gto, scf
import asf
# Fe(III) surrounded by six water ligands, total charge +3, in a minimal
# AO basis (MINAO); spin = 3 is the alpha-minus-beta electron count.
mol = gto.Mole()
mol.atom = """
 Fe -0.0000000 0.0000000 0.0000000
 O 0.0000000 2.0622910 0.0000000
 H 0.7919274 2.6471973 0.0000000
 H -0.7919274 2.6471973 0.0000000
 O -0.0000000 0.0000000 2.0622910
 H -0.0000000 0.7919274 2.6471973
 H 0.0000000 -0.7919274 2.6471973
 O 2.0622910 -0.0000000 -0.0000000
 H 2.6471973 -0.0000000 0.7919274
 H 2.6471973 -0.0000000 -0.7919274
 O -0.0000000 -2.0622910 0.0000000
 H -0.7919274 -2.6471973 -0.0000000
 H 0.7919274 -2.6471973 -0.0000000
 O 0.0000000 0.0000000 -2.0622910
 H 0.0000000 -0.7919274 -2.6471973
 H -0.0000000 0.7919274 -2.6471973
 O -2.0622910 0.0000000 0.0000000
 H -2.6471973 0.0000000 -0.7919274
 H -2.6471973 0.0000000 0.7919274
"""
mol.basis = {'default': 'minao'}
mol.charge = 3
mol.spin = 3
mol.verbose = 0
mol.build()

# NOTE(review): the original comment said "UHF for UNOs" but the code runs
# scf.RHF; with mol.spin != 0 PySCF dispatches this to a restricted
# open-shell (ROHF) calculation -- confirm whether UHF natural orbitals
# were actually intended.
mf = scf.RHF(mol).run(max_cycle=100)
# Follow internal-stability rotations until the SCF solution is stable:
# stability() returns the (possibly rotated) orbitals; rerun the SCF from
# them until it hands back the converged coefficients unchanged.
mo_new = mf.stability()[0]
while mo_new is not mf.mo_coeff:
    mf.kernel(dm0=mf.make_rdm1(mo_coeff=mo_new))
    mo_new = mf.stability()[0]

# Active-space finder: select an active space from MP2 natural-orbital
# occupations, capped at 11 orbitals.
ASF = asf.asf()
ele, mos = ASF.fas_no_guess(mf, nat_type='MP2', machine_limit=11)
| 30.551724 | 74 | 0.738149 |
from pyscf import gto, scf
import asf
mol = gto.Mole()
mol.atom = """
Fe -0.0000000 0.0000000 0.0000000
O 0.0000000 2.0622910 0.0000000
H 0.7919274 2.6471973 0.0000000
H -0.7919274 2.6471973 0.0000000
O -0.0000000 0.0000000 2.0622910
H -0.0000000 0.7919274 2.6471973
H 0.0000000 -0.7919274 2.6471973
O 2.0622910 -0.0000000 -0.0000000
H 2.6471973 -0.0000000 0.7919274
H 2.6471973 -0.0000000 -0.7919274
O -0.0000000 -2.0622910 0.0000000
H -0.7919274 -2.6471973 -0.0000000
H 0.7919274 -2.6471973 -0.0000000
O 0.0000000 0.0000000 -2.0622910
H 0.0000000 -0.7919274 -2.6471973
H -0.0000000 0.7919274 -2.6471973
O -2.0622910 0.0000000 0.0000000
H -2.6471973 0.0000000 -0.7919274
H -2.6471973 0.0000000 0.7919274
"""
mol.basis = {'default': 'minao'}
mol.charge = 3
mol.spin = 3
mol.verbose = 0
mol.build()
mf = scf.RHF(mol).run(max_cycle=100)
mo_new = mf.stability()[0]
while mo_new is not mf.mo_coeff:
mf.kernel(dm0=mf.make_rdm1(mo_coeff=mo_new))
mo_new = mf.stability()[0]
ASF = asf.asf()
ele, mos = ASF.fas_no_guess(mf, nat_type='MP2', machine_limit=11)
| true | true |
f72b50aeba2955ce6ef70f323284adca857cfc4f | 3,088 | py | Python | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | null | null | null | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | 3 | 2021-06-08T22:25:30.000Z | 2022-01-13T03:18:29.000Z | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from math import sqrt
from scipy.spatial import distance
from yolo_app.etc.config import config
def crop_image(save_path, img, xywh):
    """Crop the [x, y, w, h] region out of ``img`` and write it to ``save_path``."""
    x, y, w, h = xywh[0], xywh[1], xywh[2], xywh[3]
    cv2.imwrite(save_path, img[y:y + h, x:x + w])
def np_xyxy2xywh(xyxy, data_type=int):
    """Convert one [x1, y1, x2, y2] box to [x, y, w, h] (top-left + size).

    Width and height are cast through ``data_type`` (but note the result
    array keeps the dtype of the input, as with the original).
    """
    out = np.zeros_like(xyxy)
    out[0], out[1] = xyxy[0], xyxy[1]
    out[2] = data_type(abs(xyxy[2] - xyxy[0]))
    out[3] = data_type(abs(xyxy[1] - xyxy[3]))
    return out
def torch2np_xyxy(xyxy, data_type=int):
    """Copy the first four [x1, y1, x2, y2] coordinates into a numpy array,
    casting each value through ``data_type``.

    Accepts a numpy array or a torch tensor; a CUDA tensor cannot be
    consumed by numpy directly, so it is moved to host memory first.
    """
    try:
        # CPU path: works for numpy arrays and CPU torch tensors.
        np_xyxy = np.zeros_like(xyxy)
    except (TypeError, RuntimeError):
        # GPU path.  Bug fix: this was a bare `except:`, which would also
        # swallow KeyboardInterrupt/SystemExit; only the expected
        # tensor-to-numpy conversion failures are caught now.
        np_xyxy = np.zeros_like(xyxy.data.cpu().numpy())
    np_xyxy[0] = data_type(xyxy[0])
    np_xyxy[1] = data_type(xyxy[1])
    np_xyxy[2] = data_type(xyxy[2])
    np_xyxy[3] = data_type(xyxy[3])
    return np_xyxy
def get_det_xyxy(det):
    """Extract the [x1, y1, x2, y2] part of one detection row as a numpy array."""
    return torch2np_xyxy(det[:4])
# Merged of 2 bounding boxes (xyxy and xyxy)
def get_mbbox(obj_1, obj_2):
box1_x1 = obj_1[0]
box1_y1 = obj_1[1]
box1_x2 = obj_1[2]
box1_y2 = obj_1[3]
box2_x1 = obj_2[0]
box2_y1 = obj_2[1]
box2_x2 = obj_2[2]
box2_y2 = obj_2[3]
mbbox = [
min(box1_x1, box2_x1),
min(box1_y1, box2_y1),
max(box1_x2, box2_x2),
max(box1_y2, box2_y2)
]
return mbbox
def np_xyxy2centroid(xyxy):
    """Return the center point [cx, cy] of an [x1, y1, x2, y2] box."""
    return np.asarray([(xyxy[0] + xyxy[2]) / 2,
                       (xyxy[1] + xyxy[3]) / 2])
def get_xyxy_distance(xyxy_1, xyxy_2):
    """Euclidean distance between the first two coordinates of the inputs.

    NOTE(review): despite the name, only elements [0] and [1] are compared;
    callers appear to pass centroids here -- confirm before passing raw boxes.
    """
    return sqrt((xyxy_1[0] - xyxy_2[0]) ** 2 + (xyxy_1[1] - xyxy_2[1]) ** 2)
def get_xyxy_distance_manhattan(xyxy_1, xyxy_2):
    # NOTE(review): this is NOT a Manhattan distance.  It squares the per-axis
    # differences, takes the cityblock distance of those two *scalars* (which
    # is just |dx^2 - dy^2|), then square-roots the result.  A true Manhattan
    # distance would be |dx| + |dy|.  Left byte-identical pending confirmation
    # of the intended metric with the author.
    o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)
    o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)
    dist = sqrt(distance.cityblock(o1cx_o2cx, o1cy_o2cy))
    return dist
def save_txt(save_path, txt_format, bbox_xyxy=None, w_type='a', img_ext=".png", cls=None, conf=1.0):
txt_path = save_path.replace(img_ext, '')
with open(txt_path + '.txt', w_type) as file:
if bbox_xyxy is None:
file.write("")
else:
if cls is None:
cls = config["bbox_config"]["default_label"]
if txt_format == "default":
file.write(('%g ' * 6 + '\n') % (bbox_xyxy, cls, conf))
elif txt_format == "cartucho":
str_output = cls + " "
str_output += str(conf) + " "
str_output += str(int(bbox_xyxy[0])) + " " + \
str(int(bbox_xyxy[1])) + " " + \
str(int(bbox_xyxy[2])) + " " + \
str(int(bbox_xyxy[3])) + "\n"
file.write(str_output)
else:
pass
| 26.393162 | 100 | 0.562176 | import cv2
import numpy as np
from math import sqrt
from scipy.spatial import distance
from yolo_app.etc.config import config
def crop_image(save_path, img, xywh):
x = xywh[0]
y = xywh[1]
w = xywh[2]
h = xywh[3]
crop_img = img[y:y + h, x:x + w]
cv2.imwrite(save_path, crop_img)
def np_xyxy2xywh(xyxy, data_type=int):
xywh = np.zeros_like(xyxy)
x1 = xyxy[0]
y1 = xyxy[1]
x2 = xyxy[2]
y2 = xyxy[3]
xywh[0] = xyxy[0]
xywh[1] = xyxy[1]
xywh[2] = data_type(abs(x2 - x1))
xywh[3] = data_type(abs(y1 - y2))
return xywh
def torch2np_xyxy(xyxy, data_type=int):
try:
np_xyxy = np.zeros_like(xyxy)
except:
np_xyxy = np.zeros_like(xyxy.data.cpu().numpy())
np_xyxy[0] = data_type(xyxy[0])
np_xyxy[1] = data_type(xyxy[1])
np_xyxy[2] = data_type(xyxy[2])
np_xyxy[3] = data_type(xyxy[3])
return np_xyxy
def get_det_xyxy(det):
numpy_xyxy = torch2np_xyxy(det[:4])
return numpy_xyxy
def get_mbbox(obj_1, obj_2):
box1_x1 = obj_1[0]
box1_y1 = obj_1[1]
box1_x2 = obj_1[2]
box1_y2 = obj_1[3]
box2_x1 = obj_2[0]
box2_y1 = obj_2[1]
box2_x2 = obj_2[2]
box2_y2 = obj_2[3]
mbbox = [
min(box1_x1, box2_x1),
min(box1_y1, box2_y1),
max(box1_x2, box2_x2),
max(box1_y2, box2_y2)
]
return mbbox
def np_xyxy2centroid(xyxy):
centroid_x = (xyxy[0] + xyxy[2]) / 2
centroid_y = (xyxy[1] + xyxy[3]) / 2
return np.asarray([centroid_x, centroid_y])
def get_xyxy_distance(xyxy_1, xyxy_2):
o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)
o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)
dist = sqrt(o1cx_o2cx + o1cy_o2cy)
return dist
def get_xyxy_distance_manhattan(xyxy_1, xyxy_2):
o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)
o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)
dist = sqrt(distance.cityblock(o1cx_o2cx, o1cy_o2cy))
return dist
def save_txt(save_path, txt_format, bbox_xyxy=None, w_type='a', img_ext=".png", cls=None, conf=1.0):
txt_path = save_path.replace(img_ext, '')
with open(txt_path + '.txt', w_type) as file:
if bbox_xyxy is None:
file.write("")
else:
if cls is None:
cls = config["bbox_config"]["default_label"]
if txt_format == "default":
file.write(('%g ' * 6 + '\n') % (bbox_xyxy, cls, conf))
elif txt_format == "cartucho":
str_output = cls + " "
str_output += str(conf) + " "
str_output += str(int(bbox_xyxy[0])) + " " + \
str(int(bbox_xyxy[1])) + " " + \
str(int(bbox_xyxy[2])) + " " + \
str(int(bbox_xyxy[3])) + "\n"
file.write(str_output)
else:
pass
| true | true |
f72b5313853fd9dfc01ec04630bfc7a8a3c1dfba | 2,762 | py | Python | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | from generate import *
from datetime import datetime
def main():
    """Command-line entry point wrapping generate.Generate.

    Example:
        python ./generate.py --length=512
            --nsamples=1
            --prefix=[MASK]哈利站在窗边
            --tokenizer_path cache/vocab_small.txt
            --topk 40 --model_path model/model_epoch29
            --save_samples --save_samples_path result/20210915_29_1135
            --model_config model/model_epoch29/config.json --repetition_penalty 1.05 --temperature 1.1
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--key', default='intro', type=str, required=False, help='哪个模型')
    parser.add_argument('--model_v', default='-1', type=str, required=False, help='第几个模型')
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')
    parser.add_argument('--length', default=1024, type=int, required=False, help='生成长度')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')
    parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本')
    parser.add_argument('--temperature', default=1.1, type=float, required=False, help='生成温度')
    parser.add_argument('--topk', default=20, type=int, required=False, help='最高几选一')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='模型参数')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径')
    parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')
    parser.add_argument('--prefix', default='哈利站在窗边', type=str, required=False, help='生成文章的开头')
    parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')
    parser.add_argument('--segment', action='store_true', help='中文以词为单位')
    parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')
    parser.add_argument('--save_samples', default=True, help='保存产生的样本')
    parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径")
    parser.add_argument('--repetition_penalty', default=1.05, type=float, required=False)

    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    # --model_v selects a training checkpoint: rewrite model_path to
    # "<root>/model_epoch<model_v>"; '-1' keeps the user-supplied path.
    if args.model_v != '-1':
        args.model_path = '{}/model_epoch{}'.format(args.model_path.split('/')[0], args.model_v)
    else:
        args.model_path = args.model_path  # no-op: path stays as given

    # Build a "YYYYMMDD_HHMM" timestamp by stripping seconds, then removing
    # the '-' and ':' separators from str(datetime.now()).
    t = str(datetime.now())
    d = ''.join('_'.join(''.join(t.split(":")[:-1]).split(' ')).split('-'))
    # Samples land in result_<key>/<timestamp>_v<model_v>, overriding any
    # --save_samples_path given on the command line.
    args.save_samples_path = 'result_{}/{}_v{}'.format(args.key, d, args.model_v)
    Generate().run(args)


if __name__ == '__main__':
    main()
| 54.156863 | 115 | 0.68139 | from generate import *
from datetime import datetime
def main():
    """Parse CLI arguments, resolve the checkpoint path, and run generation.

    ``--model_v`` selects a specific training-epoch checkpoint; the default
    ``'-1'`` keeps ``--model_path`` as given.  Generated samples are written
    under ``result_<key>/<timestamp>_v<model_v>``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--key', default='intro', type=str, required=False, help='哪个模型')
    parser.add_argument('--model_v', default='-1', type=str, required=False, help='第几个模型')
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')
    parser.add_argument('--length', default=1024, type=int, required=False, help='生成长度')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')
    parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本')
    parser.add_argument('--temperature', default=1.1, type=float, required=False, help='生成温度')
    parser.add_argument('--topk', default=20, type=int, required=False, help='最高几选一')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='模型参数')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径')
    parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')
    parser.add_argument('--prefix', default='哈利站在窗边', type=str, required=False, help='生成文章的开头')
    parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')
    parser.add_argument('--segment', action='store_true', help='中文以词为单位')
    parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')
    # NOTE(review): --save_samples defaults to True with no action= — any CLI
    # value (even "False") is a truthy string, so it cannot be disabled from
    # the command line.  Confirm intent before changing to store_true.
    parser.add_argument('--save_samples', default=True, help='保存产生的样本')
    parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径")
    parser.add_argument('--repetition_penalty', default=1.05, type=float, required=False)
    args = parser.parse_args()
    print('args:\n' + args.__repr__())
    # '-1' means "use --model_path as given"; otherwise point at the
    # requested epoch checkpoint under the model root directory.
    if args.model_v != '-1':
        args.model_path = '{}/model_epoch{}'.format(args.model_path.split('/')[0], args.model_v)
    # Timestamp like '20210202_1951' — equivalent to (and clearer than) the
    # former str(datetime.now()) split/join chain.
    d = datetime.now().strftime('%Y%m%d_%H%M')
    args.save_samples_path = 'result_{}/{}_v{}'.format(args.key, d, args.model_v)
    Generate().run(args)
# Script entry point: parse CLI arguments and run text generation.
if __name__ == '__main__':
    main()
| true | true |
f72b5336226a539fe7cd56683840a850c1179b1c | 8,536 | py | Python | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | import hashlib
from typing import Iterable, Tuple
from loguru import logger
class Col:
    """A 24-bit RGB colour with validated integer channels in 0-255.

    Every channel assignment goes through a validating setter:

    * ``fix_numeric_type`` (default ``True``) coerces non-int numeric values
      to ``int``.
    * ``clip`` (default ``False``) clamps out-of-range values into 0-255
      instead of raising :class:`ValueError`.

    Instances are mutable (and therefore unhashable, since ``__eq__`` is
    defined without ``__hash__``).
    """

    # Populated with the NamedCols registry at module import time.
    named: "NamedCols"

    def __init__(self, r: int, g: int, b: int, clip: bool = False, fix_numeric_type: bool = True):
        self._clip = clip
        self._fix_numeric_type = fix_numeric_type
        # Assign via the properties so every channel is validated.
        self.r = r
        self.g = g
        self.b = b

    def __repr__(self) -> str:
        return "Col(r={}, g={}, b={})".format(self._r, self._g, self._b)

    def __eq__(self, other: object) -> bool:
        # Only compare against other Col instances; returning NotImplemented
        # lets Python fall back to False for foreign types instead of the
        # previous AttributeError on e.g. `Col(...) == "red"`.
        if not isinstance(other, Col):
            return NotImplemented
        return self._r == other._r and self._g == other._g and self._b == other._b

    @property
    def r(self) -> int:
        """Red channel (0-255)."""
        return self._r

    @r.setter
    def r(self, r: int) -> None:
        self._r = self._validate_uint8(r)

    @property
    def g(self) -> int:
        """Green channel (0-255)."""
        return self._g

    @g.setter
    def g(self, g: int) -> None:
        self._g = self._validate_uint8(g)

    @property
    def b(self) -> int:
        """Blue channel (0-255)."""
        return self._b

    @b.setter
    def b(self, b: int) -> None:
        self._b = self._validate_uint8(b)

    @property
    def rgb(self) -> Tuple[int, int, int]:
        """Channels as an ``(r, g, b)`` tuple."""
        return (self.r, self.g, self.b)

    @property
    def bgr(self) -> Tuple[int, int, int]:
        """Channels as a ``(b, g, r)`` tuple (OpenCV ordering)."""
        return (self.b, self.g, self.r)

    def _validate_uint8(self, c: int) -> int:
        """Coerce/validate a single channel value to an int in 0-255.

        Raises ValueError when the value is None, non-numeric (unless
        coercion is enabled), or out of range (unless clipping is enabled).
        """
        if c is None:
            raise ValueError("Color r/g/b must not be None")
        if not isinstance(c, int):
            if self._fix_numeric_type:
                logger.debug("Color r/g/b is meant to be int, so trying to coerce to int")
                c = int(c)
            else:
                raise ValueError("Color r/g/b is meant to be int but it isn't.")
        if c < 0 or c > 255:
            if self._clip:
                c = min(255, max(0, c))
                logger.debug("Color r/g/b must be 0 - 255 but it isn't, so clipping to this range.")
            else:
                raise ValueError("Color r/g/b must be 0 - 255 but it isn't.")
        return c
def pick_col(s: str) -> Col:
    """Deterministically derive a colour from a string.

    The string is MD5-hashed and the hex digest split into three equal
    chunks, each mapped linearly onto 0-255, so the same string always
    yields the same colour.

    Raises RuntimeError when *s* is not a string.
    """
    if not isinstance(s, str):
        raise RuntimeError("Please provide a string argument to pick_col")
    # Approach based on https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L737
    # i.e. hash the string representation
    digest = hashlib.md5(s.encode("utf8")).hexdigest()
    n = int(len(digest) / 3)
    mx = 2 ** (4 * n) - 1
    # Clamp each component to 255: a chunk equal to `mx` (all 'f's) would
    # otherwise map to exactly 256, which Col rejects with ValueError.
    rgb = (min(255, int(int(digest[i * n : (i + 1) * n], 16) / mx * 256)) for i in range(3))
    return Col(*rgb)
class DivergingPalette:
def __init__(self, labels: Iterable[str] = None):
# ColorBrewer Diverging 12-class Paired
self._cols = (
(166, 206, 227),
(31, 120, 180),
(178, 223, 138),
(51, 160, 44),
(251, 154, 153),
(227, 26, 28),
(253, 191, 111),
(255, 127, 0),
(202, 178, 214),
(106, 61, 154),
(255, 255, 153),
(177, 89, 40),
)
# Create the lookup (with our own Col objects so they can be mutated)
self._col_map = {}
if labels is not None:
for idx, label in enumerate(labels):
self._col_map[label] = Col(*self._cols[idx % len(self._cols)])
def col(self, label: str) -> Col:
if label not in self._col_map:
idx = len(self._col_map) % len(self._cols)
self._col_map[label] = Col(*self._cols[idx])
return self._col_map[label]
class NamedCols:
    """Registry of standard web/X11 colour names as shared Col instances.

    Each attribute is a single module-level :class:`Col` object shared by
    every user of this class; mutate a copy rather than these if you need a
    variant.  Also exposed on the colour class itself as ``Col.named``.
    The ``aware_blue_*`` entries are project-specific additions.
    """

    alice_blue = Col(240, 248, 255)
    antique_white = Col(250, 235, 215)
    aqua = Col(0, 255, 255)
    aqua_marine = Col(127, 255, 212)
    aware_blue_dark = Col(0, 81, 155)
    aware_blue_light = Col(87, 200, 231)
    azure = Col(240, 255, 255)
    beige = Col(245, 245, 220)
    bisque = Col(255, 228, 196)
    black = Col(0, 0, 0)
    blanched_almond = Col(255, 235, 205)
    blue = Col(0, 0, 255)
    blue_violet = Col(138, 43, 226)
    brown = Col(165, 42, 42)
    burly_wood = Col(222, 184, 135)
    cadet_blue = Col(95, 158, 160)
    chart_reuse = Col(127, 255, 0)
    chocolate = Col(210, 105, 30)
    coral = Col(255, 127, 80)
    corn_flower_blue = Col(100, 149, 237)
    corn_silk = Col(255, 248, 220)
    crimson = Col(220, 20, 60)
    cyan = Col(0, 255, 255)
    dark_blue = Col(0, 0, 139)
    dark_cyan = Col(0, 139, 139)
    dark_golden_rod = Col(184, 134, 11)
    dark_gray = Col(169, 169, 169)
    dark_green = Col(0, 100, 0)
    dark_grey = Col(169, 169, 169)
    dark_khaki = Col(189, 183, 107)
    dark_magenta = Col(139, 0, 139)
    dark_olive_green = Col(85, 107, 47)
    dark_orange = Col(255, 140, 0)
    dark_orchid = Col(153, 50, 204)
    dark_red = Col(139, 0, 0)
    dark_salmon = Col(233, 150, 122)
    dark_sea_green = Col(143, 188, 143)
    dark_slate_blue = Col(72, 61, 139)
    dark_slate_gray = Col(47, 79, 79)
    dark_turquoise = Col(0, 206, 209)
    dark_violet = Col(148, 0, 211)
    deep_pink = Col(255, 20, 147)
    deep_sky_blue = Col(0, 191, 255)
    dim_gray = Col(105, 105, 105)
    dim_grey = Col(105, 105, 105)
    dodger_blue = Col(30, 144, 255)
    firebrick = Col(178, 34, 34)
    floral_white = Col(255, 250, 240)
    forest_green = Col(34, 139, 34)
    fuchsia = Col(255, 0, 255)
    gainsboro = Col(220, 220, 220)
    ghost_white = Col(248, 248, 255)
    gold = Col(255, 215, 0)
    golden_rod = Col(218, 165, 32)
    gray = Col(128, 128, 128)
    green = Col(0, 128, 0)
    green_yellow = Col(173, 255, 47)
    grey = Col(128, 128, 128)
    honeydew = Col(240, 255, 240)
    hot_pink = Col(255, 105, 180)
    indian_red = Col(205, 92, 92)
    indigo = Col(75, 0, 130)
    ivory = Col(255, 255, 240)
    khaki = Col(240, 230, 140)
    lavender = Col(230, 230, 250)
    lavender_blush = Col(255, 240, 245)
    lawn_green = Col(124, 252, 0)
    lemon_chiffon = Col(255, 250, 205)
    light_blue = Col(173, 216, 230)
    light_coral = Col(240, 128, 128)
    light_cyan = Col(224, 255, 255)
    light_golden_rod_yellow = Col(250, 250, 210)
    light_gray = Col(211, 211, 211)
    light_green = Col(144, 238, 144)
    light_grey = Col(211, 211, 211)
    light_pink = Col(255, 182, 193)
    light_salmon = Col(255, 160, 122)
    light_sea_green = Col(32, 178, 170)
    light_sky_blue = Col(135, 206, 250)
    light_slate_gray = Col(119, 136, 153)
    light_steel_blue = Col(176, 196, 222)
    light_yellow = Col(255, 255, 224)
    lime = Col(0, 255, 0)
    lime_green = Col(50, 205, 50)
    linen = Col(250, 240, 230)
    magenta = Col(255, 0, 255)
    maroon = Col(128, 0, 0)
    medium_aqua_marine = Col(102, 205, 170)
    medium_blue = Col(0, 0, 205)
    medium_orchid = Col(186, 85, 211)
    medium_purple = Col(147, 112, 219)
    medium_sea_green = Col(60, 179, 113)
    medium_slate_blue = Col(123, 104, 238)
    medium_spring_green = Col(0, 250, 154)
    medium_turquoise = Col(72, 209, 204)
    medium_violet_red = Col(199, 21, 133)
    midnight_blue = Col(25, 25, 112)
    mint_cream = Col(245, 255, 250)
    misty_rose = Col(255, 228, 225)
    moccasin = Col(255, 228, 181)
    navajo_white = Col(255, 222, 173)
    navy = Col(0, 0, 128)
    old_lace = Col(253, 245, 230)
    olive = Col(128, 128, 0)
    olive_drab = Col(107, 142, 35)
    orange = Col(255, 165, 0)
    orange_red = Col(255, 69, 0)
    orchid = Col(218, 112, 214)
    pale_golden_rod = Col(238, 232, 170)
    pale_green = Col(152, 251, 152)
    pale_turquoise = Col(175, 238, 238)
    pale_violet_red = Col(219, 112, 147)
    papaya_whip = Col(255, 239, 213)
    peach_puff = Col(255, 218, 185)
    peru = Col(205, 133, 63)
    pink = Col(255, 192, 203)
    plum = Col(221, 160, 221)
    powder_blue = Col(176, 224, 230)
    purple = Col(128, 0, 128)
    red = Col(255, 0, 0)
    rosy_brown = Col(188, 143, 143)
    royal_blue = Col(65, 105, 225)
    saddle_brown = Col(139, 69, 19)
    salmon = Col(250, 128, 114)
    sandy_brown = Col(244, 164, 96)
    sea_green = Col(46, 139, 87)
    sea_shell = Col(255, 245, 238)
    sienna = Col(160, 82, 45)
    silver = Col(192, 192, 192)
    sky_blue = Col(135, 206, 235)
    slate_blue = Col(106, 90, 205)
    slate_gray = Col(112, 128, 144)
    snow = Col(255, 250, 250)
    spring_green = Col(0, 255, 127)
    steel_blue = Col(70, 130, 180)
    tan = Col(210, 180, 140)
    teal = Col(0, 128, 128)
    thistle = Col(216, 191, 216)
    tomato = Col(255, 99, 71)
    turquoise = Col(64, 224, 208)
    violet = Col(238, 130, 238)
    wheat = Col(245, 222, 179)
    white = Col(255, 255, 255)
    white_smoke = Col(245, 245, 245)
    yellow = Col(255, 255, 0)
    yellow_green = Col(154, 205, 50)


# Make the named palette reachable directly from the Col class.
Col.named = NamedCols
| 31.732342 | 115 | 0.573336 | import hashlib
from typing import Iterable, Tuple
from loguru import logger
class Col:
named: "NamedCols"
def __init__(self, r: int, g: int, b: int, clip: bool = False, fix_numeric_type: bool = True):
self._clip = clip
self._fix_numeric_type = fix_numeric_type
self.r = r
self.g = g
self.b = b
@property
def r(self) -> int:
return self._r
@r.setter
def r(self, r: int) -> None:
self._r = self._validate_uint8(r)
@property
def g(self) -> int:
return self._g
def __eq__(self, c: "Col") -> bool:
return self.r == c.r and self.g == c.g and self.b == c.b
@g.setter
def g(self, g: int) -> None:
self._g = self._validate_uint8(g)
@property
def b(self) -> int:
return self._b
@b.setter
def b(self, b: int) -> None:
self._b = self._validate_uint8(b)
@property
def rgb(self) -> Tuple[int, int, int]:
return (self.r, self.g, self.b)
@property
def bgr(self) -> Tuple[int, int, int]:
return (self.b, self.g, self.r)
def _validate_uint8(self, c: int) -> int:
if c is None:
raise ValueError("Color r/g/b must not be None")
if not isinstance(c, int):
if self._fix_numeric_type:
logger.debug("Color r/g/b is meant to be int, so trying to coerce to int")
c = int(c)
else:
raise ValueError("Color r/g/b is meant to be int but it isn't.")
# Should always be >= 0
if c < 0 or c > 255:
if self._clip:
c = min(255, max(0, c))
logger.debug("Color r/g/b must be 0 - 255 but it isn't, so clipping to this range.")
else:
raise ValueError("Color r/g/b must be 0 - 255 but it isn't.")
# Phew, done:
return c
def pick_col(s: str) -> Col:
if not isinstance(s, str):
raise RuntimeError("Please provide a string argument to pick_col")
# Approach based on https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L737
# i.e. hash the string representation
digest = hashlib.md5(s.encode("utf8")).hexdigest()
n = int(len(digest) / 3)
mx = 2 ** (4 * n) - 1
rgb = (int(int(digest[i * n : (i + 1) * n], 16) / mx * 256) for i in range(3))
return Col(*rgb)
class DivergingPalette:
def __init__(self, labels: Iterable[str] = None):
# ColorBrewer Diverging 12-class Paired
self._cols = (
(166, 206, 227),
(31, 120, 180),
(178, 223, 138),
(51, 160, 44),
(251, 154, 153),
(227, 26, 28),
(253, 191, 111),
(255, 127, 0),
(202, 178, 214),
(106, 61, 154),
(255, 255, 153),
(177, 89, 40),
)
# Create the lookup (with our own Col objects so they can be mutated)
self._col_map = {}
if labels is not None:
for idx, label in enumerate(labels):
self._col_map[label] = Col(*self._cols[idx % len(self._cols)])
def col(self, label: str) -> Col:
if label not in self._col_map:
idx = len(self._col_map) % len(self._cols)
self._col_map[label] = Col(*self._cols[idx])
return self._col_map[label]
class NamedCols:
alice_blue = Col(240, 248, 255)
antique_white = Col(250, 235, 215)
aqua = Col(0, 255, 255)
aqua_marine = Col(127, 255, 212)
aware_blue_dark = Col(0, 81, 155)
aware_blue_light = Col(87, 200, 231)
azure = Col(240, 255, 255)
beige = Col(245, 245, 220)
bisque = Col(255, 228, 196)
black = Col(0, 0, 0)
blanched_almond = Col(255, 235, 205)
blue = Col(0, 0, 255)
blue_violet = Col(138, 43, 226)
brown = Col(165, 42, 42)
burly_wood = Col(222, 184, 135)
cadet_blue = Col(95, 158, 160)
chart_reuse = Col(127, 255, 0)
chocolate = Col(210, 105, 30)
coral = Col(255, 127, 80)
corn_flower_blue = Col(100, 149, 237)
corn_silk = Col(255, 248, 220)
crimson = Col(220, 20, 60)
cyan = Col(0, 255, 255)
dark_blue = Col(0, 0, 139)
dark_cyan = Col(0, 139, 139)
dark_golden_rod = Col(184, 134, 11)
dark_gray = Col(169, 169, 169)
dark_green = Col(0, 100, 0)
dark_grey = Col(169, 169, 169)
dark_khaki = Col(189, 183, 107)
dark_magenta = Col(139, 0, 139)
dark_olive_green = Col(85, 107, 47)
dark_orange = Col(255, 140, 0)
dark_orchid = Col(153, 50, 204)
dark_red = Col(139, 0, 0)
dark_salmon = Col(233, 150, 122)
dark_sea_green = Col(143, 188, 143)
dark_slate_blue = Col(72, 61, 139)
dark_slate_gray = Col(47, 79, 79)
dark_turquoise = Col(0, 206, 209)
dark_violet = Col(148, 0, 211)
deep_pink = Col(255, 20, 147)
deep_sky_blue = Col(0, 191, 255)
dim_gray = Col(105, 105, 105)
dim_grey = Col(105, 105, 105)
dodger_blue = Col(30, 144, 255)
firebrick = Col(178, 34, 34)
floral_white = Col(255, 250, 240)
forest_green = Col(34, 139, 34)
fuchsia = Col(255, 0, 255)
gainsboro = Col(220, 220, 220)
ghost_white = Col(248, 248, 255)
gold = Col(255, 215, 0)
golden_rod = Col(218, 165, 32)
gray = Col(128, 128, 128)
green = Col(0, 128, 0)
green_yellow = Col(173, 255, 47)
grey = Col(128, 128, 128)
honeydew = Col(240, 255, 240)
hot_pink = Col(255, 105, 180)
indian_red = Col(205, 92, 92)
indigo = Col(75, 0, 130)
ivory = Col(255, 255, 240)
khaki = Col(240, 230, 140)
lavender = Col(230, 230, 250)
lavender_blush = Col(255, 240, 245)
lawn_green = Col(124, 252, 0)
lemon_chiffon = Col(255, 250, 205)
light_blue = Col(173, 216, 230)
light_coral = Col(240, 128, 128)
light_cyan = Col(224, 255, 255)
light_golden_rod_yellow = Col(250, 250, 210)
light_gray = Col(211, 211, 211)
light_green = Col(144, 238, 144)
light_grey = Col(211, 211, 211)
light_pink = Col(255, 182, 193)
light_salmon = Col(255, 160, 122)
light_sea_green = Col(32, 178, 170)
light_sky_blue = Col(135, 206, 250)
light_slate_gray = Col(119, 136, 153)
light_steel_blue = Col(176, 196, 222)
light_yellow = Col(255, 255, 224)
lime = Col(0, 255, 0)
lime_green = Col(50, 205, 50)
linen = Col(250, 240, 230)
magenta = Col(255, 0, 255)
maroon = Col(128, 0, 0)
medium_aqua_marine = Col(102, 205, 170)
medium_blue = Col(0, 0, 205)
medium_orchid = Col(186, 85, 211)
medium_purple = Col(147, 112, 219)
medium_sea_green = Col(60, 179, 113)
medium_slate_blue = Col(123, 104, 238)
medium_spring_green = Col(0, 250, 154)
medium_turquoise = Col(72, 209, 204)
medium_violet_red = Col(199, 21, 133)
midnight_blue = Col(25, 25, 112)
mint_cream = Col(245, 255, 250)
misty_rose = Col(255, 228, 225)
moccasin = Col(255, 228, 181)
navajo_white = Col(255, 222, 173)
navy = Col(0, 0, 128)
old_lace = Col(253, 245, 230)
olive = Col(128, 128, 0)
olive_drab = Col(107, 142, 35)
orange = Col(255, 165, 0)
orange_red = Col(255, 69, 0)
orchid = Col(218, 112, 214)
pale_golden_rod = Col(238, 232, 170)
pale_green = Col(152, 251, 152)
pale_turquoise = Col(175, 238, 238)
pale_violet_red = Col(219, 112, 147)
papaya_whip = Col(255, 239, 213)
peach_puff = Col(255, 218, 185)
peru = Col(205, 133, 63)
pink = Col(255, 192, 203)
plum = Col(221, 160, 221)
powder_blue = Col(176, 224, 230)
purple = Col(128, 0, 128)
red = Col(255, 0, 0)
rosy_brown = Col(188, 143, 143)
royal_blue = Col(65, 105, 225)
saddle_brown = Col(139, 69, 19)
salmon = Col(250, 128, 114)
sandy_brown = Col(244, 164, 96)
sea_green = Col(46, 139, 87)
sea_shell = Col(255, 245, 238)
sienna = Col(160, 82, 45)
silver = Col(192, 192, 192)
sky_blue = Col(135, 206, 235)
slate_blue = Col(106, 90, 205)
slate_gray = Col(112, 128, 144)
snow = Col(255, 250, 250)
spring_green = Col(0, 255, 127)
steel_blue = Col(70, 130, 180)
tan = Col(210, 180, 140)
teal = Col(0, 128, 128)
thistle = Col(216, 191, 216)
tomato = Col(255, 99, 71)
turquoise = Col(64, 224, 208)
violet = Col(238, 130, 238)
wheat = Col(245, 222, 179)
white = Col(255, 255, 255)
white_smoke = Col(245, 245, 245)
yellow = Col(255, 255, 0)
yellow_green = Col(154, 205, 50)
Col.named = NamedCols
| true | true |
f72b548ac9a0d323d3b91f562646813415979b88 | 8,590 | py | Python | python/caffe/detector.py | MilesQLi/highway-networks | 87d1c8d091ed698b7959c6dbcbbe2ac2e8bf5e3e | [
"BSD-2-Clause"
] | 106 | 2015-08-11T05:45:50.000Z | 2021-04-08T02:29:07.000Z | python/caffe/detector.py | scott89/caffe-crowd | de1875c33e311c12df7dc33decda67706dbf250a | [
"BSD-2-Clause"
] | 2 | 2016-07-07T15:04:10.000Z | 2016-09-12T14:00:13.000Z | python/caffe/detector.py | scott89/caffe-crowd | de1875c33e311c12df7dc33decda67706dbf250a | [
"BSD-2-Clause"
] | 36 | 2015-08-14T07:33:42.000Z | 2021-03-11T09:48:40.000Z | #!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
    """
    Detector extends Net for windowed detection by a list of crops or
    selective search proposals.
    """

    def __init__(self, model_file, pretrained_file, mean=None,
                 input_scale=None, raw_scale=None, channel_swap=None,
                 context_pad=None):
        """
        Take
        mean, input_scale, raw_scale, channel_swap: params for
            preprocessing options.
        context_pad: amount of surrounding context to take s.t. a `context_pad`
            sized border of pixels in the network input image is context, as in
            R-CNN feature extraction.
        """
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)

        # configure pre-processing
        in_ = self.inputs[0]
        self.transformer = caffe.io.Transformer(
            {in_: self.blobs[in_].data.shape})
        # Images load as (H, W, K); the net expects (K, H, W).
        self.transformer.set_transpose(in_, (2, 0, 1))
        if mean is not None:
            self.transformer.set_mean(in_, mean)
        if input_scale is not None:
            self.transformer.set_input_scale(in_, input_scale)
        if raw_scale is not None:
            self.transformer.set_raw_scale(in_, raw_scale)
        if channel_swap is not None:
            self.transformer.set_channel_swap(in_, channel_swap)

        self.configure_crop(context_pad)

    def detect_windows(self, images_windows):
        """
        Do windowed detection over given images and windows. Windows are
        extracted then warped to the input dimensions of the net.

        Parameters
        ----------
        images_windows: (image filename, window list) iterable.

        Returns
        -------
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.
        """
        # Extract windows.
        window_inputs = []
        for image_fname, windows in images_windows:
            image = caffe.io.load_image(image_fname).astype(np.float32)
            for window in windows:
                window_inputs.append(self.crop(image, window))

        # Run through the net (warping windows to input dimensions).
        # Batch shape is (num_windows, K) + (H, W): K taken from the loaded
        # crops, H and W from the network's input blob.
        # NOTE(review): assumes at least one window was proposed -- an empty
        # windows list makes window_inputs[0] raise IndexError.
        in_ = self.inputs[0]
        caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
                            + self.blobs[in_].data.shape[2:],
                            dtype=np.float32)
        for ix, window_in in enumerate(window_inputs):
            caffe_in[ix] = self.transformer.preprocess(in_, window_in)
        out = self.forward_all(**{in_: caffe_in})
        predictions = out[self.outputs[0]].squeeze(axis=(2, 3))

        # Package predictions with images and windows.
        detections = []
        ix = 0
        for image_fname, windows in images_windows:
            for window in windows:
                detections.append({
                    'window': window,
                    'prediction': predictions[ix],
                    'filename': image_fname
                })
                ix += 1
        return detections

    def detect_selective_search(self, image_fnames):
        """
        Do windowed detection over Selective Search proposals by extracting
        the crop and warping to the input dimensions of the net.

        Parameters
        ----------
        image_fnames: list

        Returns
        -------
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.
        """
        import selective_search_ijcv_with_python as selective_search
        # Make absolute paths so MATLAB can find the files.
        image_fnames = [os.path.abspath(f) for f in image_fnames]
        windows_list = selective_search.get_windows(
            image_fnames,
            cmd='selective_search_rcnn'
        )
        # Run windowed detection on the selective search list.
        return self.detect_windows(zip(image_fnames, windows_list))

    def crop(self, im, window):
        """
        Crop a window from the image for detection. Include surrounding context
        according to the `context_pad` configuration.

        Parameters
        ----------
        im: H x W x K image ndarray to crop.
        window: bounding box coordinates as ymin, xmin, ymax, xmax.

        Returns
        -------
        crop: cropped window.
        """
        # Crop window from the image.
        crop = im[window[0]:window[2], window[1]:window[3]]

        if self.context_pad:
            box = window.copy()
            crop_size = self.blobs[self.inputs[0]].width  # assumes square
            # Scale that enlarges the box so that, after warping to
            # crop_size, a context_pad-pixel border of the crop is context.
            scale = crop_size / (1. * crop_size - self.context_pad * 2)
            # Crop a box + surrounding context.
            half_h = (box[2] - box[0] + 1) / 2.
            half_w = (box[3] - box[1] + 1) / 2.
            center = (box[0] + half_h, box[1] + half_w)
            scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
            box = np.round(np.tile(center, 2) + scaled_dims)
            full_h = box[2] - box[0] + 1
            full_w = box[3] - box[1] + 1
            scale_h = crop_size / full_h
            scale_w = crop_size / full_w
            pad_y = round(max(0, -box[0]) * scale_h)  # amount out-of-bounds
            pad_x = round(max(0, -box[1]) * scale_w)

            # Clip box to image dimensions.
            im_h, im_w = im.shape[:2]
            box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
            clip_h = box[2] - box[0] + 1
            clip_w = box[3] - box[1] + 1
            assert(clip_h > 0 and clip_w > 0)
            crop_h = round(clip_h * scale_h)
            crop_w = round(clip_w * scale_w)
            if pad_y + crop_h > crop_size:
                crop_h = crop_size - pad_y
            if pad_x + crop_w > crop_size:
                crop_w = crop_size - pad_x

            # collect with context padding and place in input
            # with mean padding
            # NOTE(review): box holds floats after np.round/np.clip; float
            # slice indices are rejected by NumPy >= 1.12 -- confirm the
            # targeted NumPy version.
            context_crop = im[box[0]:box[2], box[1]:box[3]]
            context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
            crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
            crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop

        return crop

    def configure_crop(self, context_pad):
        """
        Configure crop dimensions and amount of context for cropping.
        If context is included, make the special input mean for context padding.

        Parameters
        ----------
        context_pad : amount of context for cropping.
        """
        # crop dimensions
        in_ = self.inputs[0]
        tpose = self.transformer.transpose[in_]
        # Invert the preprocessing transpose so crop_dims is expressed in
        # input-image (pre-transpose) dimension order.
        inv_tpose = [tpose[t] for t in tpose]
        self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
        #.transpose(inv_tpose)
        # context padding
        self.context_pad = context_pad
        if self.context_pad:
            in_ = self.inputs[0]
            transpose = self.transformer.transpose.get(in_)
            channel_order = self.transformer.channel_swap.get(in_)
            raw_scale = self.transformer.raw_scale.get(in_)
            # Padding context crops needs the mean in unprocessed input space.
            # Undo the transpose, channel swap, and raw scaling in turn.
            mean = self.transformer.mean.get(in_)
            if mean is not None:
                inv_transpose = [transpose[t] for t in transpose]
                crop_mean = mean.copy().transpose(inv_transpose)
                if channel_order is not None:
                    channel_order_inverse = [channel_order.index(i)
                                             for i in range(crop_mean.shape[2])]
                    crop_mean = crop_mean[:, :, channel_order_inverse]
                if raw_scale is not None:
                    crop_mean /= raw_scale
                self.crop_mean = crop_mean
            else:
                self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| 39.585253 | 80 | 0.590803 |
import numpy as np
import os
import caffe
class Detector(caffe.Net):
def __init__(self, model_file, pretrained_file, mean=None,
input_scale=None, raw_scale=None, channel_swap=None,
context_pad=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.configure_crop(context_pad)
def detect_windows(self, images_windows):
window_inputs = []
for image_fname, windows in images_windows:
image = caffe.io.load_image(image_fname).astype(np.float32)
for window in windows:
window_inputs.append(self.crop(image, window))
in_ = self.inputs[0]
caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
+ self.blobs[in_].data.shape[2:],
dtype=np.float32)
for ix, window_in in enumerate(window_inputs):
caffe_in[ix] = self.transformer.preprocess(in_, window_in)
out = self.forward_all(**{in_: caffe_in})
predictions = out[self.outputs[0]].squeeze(axis=(2, 3))
detections = []
ix = 0
for image_fname, windows in images_windows:
for window in windows:
detections.append({
'window': window,
'prediction': predictions[ix],
'filename': image_fname
})
ix += 1
return detections
def detect_selective_search(self, image_fnames):
import selective_search_ijcv_with_python as selective_search
image_fnames = [os.path.abspath(f) for f in image_fnames]
windows_list = selective_search.get_windows(
image_fnames,
cmd='selective_search_rcnn'
)
return self.detect_windows(zip(image_fnames, windows_list))
def crop(self, im, window):
crop = im[window[0]:window[2], window[1]:window[3]]
if self.context_pad:
box = window.copy()
crop_size = self.blobs[self.inputs[0]].width
scale = crop_size / (1. * crop_size - self.context_pad * 2)
half_h = (box[2] - box[0] + 1) / 2.
half_w = (box[3] - box[1] + 1) / 2.
center = (box[0] + half_h, box[1] + half_w)
scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
box = np.round(np.tile(center, 2) + scaled_dims)
full_h = box[2] - box[0] + 1
full_w = box[3] - box[1] + 1
scale_h = crop_size / full_h
scale_w = crop_size / full_w
pad_y = round(max(0, -box[0]) * scale_h)
pad_x = round(max(0, -box[1]) * scale_w)
im_h, im_w = im.shape[:2]
box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
clip_h = box[2] - box[0] + 1
clip_w = box[3] - box[1] + 1
assert(clip_h > 0 and clip_w > 0)
crop_h = round(clip_h * scale_h)
crop_w = round(clip_w * scale_w)
if pad_y + crop_h > crop_size:
crop_h = crop_size - pad_y
if pad_x + crop_w > crop_size:
crop_w = crop_size - pad_x
context_crop = im[box[0]:box[2], box[1]:box[3]]
context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop
return crop
def configure_crop(self, context_pad):
in_ = self.inputs[0]
tpose = self.transformer.transpose[in_]
inv_tpose = [tpose[t] for t in tpose]
self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
self.context_pad = context_pad
if self.context_pad:
in_ = self.inputs[0]
transpose = self.transformer.transpose.get(in_)
channel_order = self.transformer.channel_swap.get(in_)
raw_scale = self.transformer.raw_scale.get(in_)
mean = self.transformer.mean.get(in_)
if mean is not None:
inv_transpose = [transpose[t] for t in transpose]
crop_mean = mean.copy().transpose(inv_transpose)
if channel_order is not None:
channel_order_inverse = [channel_order.index(i)
for i in range(crop_mean.shape[2])]
crop_mean = crop_mean[:, :, channel_order_inverse]
if raw_scale is not None:
crop_mean /= raw_scale
self.crop_mean = crop_mean
else:
self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| true | true |
f72b56c1b85626f170db0a70640a8d036cae722e | 858 | py | Python | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | import threading
# Thread running server processing loop
class ServerThread(threading.Thread):
    """Run a PCAS server's processing loop in a background thread.

    Typical usage -- serve for four seconds, then shut down::

        server = SimpleServer()
        server_thread = ServerThread(server)
        server_thread.start()
        time.sleep(4)
        server_thread.stop()
    """

    def __init__(self, server):
        """
        :param server: :class:`pcaspy.SimpleServer` object
        """
        super(ServerThread, self).__init__()
        self.server = server
        # Polled by run(); cleared by stop() to end the processing loop.
        self.running = True

    def run(self):
        """Process server requests repeatedly until :meth:`stop` is called."""
        while True:
            if not self.running:
                break
            self.server.process(0.1)

    def stop(self):
        """Ask the processing loop to exit after its current iteration."""
        self.running = False
| 22.578947 | 66 | 0.578089 | import threading
class ServerThread(threading.Thread):
def __init__(self, server):
super(ServerThread, self).__init__()
self.server = server
self.running = True
def run(self):
while self.running:
self.server.process(0.1)
def stop(self):
self.running = False
| true | true |
f72b56de21ac740c008099e3f07466941329206c | 6,793 | py | Python | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Lachnobacterium bovis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:51:00.619514
The undirected graph Lachnobacterium bovis has 2717 nodes and 257981 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06992 and has 23 connected components, where the component with most
nodes has 2631 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 163, the mean node degree is 189.90, and
the node degree mode is 8. The top 5 most central nodes are 140626.JHWB01000013_gene537
(degree 1145), 140626.JHWB01000019_gene960 (degree 1118), 140626.JHWB01000009_gene1319
(degree 1020), 140626.JHWB01000011_gene81 (degree 979) and 140626.JHWB01000022_gene2049
(degree 963).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnobacteriumBovis
# Then load the graph
graph = LachnobacteriumBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def LachnobacteriumBovis(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Lachnobacterium bovis graph.

    The graph is automatically retrieved from the STRING repository and
    cached on disk under *cache_path*.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Lachnobacterium bovis graph.

    References
    -----------------------
    Please cite: Szklarczyk et al., "STRING v11: protein--protein
    association networks with increased coverage", Nucleic Acids
    Research, 47(D1), D607--D613, 2019.

    Usage example
    ----------------------
    .. code:: python

        from ensmallen_graph.datasets.string import LachnobacteriumBovis
        graph = LachnobacteriumBovis()
        print(graph)
    """
    # Build the retriever object first, then invoke it to materialise
    # (download, cache and construct) the actual graph instance.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="LachnobacteriumBovis",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.565445 | 223 | 0.708229 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def LachnobacteriumBovis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="LachnobacteriumBovis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f72b570f213204c468b4f1b373c9ca56f111cbd6 | 1,128 | py | Python | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 51 | 2019-05-13T10:51:23.000Z | 2021-09-12T08:11:56.000Z | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 2 | 2020-10-08T16:28:45.000Z | 2021-06-23T03:27:42.000Z | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 3 | 2021-05-10T13:08:21.000Z | 2021-06-20T19:58:30.000Z | from ._base import Base, _rule
from .fullname_json import FullnameJson
from .values.text_val import TextVal
from .values.null import NULL
from .values.holder import Holder
from .values.true import TRUE
from .values.false import FALSE
class Field(FullnameJson, TextVal, NULL, Holder, TRUE, FALSE):
    """Grammar for a single field expression.

    Combines the reserved words, token lists and precedence rules of the
    mixin grammars so that a field can be a '*', a number, a quoted text
    value, NULL/TRUE/FALSE, a parameter placeholder, or a (possibly
    JSON-path) column name.
    """
    # Merge the reserved-word maps and token lists contributed by every mixin.
    reserved = {**Base.reserved, **TextVal.reserved,
                **FullnameJson.reserved, **NULL.reserved, **Holder.reserved, **TRUE.reserved, **FALSE.reserved}
    tokens = Base.tokens + TextVal.tokens + \
        FullnameJson.tokens + NULL.tokens + Holder.tokens + TRUE.tokens + FALSE.tokens
    precedence = FullnameJson.precedence

    # Tokens
    # rules
    # Start symbol handed to the parser generator.
    _start = 'field'

    # Literal field values are wrapped into a record object.
    @_rule('''field : STAR
                | NUMBER
                | TEXTVAL
                | NULL
                | TRUE
                | FALSE''')
    def p_field_items(self, p):
        p[0] = self.provider.new_record(p[1])

    # A placeholder becomes a bound query parameter.
    @_rule('field : HOLDER')
    def p_field_param(self, p):
        p[0] = self.provider.new_param()

    # A (possibly JSON) column name passes through unchanged.
    @_rule('field : fullname_json')
    def p_field_name(self, p):
        p[0] = p[1]
| 29.684211 | 111 | 0.606383 | from ._base import Base, _rule
from .fullname_json import FullnameJson
from .values.text_val import TextVal
from .values.null import NULL
from .values.holder import Holder
from .values.true import TRUE
from .values.false import FALSE
class Field(FullnameJson, TextVal, NULL, Holder, TRUE, FALSE):
reserved = {**Base.reserved, **TextVal.reserved,
**FullnameJson.reserved, **NULL.reserved, **Holder.reserved, **TRUE.reserved, **FALSE.reserved}
tokens = Base.tokens + TextVal.tokens + \
FullnameJson.tokens + NULL.tokens + Holder.tokens + TRUE.tokens + FALSE.tokens
precedence = FullnameJson.precedence
_start = 'field'
@_rule('''field : STAR
| NUMBER
| TEXTVAL
| NULL
| TRUE
| FALSE''')
def p_field_items(self, p):
p[0] = self.provider.new_record(p[1])
@_rule('field : HOLDER')
def p_field_param(self, p):
p[0] = self.provider.new_param()
@_rule('field : fullname_json')
def p_field_name(self, p):
p[0] = p[1]
| true | true |
f72b575498b298ee6a17f438e78286df5deb72f6 | 4,471 | py | Python | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | 2 | 2018-06-07T12:46:22.000Z | 2020-11-21T18:10:44.000Z | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | null | null | null | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | null | null | null | from twisted.application import internet, service
from twisted.web import server, resource, client
from twisted.internet import defer, reactor, threads, utils, task
from zope import interface
import yaml
import time
import cgi
import random
from distributex.backends import in_memory_backend, memcached_backend
class SiteRoot(resource.Resource):
    """HTTP resource implementing a distributed lock service.

    All endpoints are GET requests carrying ?pool=...&host=...:
      /wait    - poll until a lock in the pool is acquired, answer YES
      /release - release the lock held by host, answer OK
      /get     - try to acquire a lock without waiting, answer YES/NO
    Invalid pools/hosts/calls are answered with INVALID.
    """
    isLeaf = True
    addSlash = True

    def __init__(self, config):
        # Map of configured backend names to their implementations.
        self.backends = {
            'memcache': memcached_backend.MemcachedBackend,
            'inmemory': in_memory_backend.InMemoryDictBackend
        }
        # Fix: yaml.load() without a Loader is unsafe (arbitrary object
        # construction) and is an error in PyYAML >= 6; the bare open()
        # also leaked the file handle.
        with open(config) as config_file:
            self.config = yaml.safe_load(config_file)
        self.ready = False
        reactor.callWhenRunning(self.setup)

    @defer.inlineCallbacks
    def setup(self):
        """Initialise the configured backend and construct the lock pools."""
        self.backend = self.backends[
            self.config.get('backend', 'inmemory')
        ](self.config)

        self.pools = {}
        # Construct our pools; an empty server list means "any host allowed".
        for pool in self.config.get('pools', []):
            if 'servers' in pool:
                servers = pool['servers'].replace(' ', '').split(',')
            else:
                servers = []
            self.pools[pool['name']] = servers

            expire = pool.get('expire', 1800)
            maxlocks = pool.get('maxlocks', 1)

            yield defer.maybeDeferred(
                self.backend.add_pool, pool['name'], expire, maxlocks=maxlocks
            )
        self.ready = True
        defer.returnValue(None)

    def request_finish(self, request, result):
        """Write *result* to the client and close the request."""
        request.write(result)
        request.finish()

    def stop_timer(self, timer):
        """Stop *timer* if it is still running."""
        if timer.running:
            timer.stop()

    def wait_finish(self, lock, request, timer):
        """Lock acquired while waiting: stop polling and answer YES."""
        self.stop_timer(timer)
        self.request_finish(request, 'YES')

    def wait_bailout(self, error, request, timer):
        """Waiting failed: stop polling and answer NO."""
        self.stop_timer(timer)
        self.request_finish(request, 'NO')

    @defer.inlineCallbacks
    def wait_lock(self, d, pool, host):
        """Poll the backend once; fire *d* when the lock is obtained."""
        lock = yield defer.maybeDeferred(
            self.backend.get_lock, pool, host
        )
        if lock:
            d.callback(True)

    def request_wait(self, request, pool, host):
        """Poll for a lock (with ~1s jittered interval) until acquired
        or the client disconnects."""
        d = defer.Deferred()
        timer = task.LoopingCall(self.wait_lock, d, pool, host)
        d.addCallback(self.wait_finish, request, timer)
        d.addErrback(self.wait_bailout, request, timer)
        # Stop polling if the client goes away before acquiring the lock.
        request.notifyFinish().addErrback(
            lambda _: self.stop_timer(timer)
        )
        timer.start(1 + random.random(), True)
        return d

    def request_release(self, request, pool, host):
        """Release the lock held by *host* in *pool* and answer OK."""
        defer.maybeDeferred(
            self.backend.release_lock, pool, host
        ).addCallback(lambda _: self.request_finish(request, 'OK'))

    def request_getlock(self, request, pool, host):
        """Try to acquire a lock without waiting; answer YES or NO."""
        defer.maybeDeferred(
            self.backend.get_lock, pool, host
        ).addCallback(
            lambda l: self.request_finish(request, l and 'YES' or 'NO')
        )

    def handle_request(self, request):
        """Validate a client request and dispatch it to wait/release/get."""
        if not self.ready:
            # Backend still initialising: retry on the next reactor turn.
            reactor.callLater(0, self.handle_request, request)
            return
        # Fix: cgi.escape was removed in Python 3.8; html.escape with
        # quote=False performs the identical escaping.
        from html import escape

        call = request.path.replace('/', '')
        if not (('host' in request.args) and ('pool' in request.args)):
            self.request_finish(request, 'INVALID')
            return

        host = escape(request.args["host"][0], quote=False)
        pool = escape(request.args["pool"][0], quote=False)

        if pool in self.pools:
            # A non-empty server list restricts the pool to those hosts.
            if self.pools[pool] and (host not in self.pools[pool]):
                self.request_finish(request, 'INVALID')
                return
        else:
            self.request_finish(request, 'INVALID')
            return

        if call == 'wait':
            # Small random delay spreads out simultaneous waiters.
            reactor.callLater(random.random()/5, self.request_wait,
                              request, pool, host)
        elif call == 'release':
            self.request_release(request, pool, host)
        elif call == 'get':
            self.request_getlock(request, pool, host)
        else:
            self.request_finish(request, 'INVALID')

    def render_GET(self, request):
        """Twisted entry point; the response is finished asynchronously."""
        self.handle_request(request)
        return server.NOT_DONE_YET
| 29.609272 | 78 | 0.569895 | from twisted.application import internet, service
from twisted.web import server, resource, client
from twisted.internet import defer, reactor, threads, utils, task
from zope import interface
import yaml
import time
import cgi
import random
from distributex.backends import in_memory_backend, memcached_backend
class SiteRoot(resource.Resource):
isLeaf = True
addSlash = True
def __init__(self, config):
self.backends = {
'memcache': memcached_backend.MemcachedBackend,
'inmemory': in_memory_backend.InMemoryDictBackend
}
self.config = yaml.load(open(config))
self.ready = False
reactor.callWhenRunning(self.setup)
@defer.inlineCallbacks
def setup(self):
self.backend = self.backends[
self.config.get('backend', 'inmemory')
](self.config)
self.pools = {}
for pool in self.config.get('pools', []):
if 'servers' in pool:
servers = pool['servers'].replace(' ', '').split(',')
else:
servers = []
self.pools[pool['name']] = servers
expire = pool.get('expire', 1800)
maxlocks = pool.get('maxlocks', 1)
yield defer.maybeDeferred(
self.backend.add_pool, pool['name'], expire, maxlocks=maxlocks
)
self.ready = True
defer.returnValue(None)
def request_finish(self, request, result):
request.write(result)
request.finish()
def stop_timer(self, timer):
if timer.running:
timer.stop()
def wait_finish(self, lock, request, timer):
self.stop_timer(timer)
self.request_finish(request, 'YES')
def wait_bailout(self, error, request, timer):
self.stop_timer(timer)
self.request_finish(request, 'NO')
@defer.inlineCallbacks
def wait_lock(self, d, pool, host):
lock = yield defer.maybeDeferred(
self.backend.get_lock, pool, host
)
if lock:
d.callback(True)
def request_wait(self, request, pool, host):
d = defer.Deferred()
timer = task.LoopingCall(self.wait_lock, d, pool, host)
d.addCallback(self.wait_finish, request, timer)
d.addErrback(self.wait_bailout, request, timer)
request.notifyFinish().addErrback(
lambda _: self.stop_timer(timer)
)
timer.start(1 + random.random(), True)
return d
def request_release(self, request, pool, host):
d = defer.maybeDeferred(
self.backend.release_lock, pool, host
).addCallback(lambda _: self.request_finish(request, 'OK'))
def request_getlock(self, request, pool, host):
d = defer.maybeDeferred(
self.backend.get_lock, pool, host
).addCallback(
lambda l: self.request_finish(request, l and 'YES' or 'NO')
)
def handle_request(self, request):
if not self.ready:
reactor.callLater(0, self.handle_request, request)
else:
call = request.path.replace('/', '')
if not (('host' in request.args) and ('pool' in request.args)):
self.request_finish(request, 'INVALID')
return
host = cgi.escape(request.args["host"][0])
pool = cgi.escape(request.args["pool"][0])
if pool in self.pools:
if self.pools[pool]:
if not(host in self.pools[pool]):
self.request_finish(request, 'INVALID')
return
else:
self.request_finish(request, 'INVALID')
return
if call == 'wait':
reactor.callLater(random.random()/5, self.request_wait,
request, pool, host)
elif call == 'release':
self.request_release(request, pool, host)
elif call == 'get':
self.request_getlock(request, pool, host)
else:
self.request_finish(request, 'INVALID')
def render_GET(self, request):
self.handle_request(request)
return server.NOT_DONE_YET
| true | true |
f72b5891618054c5f8898c72c812d2bae47239f4 | 3,403 | py | Python | note/meiduo34/mall/apps/users/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | note/meiduo34/mall/apps/users/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | note/meiduo34/mall/apps/users/models.py | gaosong666/taobao | cec3be71376fb94dc38553360253b70e88855594 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
# from itsdangerous import Serializer
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from mall import settings
from utils.models import BaseModel
class User(AbstractUser):
    """Custom user model with a unique mobile number, an e-mail
    verification flag and an optional default shipping address."""

    # Unique mobile phone number.
    mobile = models.CharField(max_length=11, unique=True, verbose_name='手机号')
    # Whether the user's e-mail address has been verified.
    email_active = models.BooleanField(default=False, verbose_name='邮箱验证状态')
    # Default shipping address; SET_NULL keeps the user if the address is deleted.
    default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True,
                                        on_delete=models.SET_NULL, verbose_name='默认地址')

    class Meta:
        db_table = 'tb_users'
        verbose_name = '用户'
        verbose_name_plural = verbose_name

    def generate_verify_email_url(self):
        """Return an e-mail verification URL containing a signed token
        (valid for one hour) that encodes this user's id and e-mail."""
        serializer = Serializer(settings.SECRET_KEY, 3600)
        # Serialize the identifying user information into the token.
        token = serializer.dumps({'user_id': self.id, 'email': self.email})
        # dumps() returns bytes; decode before embedding in the URL.
        verify_url = 'http://www.meiduo.site:8080/success_verify_email.html?token=' + token.decode()
        return verify_url

    @staticmethod
    def check_verify_email_token(token):
        """Validate *token* and return the matching User, or None when the
        token is invalid/expired or no user matches the embedded data."""
        serializer = Serializer(settings.SECRET_KEY, 3600)
        try:
            # Load and verify the signed token.
            result = serializer.loads(token)
        except BadData:
            return None
        user_id = result.get('user_id')
        email = result.get('email')
        try:
            return User.objects.get(id=user_id, email=email)
        except User.DoesNotExist:
            return None
class Address(BaseModel):
    """
    User shipping address.
    """
    # Owning user; deleting the user cascades to their addresses.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', verbose_name='用户')
    # Short label for the address (e.g. "home", "office").
    title = models.CharField(max_length=20, verbose_name='地址名称')
    # Name of the person receiving the parcel.
    receiver = models.CharField(max_length=20, verbose_name='收货人')
    # Administrative region hierarchy: province / city / district.
    province = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='province_addresses', verbose_name='省')
    city = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='city_addresses', verbose_name='市')
    district = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='district_addresses', verbose_name='区')
    # Street-level address detail.
    place = models.CharField(max_length=50, verbose_name='地址')
    # Contact numbers: mobile is required, landline is optional.
    mobile = models.CharField(max_length=11, verbose_name='手机')
    tel = models.CharField(max_length=20, null=True, blank=True, default='', verbose_name='固定电话')
    email = models.CharField(max_length=30, null=True, blank=True, default='', verbose_name='电子邮箱')
    # Soft-delete flag: hidden addresses stay in the database.
    is_deleted = models.BooleanField(default=False, verbose_name='逻辑删除')

    class Meta:
        db_table = 'tb_address'
        verbose_name = '用户地址'
        verbose_name_plural = verbose_name
        # Most recently updated addresses first.
        ordering = ['-update_time']
from django.db import models
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from mall import settings
from utils.models import BaseModel
class User(AbstractUser):
mobile = models.CharField(max_length=11, unique=True, verbose_name='手机号')
email_active = models.BooleanField(default=False, verbose_name='邮箱验证状态')
default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True,
on_delete=models.SET_NULL, verbose_name='默认地址')
class Meta:
db_table = 'tb_users'
verbose_name = '用户'
verbose_name_plural = verbose_name
def generate_verify_email_url(self):
serializer = Serializer(settings.SECRET_KEY, 3600)
token = serializer.dumps({'user_id': self.id, 'email': self.email})
verify_url = 'http://www.meiduo.site:8080/success_verify_email.html?token=' + token.decode()
return verify_url
@staticmethod
def check_verify_email_token(token):
serializer = Serializer(settings.SECRET_KEY, 3600)
try:
result = serializer.loads(token)
except BadData:
return None
else:
user_id = result.get('user_id')
email = result.get('email')
try:
user = User.objects.get(id=user_id, email=email)
except User.DoesNotExist:
user = None
return user
class Address(BaseModel):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', verbose_name='用户')
title = models.CharField(max_length=20, verbose_name='地址名称')
receiver = models.CharField(max_length=20, verbose_name='收货人')
province = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='province_addresses', verbose_name='省')
city = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='city_addresses', verbose_name='市')
district = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='district_addresses', verbose_name='区')
place = models.CharField(max_length=50, verbose_name='地址')
mobile = models.CharField(max_length=11, verbose_name='手机')
tel = models.CharField(max_length=20, null=True, blank=True, default='', verbose_name='固定电话')
email = models.CharField(max_length=30, null=True, blank=True, default='', verbose_name='电子邮箱')
is_deleted = models.BooleanField(default=False, verbose_name='逻辑删除')
class Meta:
db_table = 'tb_address'
verbose_name = '用户地址'
verbose_name_plural = verbose_name
ordering = ['-update_time'] | true | true |
f72b58f5171215cd31d5b12f28261a896f30aa4c | 7,827 | py | Python | research/object_detection/training/TFLite_detection_video.py | geometrikal/tensorflow_models | 44a82f3f18a2e62b1cd99b94922f752be0672f46 | [
"Apache-2.0"
] | null | null | null | research/object_detection/training/TFLite_detection_video.py | geometrikal/tensorflow_models | 44a82f3f18a2e62b1cd99b94922f752be0672f46 | [
"Apache-2.0"
] | null | null | null | research/object_detection/training/TFLite_detection_video.py | geometrikal/tensorflow_models | 44a82f3f18a2e62b1cd99b94922f752be0672f46 | [
"Apache-2.0"
] | null | null | null | ######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/2/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a
# video. It draws boxes and scores around the objects of interest in each frame
# from the video.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import importlib.util
def increase_brightness(img, value=30):
    """Return a copy of BGR image *img* with its HSV value channel raised
    by *value*, saturating at 255 instead of wrapping around."""
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    cap = 255 - value
    # Pixels that would overflow are clamped; the rest are shifted up.
    val[val > cap] = 255
    val[val <= cap] += value
    return cv2.cvtColor(cv2.merge((hue, sat, val)), cv2.COLOR_HSV2BGR)
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
                    required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
                    default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
                    default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
                    default=0.5)
parser.add_argument('--video', help='Name of the video file',
                    default='test.mp4')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
                    action='store_true')
parser.add_argument('--subsample', type=int, default=1, help='Subsample the input image')
parser.add_argument('--offset', type=int, default=0, help='Offset into file')

args = parser.parse_args()

MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
VIDEO_NAME = args.video
min_conf_threshold = float(args.threshold)
use_TPU = args.edgetpu

# Import TensorFlow libraries.
# Prefer the lightweight tflite_runtime when installed, else fall back to
# the interpreter bundled with full TensorFlow. With a Coral Edge TPU,
# also import the load_delegate helper.
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
    from tflite_runtime.interpreter import Interpreter
    if use_TPU:
        from tflite_runtime.interpreter import load_delegate
else:
    from tensorflow.lite.python.interpreter import Interpreter
    if use_TPU:
        from tensorflow.lite.python.interpreter import load_delegate

# Edge TPU models use a different default filename.
if use_TPU:
    if (GRAPH_NAME == 'detect.tflite'):
        GRAPH_NAME = 'edgetpu.tflite'

# Resolve all paths relative to the current working directory.
CWD_PATH = os.getcwd()
VIDEO_PATH = os.path.join(CWD_PATH, VIDEO_NAME)
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, GRAPH_NAME)
PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, LABELMAP_NAME)

# Load the label map.
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]

# The COCO "starter model" label map begins with a dummy '???' entry.
if labels[0] == '???':
    del(labels[0])

# Load the TensorFlow Lite model (with the Edge TPU delegate if requested).
if use_TPU:
    interpreter = Interpreter(model_path=PATH_TO_CKPT,
                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)

interpreter.allocate_tensors()

# Get model input/output details.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]

floating_model = (input_details[0]['dtype'] == np.float32)

input_mean = 127.5
input_std = 127.5

# Open video file.
video = cv2.VideoCapture(VIDEO_PATH)
imW = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
imH = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Bug fix: the writer must match the size of the frames actually written.
# With --subsample > 1 the frames are shrunk before writing, so size the
# output accordingly. Also use an explicit codec instead of fourcc -1,
# which only opens a codec-selection dialog on Windows and fails elsewhere.
if args.subsample > 1:
    out_size = (imW // args.subsample, imH // args.subsample)
else:
    out_size = (imW, imH)
out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 20.0, out_size)

fidx = 0
while video.isOpened():
    # Acquire frame and resize to expected shape [1xHxWx3].
    ret, frame = video.read()
    if not ret:
        print('Reached the end of the video!')
        break
    print(fidx)
    fidx += 1
    if fidx < args.offset:
        continue
    if args.subsample > 1:
        imH, imW, _ = frame.shape
        frame = cv2.resize(frame, (imW // args.subsample, imH // args.subsample))
        # Bug fix: detection boxes are drawn on the resized frame, so the
        # scaling dimensions must reflect the resized size as well.
        imH, imW = frame.shape[:2]

    # frame = increase_brightness(frame, value=70)
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. non-quantized).
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform the actual detection by running the model with the image as input.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # Retrieve detection results.
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]    # bounding boxes
    classes = interpreter.get_tensor(output_details[1]['index'])[0]  # class indices
    scores = interpreter.get_tensor(output_details[2]['index'])[0]   # confidences
    num = interpreter.get_tensor(output_details[3]['index'])[0]      # number of detections

    # Draw a box and label for every detection above the threshold.
    for i in range(int(num)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
            # Clamp box coordinates to the image bounds.
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))

            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0), 4)

            # Draw the label just above the box, clamped to the window top.
            object_name = labels[int(classes[i])]
            label = '%s: %d%%' % (object_name, int(scores[i]*100))
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
            label_ymin = max(ymin, labelSize[1] + 10)
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)

    # All the results have been drawn on the frame: save and display it.
    out.write(frame)
    cv2.imshow('Object detector', frame)

    # Press 'q' to quit.
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
out.release()
video.release()
cv2.destroyAllWindows()
| 37.995146 | 180 | 0.689408 | ument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--video', help='Name of the video file',
default='test.mp4')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
parser.add_argument('--subsample', type=int, default=1, help='Subsample the input image')
parser.add_argument('--offset', type=int, default=0, help='Offset into file')
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
VIDEO_NAME = args.video
min_conf_threshold = float(args.threshold)
use_TPU = args.edgetpu
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
if use_TPU:
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
CWD_PATH = os.getcwd()
VIDEO_PATH = os.path.join(CWD_PATH,VIDEO_NAME)
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
if labels[0] == '???':
del(labels[0])
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
video = cv2.VideoCapture(VIDEO_PATH)
imW = video.get(cv2.CAP_PROP_FRAME_WIDTH)
imH = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
out = cv2.VideoWriter('output.mp4', -1, 20.0, (int(imW),int(imH)))
fidx = 0
while(video.isOpened()):
ret, frame = video.read()
if not ret:
print('Reached the end of the video!')
break
print(fidx)
fidx += 1
if fidx < args.offset:
continue
if args.subsample > 1:
imH, imW, _ = frame.shape
frame = cv2.resize(frame, (imW // args.subsample, imH // args.subsample))
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
boxes = interpreter.get_tensor(output_details[0]['index'])[0]
classes = interpreter.get_tensor(output_details[1]['index'])[0]
scores = interpreter.get_tensor(output_details[2]['index'])[0]
num = interpreter.get_tensor(output_details[3]['index'])[0]
for i in range(int(num)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)
object_name = labels[int(classes[i])]
label = '%s: %d%%' % (object_name, int(scores[i]*100))
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
label_ymin = max(ymin, labelSize[1] + 10)
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
out.write(frame)
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
out.release()
video.release()
cv2.destroyAllWindows()
| true | true |
f72b5994c29c19a3357fc7ba21a214ddbce1dcfd | 8,591 | py | Python | my_test/get_graph.py | RuoyuX-2018/6998DL | a9b75ee63a92c6824db9ac25cc6d931713e0cae5 | [
"BSD-3-Clause"
] | null | null | null | my_test/get_graph.py | RuoyuX-2018/6998DL | a9b75ee63a92c6824db9ac25cc6d931713e0cae5 | [
"BSD-3-Clause"
] | null | null | null | my_test/get_graph.py | RuoyuX-2018/6998DL | a9b75ee63a92c6824db9ac25cc6d931713e0cae5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 15:47:45 2021
@author: xuery
"""
import cv2
import time
import numpy as np
import os
import copy
import pickle
import random
import math
import matplotlib.pyplot as plt
from scipy import spatial
from skimage import morphology
from sklearn.mixture import GaussianMixture
from shapely.geometry import LineString, Point
from mpl_toolkits.mplot3d import Axes3D
class get_graph():
    """Extract an ordered point path (and its crossings) of a rope from an image.

    Pipeline: binarise the image, collapse it to one centroid per grid cell,
    chain the centroids into a walking order by always picking the neighbour
    that best continues the current direction, then detect segment crossings
    and map the 2-D path onto simulator particle positions.
    """

    def __init__(self, raw_img):
        # Fixed working resolution so the grid stride tiles the image exactly.
        self.raw_img = cv2.resize(raw_img, (512, 512))
        self.stride = 30          # grid-cell edge length in pixels
        self.all_centroids = []   # per-cell centroids, filled by ske2point()

    def get_binary(self):
        """Return an inverted binary mask: rope (dark) pixels become 255."""
        gray_img = cv2.cvtColor(self.raw_img, cv2.COLOR_RGB2GRAY)
        _, binary_img = cv2.threshold(gray_img, 100, 255, cv2.THRESH_BINARY_INV)
        return binary_img

    def ske2point(self):
        """Collapse the binary mask to one centroid per stride x stride cell."""
        skeleton_img = self.get_binary()
        img_w, img_h = skeleton_img.shape
        for i in range(img_w // self.stride):
            for j in range(img_h // self.stride):
                small_img = skeleton_img[i * self.stride:(i + 1) * self.stride,
                                         j * self.stride:(j + 1) * self.stride]
                x_idx, y_idx = small_img.nonzero()
                if len(x_idx) == 0:
                    continue  # cell contains no rope pixels
                # Cell-local mean offset mapped back to full-image coordinates.
                x_center, y_center = sum(x_idx) / len(x_idx) + i * self.stride, \
                                     sum(y_idx) / len(x_idx) + j * self.stride
                # all_centroids stores the pixel indices of the points.
                self.all_centroids.append(np.array([int(x_center), int(y_center)]))
        self.all_centroids = np.array(self.all_centroids)
        self.centroids_copy = copy.deepcopy(self.all_centroids)

    def optimization(self, save_path=None):
        """Delete centroids that fall on background pixels (off-rope noise)."""
        noise_idx = []
        binary_img = self.get_binary()
        for i in range(len(self.all_centroids)):
            if binary_img[int(self.all_centroids[i][0])][int(self.all_centroids[i][1])] == 0:
                noise_idx.append(i)
        self.all_centroids = np.delete(self.all_centroids, noise_idx, axis=0)
        if save_path is not None:
            # Fixed NameError: the original passed the undefined bare name
            # `all_centroids` instead of the instance attribute.
            # NOTE(review): img_point_write is not defined in the visible
            # part of this class -- confirm it exists elsewhere.
            self.img_point_write(save_path, self.all_centroids, binary_img)

    def visualization(self):
        """Clean the centroids via optimization() and scatter-plot them."""
        self.optimization()
        plt.plot(self.all_centroids[:, 0], self.all_centroids[:, 1], 'bo', ms=5)
        plt.show()

    def graph(self, num_neigh_points=10):
        """Chain the centroids into an ordered walk along the rope.

        Greedily extends the path from a fixed start point, always taking the
        neighbour whose direction deviates least from the current heading.
        Consumed points are removed so the walk cannot revisit them.
        """
        self.ske2point()
        self.visualization()
        tree = spatial.KDTree(self.all_centroids)
        start_point = [500, 0]
        neigh_points_idx, neigh_points = self.find_neigh_points(tree, start_point, 2)
        next_point = neigh_points[0]
        query_pair = [start_point, next_point]
        point_order = query_pair
        while True:
            if len(self.all_centroids) < num_neigh_points:
                break
            if len(self.all_centroids) == 30:
                break
            # Rebuild the tree each step because points are popped below.
            tree = spatial.KDTree(self.all_centroids)
            neigh_points_idx, neigh_points = self.find_neigh_points(
                tree, query_pair[1], num_neigh_points)
            idx, next_point = self.find_path(query_pair, neigh_points)
            if idx == -99:  # sentinel: no acceptable continuation found
                print("end of construction...")
                return point_order
            query_pair = [query_pair[1], next_point]
            point_order.append(next_point)
            # Pop out the walked point so it cannot be selected again.
            self.all_centroids = self.all_centroids.tolist()
            self.all_centroids.pop(neigh_points_idx[idx])
            self.all_centroids = np.array(self.all_centroids)
            print("remain lens of points: ", len(self.all_centroids))
        return point_order

    def find_neigh_points(self, tree, centroid, num_points):
        """Return indices and coordinates of the nearest neighbours.

        The first result of KDTree.query is the query point itself (distance
        0), so it is sliced off before returning.
        """
        dist, near_points_idx = tree.query(centroid, k=num_points)
        near_points = self.all_centroids[near_points_idx]
        return near_points_idx[1:], near_points[1:]

    def find_path(self, query_pair, neigh_points):
        """Pick the neighbour that best continues the current direction.

        Returns (index, point); index is -99 if no neighbour within range
        continues the path.
        """
        v_query = query_pair[1] - query_pair[0]
        next_point = np.zeros_like(query_pair[0])
        angle_diff = np.pi
        next_idx = -99
        for i in range(len(neigh_points)):
            v_compare = query_pair[1] - neigh_points[i]
            # If the dist of a neigh_point is more than 70, skip it (noise).
            if np.linalg.norm(v_compare) > 70:
                continue
            # Angle between the incoming direction and the candidate segment;
            # a straight continuation gives an angle close to pi.
            unit_v1 = v_query / np.linalg.norm(v_query)
            unit_v2 = v_compare / np.linalg.norm(v_compare)
            dot_product = np.dot(unit_v1, unit_v2)
            angle = np.arccos(dot_product)  # radians
            if np.pi - angle < angle_diff:
                next_point = neigh_points[i]
                angle_diff = np.pi - angle
                next_idx = i
        return next_idx, next_point

    def find_crossing(self, point_order, visual=False):
        """Find all self-intersections of the ordered path.

        Returns a list of [intersection_xy, start_of_segment_a,
        start_of_segment_b] entries.
        """
        pairs = []
        crossing = []
        # Build consecutive point pairs (the path segments).
        for i in range(len(point_order) - 1):
            new_pair = np.array([point_order[i], point_order[i + 1]])
            pairs.append(new_pair)
        # Test every segment against every later segment.
        for i in range(len(pairs)):
            for j in range(len(pairs) - i):
                intersec = self.intersection(pairs[i], pairs[j + i])
                if intersec is not False:
                    crossing.append([intersec, pairs[i][0], pairs[j + i][0]])
        if visual == True:
            self.visualization_final_graph(point_order, crossing)
        return crossing

    # If no intersection, return False, else return the intersection point.
    def intersection(self, pair1, pair2):
        """Intersect two segments; adjacent segments (shared endpoint) don't count."""
        # If the two pairs share a point, they trivially touch: ignore.
        if np.all(pair1[0] - pair2[0] == 0) or np.all(pair1[1] - pair2[0] == 0) \
                or np.all(pair1[0] - pair2[1] == 0) or np.all(pair1[1] - pair2[1] == 0):
            return False
        line1 = LineString([pair1[0], pair1[1]])
        line2 = LineString([pair2[0], pair2[1]])
        intersection_point = line1.intersection(line2)
        if intersection_point.is_empty:  # no intersection
            return False
        else:
            return np.array([intersection_point.x, intersection_point.y])

    def visualization_final_graph(self, point_order, crossing):
        """Plot the ordered path with crossings highlighted in red."""
        x, y = zip(*point_order)
        plt.plot(x, y, '-o', zorder=1)
        crossing = np.array(crossing)
        c_x = crossing[:, 0, 0]
        c_y = crossing[:, 0, 1]
        plt.scatter(c_x, c_y, 20, 'r', zorder=2)
        plt.show()

    def trajectory(self, env, sa, point_order, crossing, stride):
        """Map every `stride`-th image point onto a simulator particle chain.

        Starting from a random existing particle, each step advances by the
        inter-particle distance along the image-path direction. Returns the
        resulting (N, 4) particle array and shows a 3-D plot of it.
        """
        picker_pos, particle_pos = sa.action_space.Picker._get_pos()
        print(particle_pos)
        particle_dist_2d = np.linalg.norm(particle_pos[0] - particle_pos[1])
        # Fixed off-by-one: random.randint's upper bound is inclusive, so the
        # original `randint(0, len(particle_pos))` could index past the end.
        init_particle = particle_pos[random.randrange(len(particle_pos))].tolist()
        particle_list = []
        particle_list.append(init_particle)
        for i in range(len(point_order) - stride):
            if i % stride != 0:
                continue
            curr_particle = particle_list[i // stride]
            y_o = point_order[i + stride][1] - point_order[i][1]
            x_o = point_order[i + stride][0] - point_order[i][0]
            # Guard zero displacements *before* dividing; the original
            # computed y_o / x_o first and could raise ZeroDivisionError.
            if x_o == 0:
                x_o = 0.1
            if y_o == 0:
                y_o = 0.1
            orientation = abs(y_o / x_o)
            theta = math.atan(orientation)
            x = curr_particle[0] + math.cos(theta) * particle_dist_2d * x_o / abs(x_o)
            y = curr_particle[2] + math.sin(theta) * particle_dist_2d * y_o / abs(y_o)
            next_particle = [x, curr_particle[1], y, curr_particle[3]]
            particle_list.append(next_particle)
        # Hand-tuned height bumps so over-crossing particles sit above the
        # rest (indices chosen manually; TODO: derive them from `crossing`).
        for i in range(len(particle_list)):
            if i in (3, 5, 9, 11):
                particle_list[i][1] = 0.0145
            if i in (4, 10):
                particle_list[i][1] = 0.0245
        particle_list = np.array(particle_list)
        particle_x = particle_list[:, 0]
        particle_z = particle_list[:, 1]
        particle_y = particle_list[:, 2]
        fig = plt.figure()
        ax2 = Axes3D(fig)
        ax2.scatter3D(particle_x, particle_y, particle_z, cmap='Blues')
        ax2.plot3D(particle_x, particle_y, particle_z, 'gray')
        plt.show()
        return particle_list
import cv2
import time
import numpy as np
import os
import copy
import pickle
import random
import math
import matplotlib.pyplot as plt
from scipy import spatial
from skimage import morphology
from sklearn.mixture import GaussianMixture
from shapely.geometry import LineString, Point
from mpl_toolkits.mplot3d import Axes3D
class get_graph():
def __init__(self, raw_img):
self.raw_img = cv2.resize(raw_img, (512,512))
self.stride = 30
self.all_centroids = []
def get_binary(self):
gray_img = cv2.cvtColor(self.raw_img, cv2.COLOR_RGB2GRAY)
_, binary_img = cv2.threshold(gray_img, 100, 255, cv2.THRESH_BINARY_INV)
return binary_img
def ske2point(self):
skeleton_img = self.get_binary()
img_w, img_h = skeleton_img.shape
for i in range(img_w//self.stride):
for j in range(img_h//self.stride):
small_img = skeleton_img[i*self.stride:(i+1)*self.stride, j*self.stride:(j+1)*self.stride]
x_idx, y_idx = small_img.nonzero()
if len(x_idx) == 0:
continue
x_center, y_center = sum(x_idx) / len(x_idx) + i * self.stride,\
sum(y_idx) / len(x_idx) + j * self.stride
self.all_centroids.append(np.array([int(x_center), int(y_center)]))
self.all_centroids = np.array(self.all_centroids)
self.centroids_copy = copy.deepcopy(self.all_centroids)
def optimization(self, save_path=None):
noise_idx = []
binary_img = self.get_binary()
for i in range(len(self.all_centroids)):
if binary_img[int(self.all_centroids[i][0])][int(self.all_centroids[i][1])] == 0:
noise_idx.append(i)
self.all_centroids = np.delete(self.all_centroids, noise_idx, axis=0)
if save_path != None:
self.img_point_write(save_path, all_centroids, binary_img)
def visualization(self):
self.optimization()
plt.plot(self.all_centroids[:,0], self.all_centroids[:,1], 'bo', ms=5)
plt.show()
def graph(self, num_neigh_points = 10):
self.ske2point()
self.visualization()
tree = spatial.KDTree(self.all_centroids)
start_point = [500, 0]
neigh_points_idx, neigh_points = self.find_neigh_points(tree, start_point, 2)
next_point = neigh_points[0]
query_pair = [start_point, next_point]
point_order = query_pair
while True:
if len(self.all_centroids) < num_neigh_points:
break
if len(self.all_centroids) == 30:
break
tree = spatial.KDTree(self.all_centroids)
neigh_points_idx, neigh_points = self.find_neigh_points(tree, query_pair[1], num_neigh_points)
idx, next_point = self.find_path(query_pair, neigh_points)
if idx == -99:
print("end of construction...")
return point_order
query_pair = [query_pair[1], next_point]
point_order.append(next_point)
#pop out the walked point
self.all_centroids = self.all_centroids.tolist()
self.all_centroids.pop(neigh_points_idx[idx])
self.all_centroids = np.array(self.all_centroids)
print("remain lens of points: ", len(self.all_centroids))
return point_order
def find_neigh_points(self, tree, centroid, num_points):
dist, near_points_idx = tree.query(centroid, k=num_points)
near_points = self.all_centroids[near_points_idx]
return near_points_idx[1:], near_points[1:]
def find_path(self, query_pair, neigh_points):
v_query = query_pair[1] - query_pair[0]
next_point = np.zeros_like(query_pair[0])
angle_diff = np.pi
next_idx = -99
for i in range(len(neigh_points)):
v_compare = query_pair[1] - neigh_points[i]
#if the dist of all neigh_points is more than 65, break. This setting is for noise
if np.linalg.norm(v_compare) >70:
continue
#calculate the angle of two vectors
unit_v1 = v_query / np.linalg.norm(v_query)
unit_v2 = v_compare / np.linalg.norm(v_compare)
dot_product = np.dot(unit_v1, unit_v2)
angle = np.arccos(dot_product) #radian
if np.pi - angle < angle_diff:
next_point = neigh_points[i]
angle_diff = np.pi - angle
next_idx = i
return next_idx, next_point
def find_crossing(self, point_order, visual=False):
#create lines
pairs = []
crossing = []
for i in range(len(point_order)-1):
new_pair = np.array([point_order[i], point_order[i+1]])
pairs.append(new_pair)
for i in range(len(pairs)):
for j in range(len(pairs)-i):
intersec = self.intersection(pairs[i], pairs[j+i])
if intersec is not False:
crossing.append([intersec, pairs[i][0], pairs[j+i][0]])
if visual == True:
self.visualization_final_graph(point_order, crossing)
return crossing
#if no intersection, return False, else return the value of intersection
def intersection(self, pair1, pair2):
#if two pairs has a same point, break
if np.all(pair1[0]-pair2[0]==0) or np.all(pair1[1]-pair2[0]==0) \
or np.all(pair1[0]-pair2[1]==0) or np.all(pair1[1]-pair2[1]==0):
return False
line1 = LineString([pair1[0], pair1[1]])
line2 = LineString([pair2[0], pair2[1]])
intersection_point = line1.intersection(line2)
#no intersection
if intersection_point.is_empty:
return False
else:
return np.array([intersection_point.x, intersection_point.y])
def visualization_final_graph(self, point_order, crossing):
x, y = zip(*point_order)
plt.plot(x, y, '-o', zorder=1)
crossing = np.array(crossing)
c_x = crossing[:,0,0]
c_y = crossing[:,0,1]
plt.scatter(c_x, c_y, 20, 'r', zorder=2)
plt.show()
def trajectory(self, env, sa, point_order, crossing, stride):
picker_pos, particle_pos = sa.action_space.Picker._get_pos()
print(particle_pos)
particle_dist_2d = np.linalg.norm(particle_pos[0] - particle_pos[1])
init_particle = particle_pos[random.randint(0,len(particle_pos))].tolist()
particle_list = []
particle_list.append(init_particle)
for i in range(len(point_order)-stride):
if i % stride != 0:
continue
curr_particle = particle_list[i//stride]
y_o = point_order[i+stride][1] - point_order[i][1]
x_o = point_order[i+stride][0] - point_order[i][0]
orientation = abs(y_o / x_o)
theta = math.atan(orientation)
if x_o == 0:
x_o = 0.1
if y_o == 0:
y_o = 0.1
x = curr_particle[0] + math.cos(theta) * particle_dist_2d * x_o / abs(x_o)
y = curr_particle[2] + math.sin(theta) * particle_dist_2d * y_o / abs(y_o)
next_particle = [x, curr_particle[1], y, curr_particle[3]]
particle_list.append(next_particle)
for i in range(len(particle_list)):
if i == 3:
particle_list[i][1] = 0.0145
if i == 4:
particle_list[i][1] = 0.0245
if i == 5:
particle_list[i][1] = 0.0145
if i == 9:
particle_list[i][1] = 0.0145
if i == 10:
particle_list[i][1] = 0.0245
if i == 11:
particle_list[i][1] = 0.0145
particle_list = np.array(particle_list)
particle_x = particle_list[:, 0]
particle_z = particle_list[:, 1]
particle_y = particle_list[:, 2]
fig=plt.figure()
ax2 = Axes3D(fig)
ax2.scatter3D(particle_x,particle_y,particle_z, cmap='Blues')
ax2.plot3D(particle_x,particle_y,particle_z,'gray')
plt.show()
return particle_list | true | true |
f72b5999064274f48ed6073bf289ff75177ea60f | 4,247 | py | Python | tests/make_entry_test.py | asottile/pypi_practices | a4da562c471198dd35806c52016fac44bb46c08d | [
"MIT"
] | 3 | 2015-02-16T16:41:43.000Z | 2016-08-25T03:35:12.000Z | tests/make_entry_test.py | asottile/pypi_practices | a4da562c471198dd35806c52016fac44bb46c08d | [
"MIT"
] | 1 | 2017-08-15T04:02:30.000Z | 2017-08-15T04:02:30.000Z | tests/make_entry_test.py | asottile/pypi_practices | a4da562c471198dd35806c52016fac44bb46c08d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import io
import mock
import os.path
import pytest
import sys
from pypi_practices import five
from pypi_practices.errors import FileValidationError
from pypi_practices.make_entry import make_entry
from testing.util import REMatcher
@pytest.fixture
def fake_entry():
    """Yield a make_entry()-wrapped check that records its call arguments."""
    class fake_entry_state(object):
        cwd_arg = None     # last `cwd` the check received
        config_arg = None  # last parsed config the check received
        entry = None       # the wrapped entry point under test

    def check_fn(cwd, fix, config):
        # Record every argument on the state class, then report success (0).
        fake_entry_state.cwd_arg = cwd
        fake_entry_state.fix_arg = fix
        fake_entry_state.config_arg = config
        return 0

    fake_entry_state.entry = staticmethod(make_entry(check_fn))
    yield fake_entry_state
def test_converts_args_to_text(fake_entry):
    """CLI arguments arrive as native str but must reach the check as text."""
    # Native str (py2 vs py3)
    args = [str('--cwd'), str('path')]
    fake_entry.entry(args)
    assert type(fake_entry.cwd_arg) is five.text
    assert fake_entry.cwd_arg == 'path'
def test_cwd_defaults_to_dot(fake_entry):
fake_entry.entry([])
assert fake_entry.cwd_arg == '.'
def test_fix_calls_fix(fake_entry):
fake_entry.entry(['--fix'])
assert fake_entry.fix_arg is True
def test_ignores_extra_filename_args(fake_entry):
fake_entry.entry(['README.md', 'tox.ini'])
assert fake_entry.cwd_arg == '.'
@pytest.mark.parametrize('args', ([], ['--fix']))
def test_returns_0_for_ok(fake_entry, args):
ret = fake_entry.entry(args)
assert ret == 0
def test_no_args_passed_uses_sys_argv(fake_entry):
with mock.patch.object(sys, 'argv', ['hook-exe', '--cwd', 'foo_cwd']):
fake_entry.entry()
assert fake_entry.cwd_arg == 'foo_cwd'
@pytest.fixture
def print_mock():
with mock.patch.object(five.builtins, 'print') as print_mock:
yield print_mock
def test_ok_prints_nothing(fake_entry, print_mock):
fake_entry.entry([])
assert print_mock.call_count == 0
def test_raises_validation_error(print_mock):
    """A FileValidationError becomes exit status 1 plus a printed fix hint."""
    def raising_check(*_):
        raise FileValidationError(
            'README.md',
            'Missing something.'
        )

    entry = make_entry(raising_check)
    ret = entry([])
    assert ret == 1
    print_mock.assert_called_once_with(
        'README.md: Missing something.\n'
        '\n'
        'Manually edit the file above to fix.'
    )
def test_message_contains_line_if_specified(print_mock):
def raising_check_with_line_number(*_):
raise FileValidationError(
'README.md',
'Missing something.',
line=5,
)
entry = make_entry(raising_check_with_line_number)
ret = entry([])
assert ret == 1
print_mock.assert_called_once_with(
'README.md:5: Missing something.\n'
'\n'
'Manually edit the file above to fix.'
)
def test_auto_fixable_prints_auto_fixable(print_mock):
def raising_check_auto_fixable(*_):
raise FileValidationError(
'README.md',
'Missing something.',
is_auto_fixable=True,
)
entry = make_entry(raising_check_auto_fixable)
ret = entry([])
assert ret == 1
print_mock.assert_called_once_with(
'README.md: Missing something.\n'
'\n'
'To attempt automatic fixing, run with --fix.'
)
def test_passes_config(tmpdir, fake_entry):
    """The YAML config found under --cwd is parsed and handed to the check."""
    config_path = os.path.join(tmpdir.strpath, '.pypi-practices-config.yaml')
    with io.open(config_path, 'w') as config_file:
        config_file.write('autofix: true')
    ret = fake_entry.entry(['--cwd', tmpdir.strpath])
    assert ret == 0
    assert fake_entry.config_arg == {'autofix': True}
def test_failing_config(tmpdir, fake_entry, print_mock):
    """Invalid YAML in the config file fails the hook with a clear message."""
    config_path = os.path.join(tmpdir.strpath, '.pypi-practices-config.yaml')
    with io.open(config_path, 'w') as config_file:
        config_file.write('foo: "')  # unterminated quote -> YAML parse error
    ret = fake_entry.entry(['--cwd', tmpdir.strpath])
    assert ret == 1
    # REMatcher compares the printed message against a regex, since the
    # traceback embeds the tmpdir path.
    print_mock.assert_called_once_with(REMatcher(
        r'.pypi-practices-config.yaml: Invalid Yaml:\n\n'
        r'while scanning a quoted scalar\n'
        r' in ".+\.pypi-practices-config.yaml", line 1, column 6\n'
        r'found unexpected end of stream\n'
        r' in ".+/.pypi-practices-config.yaml", line 1, column 7'
    ))
| 27.224359 | 77 | 0.669178 | from __future__ import absolute_import
from __future__ import unicode_literals
import io
import mock
import os.path
import pytest
import sys
from pypi_practices import five
from pypi_practices.errors import FileValidationError
from pypi_practices.make_entry import make_entry
from testing.util import REMatcher
@pytest.fixture
def fake_entry():
class fake_entry_state(object):
cwd_arg = None
config_arg = None
entry = None
def check_fn(cwd, fix, config):
fake_entry_state.cwd_arg = cwd
fake_entry_state.fix_arg = fix
fake_entry_state.config_arg = config
return 0
fake_entry_state.entry = staticmethod(make_entry(check_fn))
yield fake_entry_state
def test_converts_args_to_text(fake_entry):
args = [str('--cwd'), str('path')]
fake_entry.entry(args)
assert type(fake_entry.cwd_arg) is five.text
assert fake_entry.cwd_arg == 'path'
def test_cwd_defaults_to_dot(fake_entry):
fake_entry.entry([])
assert fake_entry.cwd_arg == '.'
def test_fix_calls_fix(fake_entry):
fake_entry.entry(['--fix'])
assert fake_entry.fix_arg is True
def test_ignores_extra_filename_args(fake_entry):
fake_entry.entry(['README.md', 'tox.ini'])
assert fake_entry.cwd_arg == '.'
@pytest.mark.parametrize('args', ([], ['--fix']))
def test_returns_0_for_ok(fake_entry, args):
ret = fake_entry.entry(args)
assert ret == 0
def test_no_args_passed_uses_sys_argv(fake_entry):
with mock.patch.object(sys, 'argv', ['hook-exe', '--cwd', 'foo_cwd']):
fake_entry.entry()
assert fake_entry.cwd_arg == 'foo_cwd'
@pytest.fixture
def print_mock():
with mock.patch.object(five.builtins, 'print') as print_mock:
yield print_mock
def test_ok_prints_nothing(fake_entry, print_mock):
fake_entry.entry([])
assert print_mock.call_count == 0
def test_raises_validation_error(print_mock):
def raising_check(*_):
raise FileValidationError(
'README.md',
'Missing something.'
)
entry = make_entry(raising_check)
ret = entry([])
assert ret == 1
print_mock.assert_called_once_with(
'README.md: Missing something.\n'
'\n'
'Manually edit the file above to fix.'
)
def test_message_contains_line_if_specified(print_mock):
def raising_check_with_line_number(*_):
raise FileValidationError(
'README.md',
'Missing something.',
line=5,
)
entry = make_entry(raising_check_with_line_number)
ret = entry([])
assert ret == 1
print_mock.assert_called_once_with(
'README.md:5: Missing something.\n'
'\n'
'Manually edit the file above to fix.'
)
def test_auto_fixable_prints_auto_fixable(print_mock):
def raising_check_auto_fixable(*_):
raise FileValidationError(
'README.md',
'Missing something.',
is_auto_fixable=True,
)
entry = make_entry(raising_check_auto_fixable)
ret = entry([])
assert ret == 1
print_mock.assert_called_once_with(
'README.md: Missing something.\n'
'\n'
'To attempt automatic fixing, run with --fix.'
)
def test_passes_config(tmpdir, fake_entry):
config_path = os.path.join(tmpdir.strpath, '.pypi-practices-config.yaml')
with io.open(config_path, 'w') as config_file:
config_file.write('autofix: true')
ret = fake_entry.entry(['--cwd', tmpdir.strpath])
assert ret == 0
assert fake_entry.config_arg == {'autofix': True}
def test_failing_config(tmpdir, fake_entry, print_mock):
config_path = os.path.join(tmpdir.strpath, '.pypi-practices-config.yaml')
with io.open(config_path, 'w') as config_file:
config_file.write('foo: "')
ret = fake_entry.entry(['--cwd', tmpdir.strpath])
assert ret == 1
print_mock.assert_called_once_with(REMatcher(
r'.pypi-practices-config.yaml: Invalid Yaml:\n\n'
r'while scanning a quoted scalar\n'
r' in ".+\.pypi-practices-config.yaml", line 1, column 6\n'
r'found unexpected end of stream\n'
r' in ".+/.pypi-practices-config.yaml", line 1, column 7'
))
| true | true |
f72b59e95919066e8f4ddff2339f880b70006b93 | 17,174 | py | Python | tests/test_coroutine_sink.py | phillipuniverse/loguru | 3d5234541c81318e7f6f725eca7bab294fe09c23 | [
"MIT"
] | 11,391 | 2018-12-08T17:44:13.000Z | 2022-03-31T17:55:24.000Z | tests/test_coroutine_sink.py | vkirilenko/loguru | 68616485f4f0decb5fced36a16040f5e05e2842f | [
"MIT"
] | 610 | 2018-12-08T18:03:03.000Z | 2022-03-31T22:28:14.000Z | tests/test_coroutine_sink.py | vkirilenko/loguru | 68616485f4f0decb5fced36a16040f5e05e2842f | [
"MIT"
] | 601 | 2018-12-08T17:46:42.000Z | 2022-03-30T04:23:56.000Z | import asyncio
import logging
import multiprocessing
import re
import sys
import threading
import pytest
import loguru
from loguru import logger
async def async_writer(msg):
    """Asynchronous sink: yield to the event loop briefly, then echo `msg`."""
    await asyncio.sleep(0.01)
    sys.stdout.write(msg)
class AsyncWriter:
    """Callable-object form of the asynchronous stdout sink."""

    async def __call__(self, msg):
        # The short sleep forces a context switch before the write happens.
        await asyncio.sleep(0.01)
        sys.stdout.write(msg)
def test_coroutine_function(capsys):
    """A plain async function can be registered directly as a sink."""
    async def worker():
        logger.debug("A message")
        await logger.complete()  # flush pending sink tasks before asserting

    logger.add(async_writer, format="{message}")
    asyncio.run(worker())
    out, err = capsys.readouterr()
    assert err == ""
    assert out == "A message\n"
def test_async_callable_sink(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(AsyncWriter(), format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_concurrent_execution(capsys):
    """Messages logged from concurrently gathered tasks are all delivered."""
    async def task(i):
        logger.debug("=> {}", i)

    async def main():
        tasks = [task(i) for i in range(10)]
        await asyncio.gather(*tasks)
        await logger.complete()

    logger.add(async_writer, format="{message}")
    asyncio.run(main())
    out, err = capsys.readouterr()
    assert err == ""
    # Delivery order is unspecified, so compare sorted line lists.
    assert sorted(out.splitlines()) == sorted("=> %d" % i for i in range(10))
def test_recursive_coroutine(capsys):
async def task(i):
if i == 0:
await logger.complete()
return
logger.info("{}!", i)
await task(i - 1)
logger.add(async_writer, format="{message}")
asyncio.run(task(9))
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("%d!" % i for i in range(1, 10))
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_using_another_event_loop(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_before_add(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_after_add(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
asyncio.set_event_loop(loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_run_mutiple_different_loops(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
logger.add(async_writer, format="{message}", loop=None)
asyncio.run(worker(1))
asyncio.run(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_run_multiple_same_loop(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
def test_run_multiple_same_loop_set_global(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_complete_in_another_run(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_complete_in_another_run_set_global(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_tasks_cancelled_on_remove(capsys):
    """remove() cancels queued sink coroutines, so nothing gets written."""
    logger.add(async_writer, format="{message}", catch=False)

    async def foo():
        logger.info("A")
        logger.info("B")
        logger.info("C")
        logger.remove()          # cancels the three pending writes
        await logger.complete()  # must not resurrect the cancelled tasks

    asyncio.run(foo())
    out, err = capsys.readouterr()
    assert out == err == ""
def test_remove_without_tasks(capsys):
logger.add(async_writer, format="{message}", catch=False)
logger.remove()
async def foo():
logger.info("!")
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_without_tasks(capsys):
logger.add(async_writer, catch=False)
async def worker():
await logger.complete()
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_stream_noop(capsys):
logger.add(sys.stderr, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_complete_file_noop(tmpdir):
filepath = tmpdir.join("test.log")
logger.add(str(filepath), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert filepath.read() == "A\nB\nC\nD\n"
def test_complete_function_noop():
out = ""
def write(msg):
nonlocal out
out += msg
logger.add(write, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert out == "A\nB\nC\nD\n"
def test_complete_standard_noop(capsys):
logger.add(logging.StreamHandler(sys.stderr), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_exception_in_coroutine_caught(capsys):
    """With catch=True, a failing sink prints Loguru's error report to stderr."""
    async def sink(msg):
        raise Exception("Oh no")

    async def main():
        logger.add(sink, catch=True)
        logger.info("Hello world")
        await asyncio.sleep(0.1)  # let the sink task run (and fail)
        await logger.complete()

    asyncio.run(main())
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == ""
    # The report frames the offending record between two banner lines.
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
    assert lines[-2] == "Exception: Oh no"
    assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_not_caught(capsys, caplog):
async def sink(msg):
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
def test_exception_in_coroutine_during_complete_caught(capsys):
async def sink(msg):
await asyncio.sleep(0.1)
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_during_complete_not_caught(capsys, caplog):
async def sink(msg):
await asyncio.sleep(0.1)
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_not_none(capsys):
loop = asyncio.new_event_loop()
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_enqueue_coroutine_loop_not_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_is_none(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
asyncio.run(worker("A"))
out, err = capsys.readouterr()
assert out == err == ""
loop.run_until_complete(worker("B"))
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_enqueue_coroutine_loop_is_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
loop.run_until_complete(worker("A"))
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_custom_complete_function(capsys):
awaited = False
class Handler:
def write(self, message):
print(message, end="")
async def complete(self):
nonlocal awaited
awaited = True
async def worker():
logger.info("A")
await logger.complete()
logger.add(Handler(), catch=False, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
assert awaited
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
main_loop.run_until_complete(worker_1())
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop_set_global(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_1())
asyncio.set_event_loop(second_loop)
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_complete_from_multiple_threads_loop_is_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
logger.add(sink, catch=False, format="{message}")
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_from_multiple_threads_loop_is_not_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
loop = asyncio.new_event_loop()
logger.add(sink, catch=False, format="{message}", loop=loop)
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
async def async_subworker(logger_):
logger_.info("Child")
await logger_.complete()
async def async_mainworker(logger_):
logger_.info("Main")
await logger_.complete()
def subworker(logger_):
loop = asyncio.get_event_loop()
loop.run_until_complete(async_subworker(logger_))
class Writer:
def __init__(self):
self.output = ""
async def write(self, message):
self.output += message
def test_complete_with_sub_processes(monkeypatch, capsys):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
loop = asyncio.new_event_loop()
writer = Writer()
logger.add(writer.write, format="{message}", enqueue=True, loop=loop)
process = ctx.Process(target=subworker, args=[logger])
process.start()
process.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert out == err == ""
assert writer.output == "Child\n"
| 24.120787 | 97 | 0.638116 | import asyncio
import logging
import multiprocessing
import re
import sys
import threading
import pytest
import loguru
from loguru import logger
async def async_writer(msg):
await asyncio.sleep(0.01)
print(msg, end="")
class AsyncWriter:
async def __call__(self, msg):
await asyncio.sleep(0.01)
print(msg, end="")
def test_coroutine_function(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_async_callable_sink(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(AsyncWriter(), format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_concurrent_execution(capsys):
async def task(i):
logger.debug("=> {}", i)
async def main():
tasks = [task(i) for i in range(10)]
await asyncio.gather(*tasks)
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(main())
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("=> %d" % i for i in range(10))
def test_recursive_coroutine(capsys):
async def task(i):
if i == 0:
await logger.complete()
return
logger.info("{}!", i)
await task(i - 1)
logger.add(async_writer, format="{message}")
asyncio.run(task(9))
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("%d!" % i for i in range(1, 10))
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_using_another_event_loop(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_before_add(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_after_add(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
asyncio.set_event_loop(loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_run_mutiple_different_loops(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
logger.add(async_writer, format="{message}", loop=None)
asyncio.run(worker(1))
asyncio.run(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_run_multiple_same_loop(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
def test_run_multiple_same_loop_set_global(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_complete_in_another_run(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_complete_in_another_run_set_global(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_tasks_cancelled_on_remove(capsys):
logger.add(async_writer, format="{message}", catch=False)
async def foo():
logger.info("A")
logger.info("B")
logger.info("C")
logger.remove()
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_remove_without_tasks(capsys):
logger.add(async_writer, format="{message}", catch=False)
logger.remove()
async def foo():
logger.info("!")
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_without_tasks(capsys):
logger.add(async_writer, catch=False)
async def worker():
await logger.complete()
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_stream_noop(capsys):
logger.add(sys.stderr, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_complete_file_noop(tmpdir):
filepath = tmpdir.join("test.log")
logger.add(str(filepath), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert filepath.read() == "A\nB\nC\nD\n"
def test_complete_function_noop():
out = ""
def write(msg):
nonlocal out
out += msg
logger.add(write, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert out == "A\nB\nC\nD\n"
def test_complete_standard_noop(capsys):
logger.add(logging.StreamHandler(sys.stderr), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_exception_in_coroutine_caught(capsys):
async def sink(msg):
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_not_caught(capsys, caplog):
async def sink(msg):
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
def test_exception_in_coroutine_during_complete_caught(capsys):
async def sink(msg):
await asyncio.sleep(0.1)
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_during_complete_not_caught(capsys, caplog):
async def sink(msg):
await asyncio.sleep(0.1)
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_not_none(capsys):
loop = asyncio.new_event_loop()
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_enqueue_coroutine_loop_not_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_is_none(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
asyncio.run(worker("A"))
out, err = capsys.readouterr()
assert out == err == ""
loop.run_until_complete(worker("B"))
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_enqueue_coroutine_loop_is_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
loop.run_until_complete(worker("A"))
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_custom_complete_function(capsys):
awaited = False
class Handler:
def write(self, message):
print(message, end="")
async def complete(self):
nonlocal awaited
awaited = True
async def worker():
logger.info("A")
await logger.complete()
logger.add(Handler(), catch=False, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
assert awaited
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
main_loop.run_until_complete(worker_1())
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop_set_global(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_1())
asyncio.set_event_loop(second_loop)
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_complete_from_multiple_threads_loop_is_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
logger.add(sink, catch=False, format="{message}")
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_from_multiple_threads_loop_is_not_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
loop = asyncio.new_event_loop()
logger.add(sink, catch=False, format="{message}", loop=loop)
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
async def async_subworker(logger_):
logger_.info("Child")
await logger_.complete()
async def async_mainworker(logger_):
logger_.info("Main")
await logger_.complete()
def subworker(logger_):
loop = asyncio.get_event_loop()
loop.run_until_complete(async_subworker(logger_))
class Writer:
def __init__(self):
self.output = ""
async def write(self, message):
self.output += message
def test_complete_with_sub_processes(monkeypatch, capsys):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
loop = asyncio.new_event_loop()
writer = Writer()
logger.add(writer.write, format="{message}", enqueue=True, loop=loop)
process = ctx.Process(target=subworker, args=[logger])
process.start()
process.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert out == err == ""
assert writer.output == "Child\n"
| true | true |
f72b5a37ff02745e8949ae1f82a9e2a4b599b954 | 20,351 | py | Python | datagen.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | datagen.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | datagen.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | '''Load image/labels/boxes from an annotation file.
The list file is like:
img.jpg width height xmin ymin xmax ymax label xmin ymin xmax ymax label ...
'''
import random
import numpy as np
import json
import os
# from PIL import Image, ImageDraw, ImageFile
# ImageFile.LOAD_TRUNCATED_IMAGES = True
import cv2
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
class jsonDataset(data.Dataset):
def __init__(self, path, classes, transform, input_image_size, num_crops, fpn_level, is_norm_reg_target, radius,
view_image=False, min_cols=1, min_rows=1):
'''
Args:
root: (str) ditectory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
input_size: (int) image shorter side size.
max_size: (int) maximum image longer side size.
'''
self.path = path
self.classes = classes
self.transform = transform
self.input_size = input_image_size
self.num_crops = num_crops
self.view_img = view_image
self.fpn_level = fpn_level
self.is_norm_reg_target = is_norm_reg_target
self.radius = radius
self.fnames = list()
self.offsets = list()
self.boxes = list()
self.labels = list()
self.num_classes = len(self.classes)
self.label_map = dict()
self.class_idx_map = dict()
# 0 is background class
for idx in range(0, self.num_classes):
self.label_map[self.classes[idx]] = idx+1 # 0 is background
self.class_idx_map[idx+1] = self.classes[idx]
self.data_encoder = DataEncoder(image_size=self.input_size,
num_classes=self.num_classes + 1,
fpn_level=self.fpn_level,
is_norm_reg_target=self.is_norm_reg_target)
fp_read = open(self.path, 'r')
gt_dict = json.load(fp_read)
all_boxes = list()
all_labels = list()
all_img_path = list()
# read gt files
for gt_key in gt_dict:
gt_data = gt_dict[gt_key][0]
box = list()
label = list()
num_boxes = len(gt_data['labels'])
img = cv2.imread(gt_data['image_path'])
img_rows = img.shape[0]
img_cols = img.shape[1]
for iter_box in range(0, num_boxes):
xmin = gt_data['boxes'][iter_box][0]
ymin = gt_data['boxes'][iter_box][1]
xmax = gt_data['boxes'][iter_box][2]
ymax = gt_data['boxes'][iter_box][3]
rows = ymax - ymin
cols = xmax - xmin
if xmin < 0 or ymin < 0:
print('negative coordinate: [xmin: ' + str(xmin) + ', ymin: ' + str(ymin) + ']')
print(gt_data['image_path'])
continue
if xmax > img_cols or ymax > img_rows:
print('over maximum size: [xmax: ' + str(xmax) + ', ymax: ' + str(ymax) + ']')
print(gt_data['image_path'])
continue
if cols < min_cols:
print('cols is lower than ' + str(min_cols) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +
str(xmax) + ', ' + str(ymax) + '] '
+ str(gt_data['image_path']))
continue
if rows < min_rows:
print('rows is lower than ' + str(min_rows) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +
str(xmax) + ', ' + str(ymax) + '] '
+ str(gt_data['image_path']))
continue
class_name = gt_data['labels'][iter_box][0]
if class_name not in self.label_map:
print('weired class name: ' + class_name)
print(gt_data['image_path'])
continue
class_idx = self.label_map[class_name]
box.append([float(xmin), float(ymin), float(xmax), float(ymax)])
label.append(int(class_idx))
if len(box) == 0 or len(label) == 0:
print('none of object exist in the image: ' + gt_data['image_path'])
continue
all_boxes.append(box)
all_labels.append(label)
all_img_path.append(gt_data['image_path'])
if len(all_boxes) == len(all_labels) and len(all_boxes) == len(all_img_path):
num_images = len(all_img_path)
else:
print('num. of boxes: ' + str(len(all_boxes)))
print('num. of labels: ' + str(len(all_labels)))
print('num. of paths: ' + str(len(all_img_path)))
raise ValueError('num. of elements are different(all boxes, all_labels, all_img_path)')
if num_crops <= 0:
for idx in range(0, num_images, 1):
self.fnames.append(all_img_path[idx])
self.boxes.append(torch.tensor(all_boxes[idx], dtype=torch.float32))
self.labels.append(torch.tensor(all_labels[idx], dtype=torch.int64))
else:
for idx in range(0, num_images, 1):
ori_boxes = all_boxes[idx]
ori_labels = all_labels[idx]
ori_img = cv2.imread(all_img_path[idx])
img_rows = ori_img.shape[0]
img_cols = ori_img.shape[1]
offsets, crop_boxes, crop_labels = self._do_crop(ori_img_rows=img_rows, ori_img_cols=img_cols,
target_img_size=self.input_size,
boxes=ori_boxes, labels=ori_labels)
num_offsets = len(offsets)
for idx_offset in range(0, num_offsets, 1):
self.fnames.append(all_img_path[idx])
self.offsets.append(offsets[idx_offset])
self.boxes.append(torch.tensor(crop_boxes[idx_offset], dtype=torch.float32))
self.labels.append(torch.tensor(crop_labels[idx_offset], dtype=torch.int64))
self.num_samples = len(self.fnames)
def __getitem__(self, idx):
# Load image and boxes.
fname = self.fnames[idx]
boxes = self.boxes[idx]
labels = self.labels[idx]
img = cv2.imread(fname)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.num_crops > 0:
offset = self.offsets[idx]
crop_rect = (int(offset[0]), int(offset[1]),
int(offset[0]+self.input_size[1]), int(offset[1]+self.input_size[0]))
if offset[0] < 0 or offset[1] < 0:
raise ValueError("negative offset!")
for box in boxes:
if box[0] < 0 or box[1] < 0 or box[2] > self.input_size[1] or box[3] > self.input_size[0]:
raise ValueError("negative box coordinate!")
img = img[crop_rect[1]:crop_rect[3], crop_rect[0]:crop_rect[2]]
bboxes = [bbox.tolist() + [label.item()] for bbox, label in zip(boxes, labels)]
augmented = self.transform(image=img, bboxes=bboxes)
img = augmented['image']
rows, cols = img.shape[1:]
boxes = augmented['bboxes']
boxes = [list(bbox) for bbox in boxes]
labels = [bbox.pop() for bbox in boxes]
if self.view_img is True:
np_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
np_img = np_img.numpy()
np_img = np.transpose(np_img, (1, 2, 0))
np_img = np.uint8(np_img * 255)
np_img = np.ascontiguousarray(np_img)
for idx_box, box in enumerate(boxes):
cv2.rectangle(np_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0))
class_idx = labels[idx_box]
text_size = cv2.getTextSize(self.class_idx_map[class_idx], cv2.FONT_HERSHEY_PLAIN, 1, 1)
cv2.putText(np_img, self.class_idx_map[class_idx], (int(box[0]), int(box[1]) - text_size[1]), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imwrite(os.path.join("crop_test", str(idx)+".jpg"), np_img)
boxes = torch.tensor(boxes, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.int64)
return img, boxes, labels, fname
def __len__(self):
return self.num_samples
# def _resize(self, img, boxes):
# if isinstance(self.input_size, int) is True:
# w = h = self.input_size
# elif isinstance(self.input_size, tuple) is True:
# h = self.input_size[0]
# w = self.input_size[1]
# else:
# raise ValueError('input size should be int or tuple of ints')
#
# ws = 1.0 * w / img.shape[1]
# hs = 1.0 * h / img.shape[0]
# scale = torch.tensor([ws, hs, ws, hs], dtype=torch.float32)
# if boxes.numel() == 0:
# scaled_box = boxes
# else:
# scaled_box = scale * boxes
# return cv2.resize(img, (w, h)), scaled_box
def _do_crop(self, ori_img_rows, ori_img_cols, target_img_size, boxes, labels):
num_boxes = len(boxes)
num_labels = len(labels)
if num_boxes != num_labels:
print("error occur: Random crop")
rand_indices = [0, 1, 2, 3, 4]
np.random.shuffle(rand_indices)
output_offsets = []
output_boxes = []
output_labels = []
for box in boxes:
# box coordinate from 1. not 0.
xmin = box[0]
ymin = box[1]
xmax = box[2]
ymax = box[3]
width = (xmax - xmin)+1
height = (ymax - ymin)+1
if width < 0 or height< 0:
print("negative width/height")
continue
for iter_crop in range(0, self.num_crops, 1):
rand_idx = rand_indices[iter_crop]
margin = np.random.randint(16, 128, size=1)
# top-left
if rand_idx == 0:
offset_x = xmin-1-margin[0]
offset_y = ymin-1-margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
# top-right
elif rand_idx == 1:
offset_x = xmin - (target_img_size[1] - width)-1+margin[0]
offset_y = ymin-1-margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
# bottom-left
elif rand_idx == 2:
offset_x = xmin-1-margin[0]
offset_y = ymin - (target_img_size[0] - height)-1+margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
# bottom-right
elif rand_idx == 3:
offset_x = xmin - (target_img_size[1] - width)-1+margin[0]
offset_y = ymin - (target_img_size[0] - height)-1+margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
# center
elif rand_idx == 4:
rand_direction = np.random.randint(-1, 1, size=1)
offset_x = (xmin - ((target_img_size[1]-width)/2)-1) + (rand_direction[0] * margin[0])
offset_y = (ymin - ((target_img_size[0]-height)/2)-1) + (rand_direction[0] * margin[0])
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
else:
print("exceed possible crop num")
return output_offsets, output_boxes, output_labels
def _find_boxes_in_crop(self, crop_rect, boxes, labels):
num_boxes = len(boxes)
num_labels = len(labels)
if num_boxes != num_labels:
print("error occur: Random crop")
boxes_in_crop=[]
labels_in_crop = []
for idx in range(0, num_boxes, 1):
box_in_crop, label, is_contain = self._find_box_in_crop(crop_rect, boxes[idx], labels[idx])
if is_contain is True:
boxes_in_crop.append(box_in_crop)
labels_in_crop.append(label)
return boxes_in_crop, labels_in_crop
def _find_box_in_crop(self, rect, box, label):
rect_minx = rect[0]
rect_miny = rect[1]
rect_width = rect[2]
rect_height = rect[3]
box_minx = box[0]
box_miny = box[1]
box_maxx = box[2]
box_maxy = box[3]
box_width = (box_maxx - box_minx)+1
box_height = (box_maxy - box_miny)+1
# occlusion_ratio
occlusion_ratio = 0.3
occlusion_width = int(box_width * occlusion_ratio) * -1
occlusion_height = int(box_height * occlusion_ratio) * -1
box_in_crop_minx = box_minx - rect_minx
if box_in_crop_minx <= occlusion_width or box_in_crop_minx >= rect_width:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_miny = box_miny - rect_miny
if box_in_crop_miny <= occlusion_height or box_in_crop_miny >= rect_height:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_maxx = box_maxx - rect_minx
if rect_width - box_in_crop_maxx <= occlusion_width or box_in_crop_maxx <= 0:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_maxy = box_maxy - rect_miny
if rect_height - box_in_crop_maxy <= occlusion_height or box_in_crop_maxy <= 0:
box_in_rect = []
return box_in_rect, label, False
if box_in_crop_minx < 0:
box_in_crop_minx = 0
if box_in_crop_miny < 0:
box_in_crop_miny = 0
if rect_width - box_in_crop_maxx < 0:
box_in_crop_maxx = rect_width-1
if rect_height - box_in_crop_maxy < 0:
box_in_crop_maxy = rect_height-1
box_in_rect = [box_in_crop_minx, box_in_crop_miny, box_in_crop_maxx, box_in_crop_maxy]
return box_in_rect, label, True
def collate_fn(self, batch):
imgs = [x[0] for x in batch]
boxes = [x[1] for x in batch]
labels = [x[2] for x in batch]
paths = [x[3] for x in batch]
num_imgs = len(imgs)
if isinstance(self.input_size, int) is True:
inputs = torch.zeros([num_imgs, 3, self.input_size, self.input_size], dtype=torch.float32)
elif isinstance(self.input_size, tuple) is True:
inputs = torch.zeros([num_imgs, 3, self.input_size[0], self.input_size[1]], dtype=torch.float32)
else:
raise ValueError('input size should be int or tuple of ints')
loc_targets = list()
cls_targets = list()
center_targets = list()
for i in range(num_imgs):
im = imgs[i]
imh, imw = im.size(1), im.size(2)
inputs[i, :, :imh, :imw] = im
# Encode data.
loc_target, cls_target, center_target = self.data_encoder.encode(boxes[i], labels[i], radius=self.radius)
loc_targets.append(loc_target)
cls_targets.append(cls_target)
center_targets.append(center_target)
return inputs, \
torch.stack(loc_targets, dim=0), \
torch.stack(cls_targets, dim=0), \
torch.stack(center_targets, dim=0), \
paths
def test():
import torchvision
# transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
# ])
# set random seed
random.seed(3000)
np.random.seed(3000)
torch.manual_seed(3000)
transform = transforms.Compose([
transforms.ToTensor()
])
classes = 'person|bicycle|car|motorcycle|bus|truck|cat|dog|rider'
classes = classes.split('|')
dataset = jsonDataset(path='data/voc.json', classes=classes,transform=transform,
input_image_size=(256, 512), num_crops=-1, fpn_level=5, is_norm_reg_target=True, radius=0.8,
view_image=True, do_aug=True)
print(len(dataset))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0,
collate_fn=dataset.collate_fn)
for idx, (images, loc_targets, cls_targets, center_targets, paths) in enumerate(dataloader):
print(loc_targets.shape)
print(cls_targets.shape)
print(center_targets.shape)
pos_ind = cls_targets[:, :, 0] <= 0
print(pos_ind.shape)
print(pos_ind.data.long().sum())
if __name__ == '__main__':
test()
| 38.110487 | 156 | 0.541644 | import random
import numpy as np
import json
import os
import cv2
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
class jsonDataset(data.Dataset):
def __init__(self, path, classes, transform, input_image_size, num_crops, fpn_level, is_norm_reg_target, radius,
view_image=False, min_cols=1, min_rows=1):
self.path = path
self.classes = classes
self.transform = transform
self.input_size = input_image_size
self.num_crops = num_crops
self.view_img = view_image
self.fpn_level = fpn_level
self.is_norm_reg_target = is_norm_reg_target
self.radius = radius
self.fnames = list()
self.offsets = list()
self.boxes = list()
self.labels = list()
self.num_classes = len(self.classes)
self.label_map = dict()
self.class_idx_map = dict()
for idx in range(0, self.num_classes):
self.label_map[self.classes[idx]] = idx+1
self.class_idx_map[idx+1] = self.classes[idx]
self.data_encoder = DataEncoder(image_size=self.input_size,
num_classes=self.num_classes + 1,
fpn_level=self.fpn_level,
is_norm_reg_target=self.is_norm_reg_target)
fp_read = open(self.path, 'r')
gt_dict = json.load(fp_read)
all_boxes = list()
all_labels = list()
all_img_path = list()
for gt_key in gt_dict:
gt_data = gt_dict[gt_key][0]
box = list()
label = list()
num_boxes = len(gt_data['labels'])
img = cv2.imread(gt_data['image_path'])
img_rows = img.shape[0]
img_cols = img.shape[1]
for iter_box in range(0, num_boxes):
xmin = gt_data['boxes'][iter_box][0]
ymin = gt_data['boxes'][iter_box][1]
xmax = gt_data['boxes'][iter_box][2]
ymax = gt_data['boxes'][iter_box][3]
rows = ymax - ymin
cols = xmax - xmin
if xmin < 0 or ymin < 0:
print('negative coordinate: [xmin: ' + str(xmin) + ', ymin: ' + str(ymin) + ']')
print(gt_data['image_path'])
continue
if xmax > img_cols or ymax > img_rows:
print('over maximum size: [xmax: ' + str(xmax) + ', ymax: ' + str(ymax) + ']')
print(gt_data['image_path'])
continue
if cols < min_cols:
print('cols is lower than ' + str(min_cols) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +
str(xmax) + ', ' + str(ymax) + '] '
+ str(gt_data['image_path']))
continue
if rows < min_rows:
print('rows is lower than ' + str(min_rows) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +
str(xmax) + ', ' + str(ymax) + '] '
+ str(gt_data['image_path']))
continue
class_name = gt_data['labels'][iter_box][0]
if class_name not in self.label_map:
print('weired class name: ' + class_name)
print(gt_data['image_path'])
continue
class_idx = self.label_map[class_name]
box.append([float(xmin), float(ymin), float(xmax), float(ymax)])
label.append(int(class_idx))
if len(box) == 0 or len(label) == 0:
print('none of object exist in the image: ' + gt_data['image_path'])
continue
all_boxes.append(box)
all_labels.append(label)
all_img_path.append(gt_data['image_path'])
if len(all_boxes) == len(all_labels) and len(all_boxes) == len(all_img_path):
num_images = len(all_img_path)
else:
print('num. of boxes: ' + str(len(all_boxes)))
print('num. of labels: ' + str(len(all_labels)))
print('num. of paths: ' + str(len(all_img_path)))
raise ValueError('num. of elements are different(all boxes, all_labels, all_img_path)')
if num_crops <= 0:
for idx in range(0, num_images, 1):
self.fnames.append(all_img_path[idx])
self.boxes.append(torch.tensor(all_boxes[idx], dtype=torch.float32))
self.labels.append(torch.tensor(all_labels[idx], dtype=torch.int64))
else:
for idx in range(0, num_images, 1):
ori_boxes = all_boxes[idx]
ori_labels = all_labels[idx]
ori_img = cv2.imread(all_img_path[idx])
img_rows = ori_img.shape[0]
img_cols = ori_img.shape[1]
offsets, crop_boxes, crop_labels = self._do_crop(ori_img_rows=img_rows, ori_img_cols=img_cols,
target_img_size=self.input_size,
boxes=ori_boxes, labels=ori_labels)
num_offsets = len(offsets)
for idx_offset in range(0, num_offsets, 1):
self.fnames.append(all_img_path[idx])
self.offsets.append(offsets[idx_offset])
self.boxes.append(torch.tensor(crop_boxes[idx_offset], dtype=torch.float32))
self.labels.append(torch.tensor(crop_labels[idx_offset], dtype=torch.int64))
self.num_samples = len(self.fnames)
def __getitem__(self, idx):
fname = self.fnames[idx]
boxes = self.boxes[idx]
labels = self.labels[idx]
img = cv2.imread(fname)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.num_crops > 0:
offset = self.offsets[idx]
crop_rect = (int(offset[0]), int(offset[1]),
int(offset[0]+self.input_size[1]), int(offset[1]+self.input_size[0]))
if offset[0] < 0 or offset[1] < 0:
raise ValueError("negative offset!")
for box in boxes:
if box[0] < 0 or box[1] < 0 or box[2] > self.input_size[1] or box[3] > self.input_size[0]:
raise ValueError("negative box coordinate!")
img = img[crop_rect[1]:crop_rect[3], crop_rect[0]:crop_rect[2]]
bboxes = [bbox.tolist() + [label.item()] for bbox, label in zip(boxes, labels)]
augmented = self.transform(image=img, bboxes=bboxes)
img = augmented['image']
rows, cols = img.shape[1:]
boxes = augmented['bboxes']
boxes = [list(bbox) for bbox in boxes]
labels = [bbox.pop() for bbox in boxes]
if self.view_img is True:
np_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
np_img = np_img.numpy()
np_img = np.transpose(np_img, (1, 2, 0))
np_img = np.uint8(np_img * 255)
np_img = np.ascontiguousarray(np_img)
for idx_box, box in enumerate(boxes):
cv2.rectangle(np_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0))
class_idx = labels[idx_box]
text_size = cv2.getTextSize(self.class_idx_map[class_idx], cv2.FONT_HERSHEY_PLAIN, 1, 1)
cv2.putText(np_img, self.class_idx_map[class_idx], (int(box[0]), int(box[1]) - text_size[1]), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imwrite(os.path.join("crop_test", str(idx)+".jpg"), np_img)
boxes = torch.tensor(boxes, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.int64)
return img, boxes, labels, fname
def __len__(self):
return self.num_samples
def _do_crop(self, ori_img_rows, ori_img_cols, target_img_size, boxes, labels):
num_boxes = len(boxes)
num_labels = len(labels)
if num_boxes != num_labels:
print("error occur: Random crop")
rand_indices = [0, 1, 2, 3, 4]
np.random.shuffle(rand_indices)
output_offsets = []
output_boxes = []
output_labels = []
for box in boxes:
xmin = box[0]
ymin = box[1]
xmax = box[2]
ymax = box[3]
width = (xmax - xmin)+1
height = (ymax - ymin)+1
if width < 0 or height< 0:
print("negative width/height")
continue
for iter_crop in range(0, self.num_crops, 1):
rand_idx = rand_indices[iter_crop]
margin = np.random.randint(16, 128, size=1)
if rand_idx == 0:
offset_x = xmin-1-margin[0]
offset_y = ymin-1-margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
elif rand_idx == 1:
offset_x = xmin - (target_img_size[1] - width)-1+margin[0]
offset_y = ymin-1-margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
elif rand_idx == 2:
offset_x = xmin-1-margin[0]
offset_y = ymin - (target_img_size[0] - height)-1+margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
elif rand_idx == 3:
offset_x = xmin - (target_img_size[1] - width)-1+margin[0]
offset_y = ymin - (target_img_size[0] - height)-1+margin[0]
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
elif rand_idx == 4:
rand_direction = np.random.randint(-1, 1, size=1)
offset_x = (xmin - ((target_img_size[1]-width)/2)-1) + (rand_direction[0] * margin[0])
offset_y = (ymin - ((target_img_size[0]-height)/2)-1) + (rand_direction[0] * margin[0])
crop_maxx = offset_x + target_img_size[1]
crop_maxy = offset_y + target_img_size[0]
if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:
continue
if offset_x < 0 or offset_y < 0:
continue
crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]
in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)
if len(in_boxes) == 0:
continue
output_offsets.append([offset_x, offset_y])
output_boxes.append(in_boxes)
output_labels.append(in_labels)
else:
print("exceed possible crop num")
return output_offsets, output_boxes, output_labels
def _find_boxes_in_crop(self, crop_rect, boxes, labels):
num_boxes = len(boxes)
num_labels = len(labels)
if num_boxes != num_labels:
print("error occur: Random crop")
boxes_in_crop=[]
labels_in_crop = []
for idx in range(0, num_boxes, 1):
box_in_crop, label, is_contain = self._find_box_in_crop(crop_rect, boxes[idx], labels[idx])
if is_contain is True:
boxes_in_crop.append(box_in_crop)
labels_in_crop.append(label)
return boxes_in_crop, labels_in_crop
def _find_box_in_crop(self, rect, box, label):
rect_minx = rect[0]
rect_miny = rect[1]
rect_width = rect[2]
rect_height = rect[3]
box_minx = box[0]
box_miny = box[1]
box_maxx = box[2]
box_maxy = box[3]
box_width = (box_maxx - box_minx)+1
box_height = (box_maxy - box_miny)+1
occlusion_ratio = 0.3
occlusion_width = int(box_width * occlusion_ratio) * -1
occlusion_height = int(box_height * occlusion_ratio) * -1
box_in_crop_minx = box_minx - rect_minx
if box_in_crop_minx <= occlusion_width or box_in_crop_minx >= rect_width:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_miny = box_miny - rect_miny
if box_in_crop_miny <= occlusion_height or box_in_crop_miny >= rect_height:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_maxx = box_maxx - rect_minx
if rect_width - box_in_crop_maxx <= occlusion_width or box_in_crop_maxx <= 0:
box_in_rect = []
return box_in_rect, label, False
box_in_crop_maxy = box_maxy - rect_miny
if rect_height - box_in_crop_maxy <= occlusion_height or box_in_crop_maxy <= 0:
box_in_rect = []
return box_in_rect, label, False
if box_in_crop_minx < 0:
box_in_crop_minx = 0
if box_in_crop_miny < 0:
box_in_crop_miny = 0
if rect_width - box_in_crop_maxx < 0:
box_in_crop_maxx = rect_width-1
if rect_height - box_in_crop_maxy < 0:
box_in_crop_maxy = rect_height-1
box_in_rect = [box_in_crop_minx, box_in_crop_miny, box_in_crop_maxx, box_in_crop_maxy]
return box_in_rect, label, True
def collate_fn(self, batch):
imgs = [x[0] for x in batch]
boxes = [x[1] for x in batch]
labels = [x[2] for x in batch]
paths = [x[3] for x in batch]
num_imgs = len(imgs)
if isinstance(self.input_size, int) is True:
inputs = torch.zeros([num_imgs, 3, self.input_size, self.input_size], dtype=torch.float32)
elif isinstance(self.input_size, tuple) is True:
inputs = torch.zeros([num_imgs, 3, self.input_size[0], self.input_size[1]], dtype=torch.float32)
else:
raise ValueError('input size should be int or tuple of ints')
loc_targets = list()
cls_targets = list()
center_targets = list()
for i in range(num_imgs):
im = imgs[i]
imh, imw = im.size(1), im.size(2)
inputs[i, :, :imh, :imw] = im
loc_target, cls_target, center_target = self.data_encoder.encode(boxes[i], labels[i], radius=self.radius)
loc_targets.append(loc_target)
cls_targets.append(cls_target)
center_targets.append(center_target)
return inputs, \
torch.stack(loc_targets, dim=0), \
torch.stack(cls_targets, dim=0), \
torch.stack(center_targets, dim=0), \
paths
def test():
import torchvision
random.seed(3000)
np.random.seed(3000)
torch.manual_seed(3000)
transform = transforms.Compose([
transforms.ToTensor()
])
classes = 'person|bicycle|car|motorcycle|bus|truck|cat|dog|rider'
classes = classes.split('|')
dataset = jsonDataset(path='data/voc.json', classes=classes,transform=transform,
input_image_size=(256, 512), num_crops=-1, fpn_level=5, is_norm_reg_target=True, radius=0.8,
view_image=True, do_aug=True)
print(len(dataset))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0,
collate_fn=dataset.collate_fn)
for idx, (images, loc_targets, cls_targets, center_targets, paths) in enumerate(dataloader):
print(loc_targets.shape)
print(cls_targets.shape)
print(center_targets.shape)
pos_ind = cls_targets[:, :, 0] <= 0
print(pos_ind.shape)
print(pos_ind.data.long().sum())
if __name__ == '__main__':
test()
| true | true |
f72b5bc149c5ba2f2e841366355f1137ce247df7 | 10,256 | py | Python | meatpy/itch50/itch50_message_parser.py | vishalbelsare/MeatPy | d4f22c4aca750b7b51858383c21ee573d6481e44 | [
"BSD-3-Clause"
] | 5 | 2021-07-21T22:19:18.000Z | 2022-03-20T02:39:27.000Z | meatpy/itch50/itch50_message_parser.py | vishalbelsare/MeatPy | d4f22c4aca750b7b51858383c21ee573d6481e44 | [
"BSD-3-Clause"
] | 1 | 2021-09-22T20:13:56.000Z | 2021-09-25T14:47:54.000Z | meatpy/itch50/itch50_message_parser.py | vishalbelsare/MeatPy | d4f22c4aca750b7b51858383c21ee573d6481e44 | [
"BSD-3-Clause"
] | 4 | 2020-12-11T02:29:52.000Z | 2021-11-06T04:00:46.000Z | """itch50_message_parser.py: Message parser class for ITCH 5.0"""
__author__ = "Vincent Grégoire"
__email__ = "vincent.gregoire@gmail.com"
from copy import deepcopy
import meatpy.itch50.itch50_market_message
from meatpy.message_parser import MessageParser
class ITCH50MessageParser(MessageParser):
"""A market message parser for ITCH 5.0 data.
"""
def __init__(self):
self.keep_messages_types = b'SAFECXDUBHRYPQINLVWK'
self.skip_stock_messages = False
self.order_refs = {}
self.stocks = None
self.matches = {}
self.counter = 0
self.stock_directory = []
self.system_messages = []
# Output settings
self.output_prefix = ''
self.message_buffer = 2000 # Per stock buffer size
self.global_write_trigger = 1000000 # Check if buffers exceeded
super(ITCH50MessageParser, self).__init__()
def write_file(self, file, in_messages=None):
"""Write the messages to a csv file in a compatible format
The messages are written to a file that could be again parsed by
the parser. In no messages are provided, the current message queue
is used.
:param file: file to write to
:type file: file
:param in_messages: messages to output
:type in_messages: list of ITCH50MarketMessage
"""
if in_messages is None:
messages = self.stock_directory
else:
messages = in_messages
for x in messages:
file.write(b'\x00')
file.write(chr(x.message_size))
file.write(x.pack())
def parse_file(self, file, write=False):
"""Parse the content of the file to generate ITCH50MarketMessage
objects.
Flag indicates if parsing output is written at the same time instead
of kept in memory.
"""
# Init containers
self.counter = 0
self.order_refs = {}
self.stock_messages = {}
self.matches = {}
self.stock_messages = {}
self.stock_directory = []
self.system_messages = []
self.latest_timestamp = None
maxMessageSize = 52 # Largest possible message in ITCH
cachesize = 1024*4
haveData = True
EOFreached = False
dataBuffer = file.read(cachesize)
buflen = len(dataBuffer)
while haveData is True:
# Process next message
byte = dataBuffer[0:1]
if byte != b'\x00':
raise Exception('ITCH50MessageParser:ITCH_factory',
'Unexpected byte: ' + str(byte))
messageLen = ord(dataBuffer[1:2])
message = self.ITCH_factory(dataBuffer[2:2+messageLen])
self.process_message(message)
if message.type == b'S': # System message
if message.code == b'C': # End of messages
break
# Check if we need to write the cache for the stock
if write and self.counter % self.global_write_trigger == 0:
for x in self.stock_messages:
self.write_stock(x)
# Remove the message from buffer
dataBuffer = dataBuffer[2+messageLen:]
buflen = len(dataBuffer)
if EOFreached and (buflen == 0):
haveData = False
# If we don't have enough, read more
if buflen < maxMessageSize and not EOFreached:
newData = file.read(cachesize)
if newData == b'':
EOFreached = True
if buflen == 0:
haveData = False
else:
dataBuffer = dataBuffer + newData
buflen = len(dataBuffer)
# Write all unempty buffers
if write:
for x in self.stock_messages:
self.write_stock(stock=x, overlook_buffer=True)
def write_stock(self, stock, overlook_buffer=False):
if (len(self.stock_messages[stock]) > self.message_buffer or
overlook_buffer):
stock_str = stock.decode()
with open(self.output_prefix +
stock_str.strip().replace('*', '8')+'.txt', 'a+b') as file:
# * in stock symbols replaced by 8
for x in self.stock_messages[stock]:
file.write(b'\x00')
file.write(bytes([x.message_size]))
file.write(x.pack())
self.stock_messages[stock] = []
def append_stock_message(self, stock, message):
"""Append the message to the stock message queue
Initialises the queue if empty"""
if self.stocks is None or stock in self.stocks:
if self.skip_stock_messages:
return
if stock not in self.stock_messages:
self.stock_messages[stock] = deepcopy(self.system_messages)
self.stock_messages[stock].append(message)
def process_message(self, message):
"""
Looks at the message and decides what to do with it.
Could be keep, discard, send to file, etc.
"""
self.counter += 1
if self.counter % 1000000 == 0:
print( "Processing message no " + str(self.counter))
if message.type not in self.keep_messages_types:
return
if message.type in b'R':
self.stock_directory.append(message)
self.append_stock_message(message.stock, message)
elif message.type in b'SVW':
# Pass-through all system messages
for x in self.stock_messages:
self.append_stock_message(x, message)
self.system_messages.append(message)
elif message.type in b'HYQINKLJh':
if self.stocks is None or message.stock in self.stocks:
self.append_stock_message(message.stock, message)
elif message.type in b'AF':
if self.stocks is None or message.stock in self.stocks:
self.order_refs[message.orderRefNum] = message.stock
self.append_stock_message(message.stock, message)
elif message.type in b'ECXD':
if message.orderRefNum in self.order_refs:
stock = self.order_refs[message.orderRefNum]
self.append_stock_message(stock, message)
if message.type in b'D':
del self.order_refs[message.orderRefNum]
elif message.type in b'EC':
self.matches[message.match] = stock
elif message.type in b'U':
if message.origOrderRefNum in self.order_refs:
stock = self.order_refs[message.origOrderRefNum]
self.append_stock_message(stock, message)
del self.order_refs[message.origOrderRefNum]
self.order_refs[message.newOrderRefNum] = stock
elif message.type in b'B':
if message.match in self.matches:
stock = self.matches[message.match]
self.append_stock_message(stock, message)
elif message.type in b'P':
if self.stocks is None or message.stock in self.stocks:
self.append_stock_message(message.stock, message)
self.matches[message.match] = message.stock
def ITCH_factory(self, message):
'''
Pass this factory an entire bytearray and you will be
given the appropriate ITCH message
'''
msgtype = chr(message[0])
if msgtype == 'S':
return meatpy.itch50.itch50_market_message.SystemEventMessage(message)
elif msgtype == 'R':
return meatpy.itch50.itch50_market_message.StockDirectoryMessage(message)
elif msgtype == 'H':
return meatpy.itch50.itch50_market_message.StockTradingActionMessage(message)
elif msgtype == 'Y':
return meatpy.itch50.itch50_market_message.RegSHOMessage(message)
elif msgtype == 'L':
return meatpy.itch50.itch50_market_message.MarketParticipantPositionMessage(message)
elif msgtype == 'V':
return meatpy.itch50.itch50_market_message.MWCBDeclineLevelMessage(message)
elif msgtype == 'W':
return meatpy.itch50.itch50_market_message.MWCBBreachMessage(message)
elif msgtype == 'K':
return meatpy.itch50.itch50_market_message.IPOQuotingPeriodUpdateMessage(message)
elif msgtype == 'A':
return meatpy.itch50.itch50_market_message.AddOrderMessage(message)
elif msgtype == 'F':
return meatpy.itch50.itch50_market_message.AddOrderMPIDMessage(message)
elif msgtype == 'E':
return meatpy.itch50.itch50_market_message.OrderExecutedMessage(message)
elif msgtype == 'C':
return meatpy.itch50.itch50_market_message.OrderExecutedPriceMessage(message)
elif msgtype == 'X':
return meatpy.itch50.itch50_market_message.OrderCancelMessage(message)
elif msgtype == 'D':
return meatpy.itch50.itch50_market_message.OrderDeleteMessage(message)
elif msgtype == 'U':
return meatpy.itch50.itch50_market_message.OrderReplaceMessage(message)
elif msgtype == 'P':
return meatpy.itch50.itch50_market_message.TradeMessage(message)
elif msgtype == 'Q':
return meatpy.itch50.itch50_market_message.CrossTradeMessage(message)
elif msgtype == 'B':
return meatpy.itch50.itch50_market_message.BrokenTradeMessage(message)
elif msgtype == 'I':
return meatpy.itch50.itch50_market_message.NoiiMessage(message)
elif msgtype == 'N':
return meatpy.itch50.itch50_market_message.RpiiMessage(message)
elif msgtype == 'J':
return meatpy.itch50.itch50_market_message.LULDAuctionCollarMessage(message)
elif msgtype == 'h':
return meatpy.itch50.itch50_market_message.OperationalHaltMessage(message)
else:
raise Exception('ITCH50MessageParser:ITCH_factory',
'Unknown message type: '+ str(msgtype))
| 41.354839 | 96 | 0.604524 |
__author__ = "Vincent Grégoire"
__email__ = "vincent.gregoire@gmail.com"
from copy import deepcopy
import meatpy.itch50.itch50_market_message
from meatpy.message_parser import MessageParser
class ITCH50MessageParser(MessageParser):
def __init__(self):
self.keep_messages_types = b'SAFECXDUBHRYPQINLVWK'
self.skip_stock_messages = False
self.order_refs = {}
self.stocks = None
self.matches = {}
self.counter = 0
self.stock_directory = []
self.system_messages = []
self.output_prefix = ''
self.message_buffer = 2000
self.global_write_trigger = 1000000
super(ITCH50MessageParser, self).__init__()
def write_file(self, file, in_messages=None):
if in_messages is None:
messages = self.stock_directory
else:
messages = in_messages
for x in messages:
file.write(b'\x00')
file.write(chr(x.message_size))
file.write(x.pack())
def parse_file(self, file, write=False):
self.counter = 0
self.order_refs = {}
self.stock_messages = {}
self.matches = {}
self.stock_messages = {}
self.stock_directory = []
self.system_messages = []
self.latest_timestamp = None
maxMessageSize = 52
cachesize = 1024*4
haveData = True
EOFreached = False
dataBuffer = file.read(cachesize)
buflen = len(dataBuffer)
while haveData is True:
byte = dataBuffer[0:1]
if byte != b'\x00':
raise Exception('ITCH50MessageParser:ITCH_factory',
'Unexpected byte: ' + str(byte))
messageLen = ord(dataBuffer[1:2])
message = self.ITCH_factory(dataBuffer[2:2+messageLen])
self.process_message(message)
if message.type == b'S':
if message.code == b'C':
break
if write and self.counter % self.global_write_trigger == 0:
for x in self.stock_messages:
self.write_stock(x)
dataBuffer = dataBuffer[2+messageLen:]
buflen = len(dataBuffer)
if EOFreached and (buflen == 0):
haveData = False
if buflen < maxMessageSize and not EOFreached:
newData = file.read(cachesize)
if newData == b'':
EOFreached = True
if buflen == 0:
haveData = False
else:
dataBuffer = dataBuffer + newData
buflen = len(dataBuffer)
# Write all unempty buffers
if write:
for x in self.stock_messages:
self.write_stock(stock=x, overlook_buffer=True)
def write_stock(self, stock, overlook_buffer=False):
if (len(self.stock_messages[stock]) > self.message_buffer or
overlook_buffer):
stock_str = stock.decode()
with open(self.output_prefix +
stock_str.strip().replace('*', '8')+'.txt', 'a+b') as file:
# * in stock symbols replaced by 8
for x in self.stock_messages[stock]:
file.write(b'\x00')
file.write(bytes([x.message_size]))
file.write(x.pack())
self.stock_messages[stock] = []
def append_stock_message(self, stock, message):
if self.stocks is None or stock in self.stocks:
if self.skip_stock_messages:
return
if stock not in self.stock_messages:
self.stock_messages[stock] = deepcopy(self.system_messages)
self.stock_messages[stock].append(message)
def process_message(self, message):
self.counter += 1
if self.counter % 1000000 == 0:
print( "Processing message no " + str(self.counter))
if message.type not in self.keep_messages_types:
return
if message.type in b'R':
self.stock_directory.append(message)
self.append_stock_message(message.stock, message)
elif message.type in b'SVW':
# Pass-through all system messages
for x in self.stock_messages:
self.append_stock_message(x, message)
self.system_messages.append(message)
elif message.type in b'HYQINKLJh':
if self.stocks is None or message.stock in self.stocks:
self.append_stock_message(message.stock, message)
elif message.type in b'AF':
if self.stocks is None or message.stock in self.stocks:
self.order_refs[message.orderRefNum] = message.stock
self.append_stock_message(message.stock, message)
elif message.type in b'ECXD':
if message.orderRefNum in self.order_refs:
stock = self.order_refs[message.orderRefNum]
self.append_stock_message(stock, message)
if message.type in b'D':
del self.order_refs[message.orderRefNum]
elif message.type in b'EC':
self.matches[message.match] = stock
elif message.type in b'U':
if message.origOrderRefNum in self.order_refs:
stock = self.order_refs[message.origOrderRefNum]
self.append_stock_message(stock, message)
del self.order_refs[message.origOrderRefNum]
self.order_refs[message.newOrderRefNum] = stock
elif message.type in b'B':
if message.match in self.matches:
stock = self.matches[message.match]
self.append_stock_message(stock, message)
elif message.type in b'P':
if self.stocks is None or message.stock in self.stocks:
self.append_stock_message(message.stock, message)
self.matches[message.match] = message.stock
def ITCH_factory(self, message):
msgtype = chr(message[0])
if msgtype == 'S':
return meatpy.itch50.itch50_market_message.SystemEventMessage(message)
elif msgtype == 'R':
return meatpy.itch50.itch50_market_message.StockDirectoryMessage(message)
elif msgtype == 'H':
return meatpy.itch50.itch50_market_message.StockTradingActionMessage(message)
elif msgtype == 'Y':
return meatpy.itch50.itch50_market_message.RegSHOMessage(message)
elif msgtype == 'L':
return meatpy.itch50.itch50_market_message.MarketParticipantPositionMessage(message)
elif msgtype == 'V':
return meatpy.itch50.itch50_market_message.MWCBDeclineLevelMessage(message)
elif msgtype == 'W':
return meatpy.itch50.itch50_market_message.MWCBBreachMessage(message)
elif msgtype == 'K':
return meatpy.itch50.itch50_market_message.IPOQuotingPeriodUpdateMessage(message)
elif msgtype == 'A':
return meatpy.itch50.itch50_market_message.AddOrderMessage(message)
elif msgtype == 'F':
return meatpy.itch50.itch50_market_message.AddOrderMPIDMessage(message)
elif msgtype == 'E':
return meatpy.itch50.itch50_market_message.OrderExecutedMessage(message)
elif msgtype == 'C':
return meatpy.itch50.itch50_market_message.OrderExecutedPriceMessage(message)
elif msgtype == 'X':
return meatpy.itch50.itch50_market_message.OrderCancelMessage(message)
elif msgtype == 'D':
return meatpy.itch50.itch50_market_message.OrderDeleteMessage(message)
elif msgtype == 'U':
return meatpy.itch50.itch50_market_message.OrderReplaceMessage(message)
elif msgtype == 'P':
return meatpy.itch50.itch50_market_message.TradeMessage(message)
elif msgtype == 'Q':
return meatpy.itch50.itch50_market_message.CrossTradeMessage(message)
elif msgtype == 'B':
return meatpy.itch50.itch50_market_message.BrokenTradeMessage(message)
elif msgtype == 'I':
return meatpy.itch50.itch50_market_message.NoiiMessage(message)
elif msgtype == 'N':
return meatpy.itch50.itch50_market_message.RpiiMessage(message)
elif msgtype == 'J':
return meatpy.itch50.itch50_market_message.LULDAuctionCollarMessage(message)
elif msgtype == 'h':
return meatpy.itch50.itch50_market_message.OperationalHaltMessage(message)
else:
raise Exception('ITCH50MessageParser:ITCH_factory',
'Unknown message type: '+ str(msgtype))
| true | true |
f72b5ca0b6e649f1aa5b09952cf5e59898061a4c | 34,977 | py | Python | SigProfilerMatrixGenerator/install.py | edawson/SigProfilerMatrixGenerator | bd6d3bb15e87805cdc7e771c3fdd886f4a9fc29b | [
"BSD-2-Clause"
] | null | null | null | SigProfilerMatrixGenerator/install.py | edawson/SigProfilerMatrixGenerator | bd6d3bb15e87805cdc7e771c3fdd886f4a9fc29b | [
"BSD-2-Clause"
] | null | null | null | SigProfilerMatrixGenerator/install.py | edawson/SigProfilerMatrixGenerator | bd6d3bb15e87805cdc7e771c3fdd886f4a9fc29b | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#Author: Erik Bergstrom
#Contact: ebergstr@eng.ucsd.edu
from __future__ import print_function
import os
import sys
import re
import subprocess
import argparse
import time
from scipy import spatial
import pandas as pd
import shutil
import logging
import hashlib
from SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn
from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen
def md5(fname):
    """Return the hexadecimal MD5 digest of the file at *fname*.

    The file is read in binary mode in 4 KiB chunks so that arbitrarily
    large files can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()
def install_chromosomes (genomes, ref_dir, custom, rsync, bash):
	"""Download chromosome fasta files and convert them to chromosome strings.

	Parameters:
		genomes -- list of genome builds to install (e.g. ['GRCh37', 'mm10']).
		ref_dir -- path to the package reference directory.
		custom  -- when True, skip downloading: the user has already placed
		           gzipped fasta files under references/chromosomes/fasta/.
		rsync   -- when True, download from Ensembl via rsync instead of wget.
		bash    -- when True, wrap shell commands in an explicit bash invocation.

	Genomes whose transcription-strand-bias (tsb) files are already present
	are skipped. Shell commands append their diagnostics to install.log.
	"""
	if custom:
		for genome in genomes:
			# User-supplied fasta files: just decompress and build the strings.
			os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
			chromosome_fasta_path = "references/chromosomes/fasta/" + genome + "/"
			os.system("python scripts/save_chrom_strings.py -g " + genome)
			print("Chromosome string files for " + genome + " have been created. Continuing with installation.")
	else:
		for genome in genomes:
			species = None
			chrom_number = None
			if genome == 'GRCh37' or genome == 'GRCh38':
				species = "homo_sapiens"
				chrom_number = 24
			elif genome == 'mm10' or genome == 'mm9':
				species = "mus_musculus"
				chrom_number = 21
			elif genome == 'rn6':
				species = 'rattus_norvegicus'
				chrom_number = 22
			else:
				print(genome + " is not supported. The following genomes are supported:\nGRCh37, GRCh38, mm10")
				sys.exit()

			chromosome_string_path = "references/chromosomes/chrom_string/" + genome + "/"
			chromosome_fasta_path = "references/chromosomes/fasta/" + genome + "/"

			# BUG FIX: this was 'break', which aborted the whole loop as soon as
			# one genome was found to be installed, silently skipping every
			# remaining genome in the list. 'continue' skips only this genome.
			if os.path.exists(ref_dir + "chromosomes/tsb/" + genome) and len(os.listdir(ref_dir + "chromosomes/tsb/" + genome)) >= chrom_number:
				continue

			if os.path.exists(chromosome_string_path) == False or len(os.listdir(chromosome_string_path)) <= chrom_number:
				# BUG FIX: these debug messages claimed the files were "found"
				# although this branch is entered when they are missing.
				print("[DEBUG] Chromosome string files not found at: " + ref_dir + chromosome_string_path)
				if os.path.exists(chromosome_fasta_path) == False or len(os.listdir(chromosome_fasta_path)) <= chrom_number:
					print("[DEBUG] Chromosome fasta files not found at: " + ref_dir + chromosome_fasta_path)
					print("Chromosomes are not currently saved as individual text files for " + genome + ". Downloading the files now...")
					if not rsync:
						# wget path: each genome build lives under a different Ensembl release.
						try:
							if genome == 'GRCh37':
								if bash:
									os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log' + "'")
								else:
									os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log')
							elif genome == 'mm9':
								if bash:
									os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log' + "'")
								else:
									os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log')
							elif genome == 'rn6':
								if bash:
									os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log' + "'")
								else:
									os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log')
							else:
								if bash:
									os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log' + "'")
								else:
									os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log')
							os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
						except:
							print("The ensembl ftp site is not currently responding.")
							sys.exit()
					else:
						# rsync path: same release layout as the wget path above.
						try:
							if genome == 'GRCh37':
								if bash:
									os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
								else:
									os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
							elif genome == 'mm9':
								if bash:
									os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
								else:
									os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
							elif genome == 'rn6':
								if bash:
									os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ " + chromosome_fasta_path + " 2>> install.log" + "'")
								else:
									os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ " + chromosome_fasta_path + " 2>> install.log")
							else:
								if bash:
									os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/"+species+"/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
								else:
									os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/"+species+"/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
							os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
						except:
							print("The ensembl ftp site is not currently responding.")
							sys.exit()
					print("Chromosome fasta files for " + genome + " have been installed. Creating the chromosome string files now...")
					os.system("python scripts/save_chrom_strings.py -g " + genome)
					print("Chromosome string files for " + genome + " have been created. Continuing with installation.")
					# Fasta files are no longer needed once the strings exist.
					shutil.rmtree(chromosome_fasta_path)
			else:
				print("Chromosome reference files exist for " + genome + ". Continuing with installation.")
def install_chromosomes_tsb (genomes, ref_dir, custom):
	"""Create and verify the transcription-strand-bias (tsb) reference files.

	For each genome in *genomes* this (a) requires user-downloaded BioMart
	transcript files to be present, (b) runs scripts/save_tsb_192.py when the
	tsb directory is missing or incomplete, and (c) verifies every generated
	per-chromosome tsb file against a hard-coded reference md5sum, deleting
	corrupted files and aborting if any mismatch is found.

	NOTE(review): the same checksum table is duplicated inside install();
	keep the two copies in sync.
	"""
	# Reference md5sums for every per-chromosome tsb file, per genome build.
	check_sum = {'GRCh37':
				{'1':'a7d51305e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',
				'4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',
				'7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',
				'10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',
				'13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',
				'16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',
				'19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',
				'22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',
				'MT':'dfd6db5743d399516d5c8dadee5bee78'},
				'GRCh38':
				{'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',
				'4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',
				'7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',
				'10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',
				'13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',
				'16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',
				'19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',
				'22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',
				'MT':'dfd6db5743d399516d5c8dadee5bee78'},
				'mm9':
				{'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',
				'4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',
				'7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',
				'10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',
				'13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',
				'16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',
				'19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',
				'MT':'a1d56043ed8308908965dd080a4d0c8d'},
				'mm10':
				{'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',
				'4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',
				'7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',
				'10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',
				'13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',
				'16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',
				'19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',
				'MT':'a1d56043ed8308908965dd080a4d0c8d'},
				'rn6':
				{'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',
				'4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',
				'7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',
				'10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',
				'13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',
				'16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', '18':'f1cb84f002967854b83bf266ec59a7a3',
				'19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',
				'X':'7a06bafab97c59a819f03633f0a6b7a2'},
				'c_elegans':
				{'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',
				'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',
				'MtDNA':'48983f530959780de0125f74a87d4fc1'},
				'dog':
				{'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',
				'4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',
				'7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',
				'10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',
				'13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',
				'16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',
				'19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',
				'22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',
				'25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',
				'28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',
				'31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',
				'34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',
				'37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}

	for genome in genomes:
		# Expected number of per-chromosome files (autosomes + X/Y + MT).
		# NOTE(review): chrom_number stays None for genomes outside this chain
		# (e.g. 'dog', 'c_elegans'); the '<' comparison below would then raise
		# a TypeError — such genomes appear to reach this function only via the
		# ftp install path, which does not call it. Confirm before extending.
		chrom_number = None
		if genome == 'GRCh37' or genome == 'GRCh38':
			chrom_number = 24
		elif genome == 'mm10' or genome == 'mm9':
			chrom_number = 21
		elif genome == 'rn6':
			chrom_number = 22

		chromosome_TSB_path = "references/chromosomes/tsb/" + genome + "/"
		transcript_files = "references/chromosomes/transcripts/" + genome + "/"
		# NOTE(review): printed unconditionally, even when the path is absent.
		print("[DEBUG] Chromosome tsb files found at: " + ref_dir + chromosome_TSB_path)

		# Transcript coordinate files must be downloaded manually from BioMart.
		if os.path.exists(transcript_files) == False or len(os.listdir(transcript_files)) < 1:
			print("Please download the transcript files before proceeding. You can download the files from 'http://www.ensembl.org/biomart/martview'.")
			print("Follow the format presented in the README file:\n\n\tGene stable ID	Transcript stable ID	Chromosome/scaffold name	Strand	Transcript start (bp)	Transcript end (bp)\n\n\n")
			sys.exit()

		# Build the tsb files only when missing or incomplete.
		if os.path.exists(chromosome_TSB_path) == False or len(os.listdir(chromosome_TSB_path)) < chrom_number:
			print("The transcriptional reference data for " + genome + " has not been saved. Creating these files now")
			os.system("python scripts/save_tsb_192.py -g " + genome)

		# Verify every generated file against its reference md5sum; remove
		# corrupted files so a re-install regenerates them.
		corrupt = False
		for files in os.listdir(chromosome_TSB_path):
			if "proportions" in files:
				continue
			if ".DS_Store" in files:
				continue
			chrom = files.split(".")
			chrom = chrom[0]
			check = md5(chromosome_TSB_path + files)
			if check_sum[genome][chrom] != check:
				corrupt = True
				os.remove(chromosome_TSB_path + files)
				print("[DEBUG] Chromosome " + chrom + " md5sum did not match => reference md5sum: " + str(check_sum[genome][chrom]) + " new file md5sum: " + str(check))
		if corrupt:
			print("The transcriptional reference data appears to be corrupted. Please reinstall the " + genome + " genome.")
			sys.exit()
		print("The transcriptional reference data for " + genome + " has been saved.")
def install_chromosomes_tsb_BED (genomes, custom, ref_dir):
	"""Ensure the per-chromosome TSB BED files exist for each requested genome.

	For every genome, the BED directory is regenerated via the
	scripts/save_chrom_tsb_separate.py helper whenever it is missing or
	contains fewer than 19 files; a confirmation is printed either way.
	"""
	for genome in genomes:
		bed_path = ref_dir + "chromosomes/tsb_BED/" + genome + "/"
		needs_build = not os.path.exists(bed_path) or len(os.listdir(bed_path)) < 19
		if needs_build:
			os.system("python scripts/save_chrom_tsb_separate.py -g " + genome)
		print("The TSB BED files for " + genome + " have been saved.")
def benchmark (genome, ref_dir):
	"""Run the matrix generator on the bundled benchmark VCFs and sanity-check it.

	The ref_dir argument is ignored: the package directory is always derived
	from __file__. The freshly generated SBS96 matrix must match the stored
	reference exactly (cosine similarity of 1 per sample); the SBS6144 matrix
	must reach a cosine similarity above 0.85 per sample. A warning is printed
	for any failing check, followed by the total elapsed time.
	"""
	ref_dir = os.path.dirname(os.path.abspath(__file__))
	vcf_path = ref_dir + "/references/vcf_files/" + genome + "_bench/"

	start_time = time.time()
	matGen.SigProfilerMatrixGeneratorFunc(genome + "_bench", genome, vcf_path)

	original_matrix_96 = ref_dir + "/scripts/Benchmark/" + genome + "_bench_orig_96.txt"
	original_matrix_3072 = ref_dir + "/scripts/Benchmark/" + genome + "_bench_orig_3072.txt"
	new_matrix_96 = vcf_path + "output/SBS/" + genome + "_bench.SBS96.all"
	new_matrix_3072 = vcf_path + "output/SBS/" + genome + "_bench.SBS6144.all"

	# --- SBS96: every shared sample column must match the reference exactly.
	reference = pd.read_csv(original_matrix_96, sep='\t', header=0)
	generated = pd.read_csv(new_matrix_96, sep='\t', header=0)
	mismatches = 0
	shared_cols = min(len(reference.loc[0]), len(generated.loc[0]))
	for col in range(1, shared_cols):
		ref_vals = list(reference[reference.columns[col]])
		gen_vals = list(generated[generated.columns[col]])
		similarity = 1 - spatial.distance.cosine(ref_vals, gen_vals)
		if similarity != 1:
			mismatches += 1
	if mismatches != 0:
		print("There seems to be some errors in the newly generated matrix. The installation may not have been successful.")

	# --- SBS6144: allow small deviations, flag only similarity <= 0.85.
	reference = pd.read_csv(original_matrix_3072, sep='\t', header=0)
	generated = pd.read_csv(new_matrix_3072, sep='\t', header=0)
	mismatches = 0
	shared_cols = min(len(reference.loc[0]), len(generated.loc[0]))
	for col in range(1, shared_cols):
		similarity = 1 - spatial.distance.cosine(reference[reference.columns[col]], generated[generated.columns[col]])
		if similarity <= 0.85:
			mismatches += 1
	if mismatches != 0:
		print("There seems to be some errors in the newly generated matrix. The installation may not have been successful.")

	end_time = time.time()
	print("Installation was succesful.\nSigProfilerMatrixGenerator took " + str(end_time-start_time) + " seconds to complete.")
def install (genome, custom=False, rsync=False, bash=True, ftp=True):
	"""Install all reference files required for one genome build.

	Parameters:
		genome -- genome build to install (e.g. 'GRCh37', 'GRCh38', 'mm10', 'rn6').
		custom -- when True, build references from user-supplied fasta files.
		rsync  -- when True, download via rsync instead of wget (local-build path only;
		          not supported for the pre-built ftp path).
		bash   -- when True, wrap shell commands in an explicit bash invocation.
		ftp    -- when True, download pre-built transcriptional reference data from
		          the project's ftp mirrors; otherwise build it locally from Ensembl
		          chromosome fasta files.

	Changes into the package directory for the duration of the install and
	restores the caller's working directory before returning.
	"""
	first_path = os.getcwd()
	ref_dir = os.path.dirname(os.path.abspath(__file__))
	os.chdir(ref_dir)

	if os.path.exists("install.log"):
		os.remove("install.log")

	chrom_string_dir = ref_dir + "/references/chromosomes/chrom_string/"
	chrom_fasta_dir = ref_dir + "/references/chromosomes/fasta/"
	chrom_tsb_dir = ref_dir + "/references/chromosomes/tsb/"
	matrix_dir = ref_dir + "/references/matrix/"
	vcf_dir = ref_dir + "/references/vcf_files/"
	bed_dir = ref_dir + "/references/vcf_files/BED/"
	log_dir = "logs/"
	new_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]
	for dirs in new_dirs:
		if not os.path.exists(dirs):
			os.makedirs(dirs)

	if ftp:
		# Reference md5sums for every per-chromosome tsb file, per genome build.
		# NOTE(review): duplicated in install_chromosomes_tsb — keep in sync.
		check_sum = {'GRCh37':
					{'1':'a7d51305e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',
					'4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',
					'7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',
					'10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',
					'13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',
					'16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',
					'19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',
					'22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',
					'MT':'dfd6db5743d399516d5c8dadee5bee78'},
					'GRCh38':
					{'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',
					'4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',
					'7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',
					'10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',
					'13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',
					'16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',
					'19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',
					'22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',
					'MT':'dfd6db5743d399516d5c8dadee5bee78'},
					'mm9':
					{'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',
					'4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',
					'7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',
					'10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',
					'13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',
					'16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',
					'19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',
					'MT':'a1d56043ed8308908965dd080a4d0c8d'},
					'mm10':
					{'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',
					'4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',
					'7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',
					'10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',
					'13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',
					'16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',
					'19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',
					'MT':'a1d56043ed8308908965dd080a4d0c8d'},
					'rn6':
					{'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',
					'4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',
					'7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',
					'10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',
					'13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',
					'16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', '18':'f1cb84f002967854b83bf266ec59a7a3',
					'19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',
					'X':'7a06bafab97c59a819f03633f0a6b7a2'},
					'c_elegans':
					{'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',
					'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',
					'MtDNA':'48983f530959780de0125f74a87d4fc1'},
					'dog':
					{'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',
					'4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',
					'7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',
					'10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',
					'13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',
					'16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',
					'19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',
					'22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',
					'25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',
					'28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',
					'31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',
					'34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',
					'37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}

		chromosome_fasta_path = ref_dir + "/references/chromosomes/tsb/"
		print("Beginning installation. This may take up to 40 minutes to complete.")
		if not rsync:
			try:
				if bash:
					# BUG FIX 1: the original command concatenated the download path and
					# the URL without a separating space, so wget received a single
					# mangled -P argument and no URL — the UCSD download always failed.
					# BUG FIX 2: os.system() returns an exit status instead of raising,
					# so the original try/except Sanger fallback was unreachable; test
					# the exit status explicitly instead.
					status = os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerMatrixGenerator/' + genome + '.tar.gz 2>> install.log' + "'")
					if status != 0:
						print("The UCSD ftp site is not responding...pulling from sanger ftp now.")
						status = os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log' + "'")
						if status != 0:
							print("The Sanger ftp site is not responding. Please check your internet connection/try again later.")
				else:
					os.system('wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log')
				os.system("tar -xzf " + ref_dir + "/references/chromosomes/tsb/" + genome + ".tar.gz -C " + ref_dir + "/references/chromosomes/tsb/")
				os.remove(ref_dir + "/references/chromosomes/tsb/" + genome + ".tar.gz")
			except:
				print("The ensembl ftp site is not currently responding.")
				sys.exit()
		else:
			print("Direct download for RSYNC is not yet supported")
			sys.exit()

		# Verify every extracted per-chromosome tsb file against its reference
		# md5sum; corrupted files are deleted so a re-install regenerates them.
		chromosome_TSB_path = chromosome_fasta_path + genome + "/"
		corrupt = False
		for files in os.listdir(chromosome_TSB_path):
			if "proportions" in files:
				continue
			if ".DS_Store" in files:
				continue
			chrom = files.split(".")
			chrom = chrom[0]
			check = md5(chromosome_TSB_path + files)
			if check_sum[genome][chrom] != check:
				corrupt = True
				os.remove(chromosome_TSB_path + files)
				print("[DEBUG] Chromosome " + chrom + " md5sum did not match => reference md5sum: " + str(check_sum[genome][chrom]) + " new file md5sum: " + str(check))
		if corrupt:
			print("The transcriptional reference data appears to be corrupted. Please reinstall the " + genome + " genome.")
			sys.exit()
		print("The transcriptional reference data for " + genome + " has been saved.")
	else:
		# Local build: download fasta from Ensembl and derive tsb data ourselves.
		print("Beginning installation. This may take up to 20 minutes to complete.")
		print("[DEBUG] Path to SigProfilerMatrixGenerator used for the install: ", ref_dir)
		genomes = [genome]
		install_chromosomes(genomes, ref_dir, custom, rsync, bash)
		install_chromosomes_tsb (genomes, ref_dir, custom)

	# BUG FIX 3: shutil.copy() cannot copy a directory (it raises
	# IsADirectoryError); copy the bundled example trees with copytree,
	# skipping any that are already in place.
	if os.path.exists("BRCA_example/") and not os.path.exists("references/vcf_files/BRCA_example/"):
		shutil.copytree("BRCA_example/", "references/vcf_files/BRCA_example/")
	if os.path.exists("example_test") and not os.path.exists("references/vcf_files/example_test/"):
		shutil.copytree("example_test/", "references/vcf_files/example_test/")
	if os.path.exists("context_distributions/") and not os.path.exists("references/chromosomes/context_distributions/"):
		shutil.copytree("context_distributions/", "references/chromosomes/context_distributions/")

	print("All reference files have been created.")
	if genome != "rn6" and genome != 'dog' and genome != 'c_elegans':
		print("Verifying and benchmarking installation now...")
		benchmark(genome, ref_dir)
	print ("To proceed with matrix_generation, please provide the path to your vcf files and an appropriate output path.")
	shutil.rmtree(chrom_string_dir)
	print("Installation complete.")
	os.chdir(first_path)
def main ():
	"""Command-line entry point: install reference files for one or more genomes.

	Flags:
		-g/--genome   install a single named genome
		              (default set: mm9, mm10, GRCh37, GRCh38)
		-ct/--custom  build references for a custom genome from
		              user-supplied fasta files
	"""
	first_path = os.getcwd()
	os.chdir(first_path + "/sigProfilerMatrixGenerator/")
	genomes = ['mm9', 'mm10','GRCh37', 'GRCh38' ]
	custom = False

	parser = argparse.ArgumentParser(description="Provide the necessary arguments to install the reference files.")
	parser.add_argument("-g", "--genome", nargs='?', help="Optional parameter instructs script to install the custom genome.")
	parser.add_argument("-ct", "--custom", help="Optional parameter instructs script to create the reference files for a custom genome", action='store_true')
	args = parser.parse_args()
	if args.genome:
		genomes = [args.genome]
	if args.custom:
		custom = True

	if os.path.exists("install.log"):
		os.remove("install.log")

	ref_dir = "references/"
	chrom_string_dir = ref_dir + "chromosomes/chrom_string/"
	chrom_fasta_dir = ref_dir + "chromosomes/fasta/"
	chrom_tsb_dir = ref_dir + "chromosomes/tsb/"
	matrix_dir = ref_dir + "matrix/"
	vcf_dir = ref_dir + "vcf_files/"
	bed_dir = ref_dir + "vcf_files/BED/"
	log_dir = "logs/"
	new_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]
	for dirs in new_dirs:
		if not os.path.exists(dirs):
			os.makedirs(dirs)

	# BUG FIX: install_chromosomes() requires the 'rsync' and 'bash' arguments;
	# the original call omitted them and raised a TypeError. Use wget wrapped
	# in bash (rsync=False, bash=True), matching the defaults of install().
	install_chromosomes(genomes, ref_dir, custom, False, True)
	install_chromosomes_tsb (genomes, ref_dir, custom)

	if os.path.exists("BRCA_example/"):
		os.system("mv BRCA_example/ references/vcf_files/")
	if os.path.exists("example_test"):
		os.system("mv example_test/ references/vcf_files/")
	if os.path.exists("context_distributions/"):
		os.system("mv context_distributions/ references/chromosomes/")

	if os.path.exists(chrom_tsb_dir + "GRCh37/"):
		print("All reference files have been created.\nVerifying and benchmarking installation now...")
		# BUG FIX: benchmark() takes (genome, ref_dir); the original call passed
		# only ref_dir and raised a TypeError. The guard above checks for the
		# GRCh37 tsb directory, so benchmark that genome.
		benchmark("GRCh37", ref_dir)
	else:
		print("All reference files have been created.")
	print ("Please place your vcf files for each sample into the 'references/vcf_files/[test]/[mutation_type]/' directory. Once you have done that, you can proceed with the matrix generation.")
	print("Installation complete.")
	os.chdir(first_path)
if __name__ == '__main__':
main() | 61.255692 | 245 | 0.714527 |
from __future__ import print_function
import os
import sys
import re
import subprocess
import argparse
import time
from scipy import spatial
import pandas as pd
import shutil
import logging
import hashlib
from SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn
from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen
def md5(fname):
    """Return the hexadecimal MD5 digest of the file at *fname*.

    The file is consumed in 4 KiB chunks so arbitrarily large reference
    files can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as source:
        block = source.read(4096)
        while block != b"":
            digest.update(block)
            block = source.read(4096)
    return digest.hexdigest()
def install_chromosomes (genomes, ref_dir, custom, rsync, bash):
    """Fetch the per-chromosome FASTA files for each genome and convert them
    into the plain chromosome-string files used by the matrix generator.

    genomes : list of genome build names (e.g. 'GRCh37', 'mm10').
    ref_dir : root of the 'references/' tree.
    custom  : True when the user supplied their own FASTA files.
    rsync   : prefer rsync over wget for the Ensembl download.
    bash    : wrap shell commands in "bash -c '...'" (for non-bash shells).
    """
    if custom:
        # Custom genome: FASTA files are already in place; just unzip and
        # convert them to chromosome strings.
        for genome in genomes:
            os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
            chromosome_fasta_path = "references/chromosomes/fasta/" + genome + "/"
            os.system("python scripts/save_chrom_strings.py -g " + genome)
            print("Chromosome string files for " + genome + " have been created. Continuing with installation.")
    else:
        for genome in genomes:
            species = None
            chrom_number = None
            # Map the genome build to its Ensembl species name and its
            # expected chromosome-file count (used for completeness checks).
            if genome == 'GRCh37' or genome == 'GRCh38':
                species = "homo_sapiens"
                chrom_number = 24
            elif genome == 'mm10' or genome == 'mm9':
                species = "mus_musculus"
                chrom_number = 21
            elif genome == 'rn6':
                species = 'rattus_norvegicus'
                chrom_number = 22
            else:
                print(genome + " is not supported. The following genomes are supported:\nGRCh37, GRCh38, mm10")
                sys.exit()
            chromosome_string_path = "references/chromosomes/chrom_string/" + genome + "/"
            chromosome_fasta_path = "references/chromosomes/fasta/" + genome + "/"
            # Already fully installed (TSB files present for every chromosome).
            # NOTE(review): 'break' aborts the whole loop, not just this
            # genome -- presumably fine for the usual single-genome install;
            # confirm for multi-genome runs.
            if os.path.exists(ref_dir + "chromosomes/tsb/" + genome) and len(os.listdir(ref_dir + "chromosomes/tsb/" + genome)) >= chrom_number:
                break
            wget_flag = True
            if os.path.exists(chromosome_string_path) == False or len(os.listdir(chromosome_string_path)) <= chrom_number:
                print("[DEBUG] Chromosome string files found at: " + ref_dir + chromosome_string_path)
                if os.path.exists(chromosome_fasta_path) == False or len(os.listdir(chromosome_fasta_path)) <= chrom_number:
                    print("[DEBUG] Chromosome fasta files found at: " + ref_dir + chromosome_fasta_path)
                    print("Chromosomes are not currently saved as individual text files for " + genome + ". Downloading the files now...")
                    if not rsync:
                        if wget_flag:
                            try:
                                # Ensembl keeps older builds under pinned release
                                # directories; pick the matching FTP path per build.
                                if genome == 'GRCh37':
                                    if bash:
                                        os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log' + "'")
                                    else:
                                        os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log')
                                elif genome == 'mm9':
                                    if bash:
                                        os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log' + "'")
                                    else:
                                        os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log')
                                elif genome == 'rn6':
                                    if bash:
                                        os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log' + "'")
                                    else:
                                        os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log')
                                else:
                                    if bash:
                                        os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log' + "'")
                                    else:
                                        os.system('wget -r -l1 -c -nc --no-parent -A "*.dna.chromosome.*" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log')
                                os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
                            except:
                                print("The ensembl ftp site is not currently responding.")
                                sys.exit()
                    else:
                        # rsync variant of the same per-build download logic.
                        try:
                            if genome == 'GRCh37':
                                if bash:
                                    os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
                                else:
                                    os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
                            elif genome == 'mm9':
                                if bash:
                                    os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
                                else:
                                    os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
                            elif genome == 'rn6':
                                if bash:
                                    os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ " + chromosome_fasta_path + " 2>> install.log" + "'")
                                else:
                                    os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ " + chromosome_fasta_path + " 2>> install.log")
                            else:
                                if bash:
                                    os.system("bash -c '" + "rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/"+species+"/dna/ " + chromosome_fasta_path + " 2>&1>> install.log" + "'")
                                else:
                                    os.system("rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/"+species+"/dna/ " + chromosome_fasta_path + " 2>&1>> install.log")
                            os.system("gzip -d references/chromosomes/fasta/" + genome + "/*.gz")
                        except:
                            print("The ensembl ftp site is not currently responding.")
                            sys.exit()
                    # Convert the downloaded FASTA files into chromosome-string
                    # files, then drop the FASTA copies to save disk space.
                    print("Chromosome fasta files for " + genome + " have been installed. Creating the chromosome string files now...")
                    os.system("python scripts/save_chrom_strings.py -g " + genome)
                    print("Chromosome string files for " + genome + " have been created. Continuing with installation.")
                    shutil.rmtree(chromosome_fasta_path)
            else:
                print("Chromosome reference files exist for " + genome + ". Continuing with installation.")
def install_chromosomes_tsb (genomes, ref_dir, custom):
    """Build and verify the transcriptional-strand-bias (TSB) reference files.

    For each genome: require the BioMart transcript tables to be present,
    generate the per-chromosome TSB binaries if missing, then validate every
    generated file against a table of known-good MD5 checksums, deleting and
    rejecting corrupted files.  *custom* is accepted for interface parity
    with install_chromosomes but is not consulted here.
    """
    # Known-good MD5 digests per genome build and chromosome, used to detect
    # truncated or corrupted downloads/conversions.
    check_sum = {'GRCh37':
        {'1':'a7d51305e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',
        '4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',
        '7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',
        '10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',
        '13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',
        '16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',
        '19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',
        '22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',
        'MT':'dfd6db5743d399516d5c8dadee5bee78'},
        'GRCh38':
        {'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',
        '4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',
        '7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',
        '10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',
        '13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',
        '16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',
        '19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',
        '22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',
        'MT':'dfd6db5743d399516d5c8dadee5bee78'},
        'mm9':
        {'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',
        '4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',
        '7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',
        '10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',
        '13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',
        '16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',
        '19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',
        'MT':'a1d56043ed8308908965dd080a4d0c8d'},
        'mm10':
        {'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',
        '4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',
        '7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',
        '10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',
        '13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',
        '16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',
        '19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',
        'MT':'a1d56043ed8308908965dd080a4d0c8d'},
        'rn6':
        {'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',
        '4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',
        '7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',
        '10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',
        '13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',
        '16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', '18':'f1cb84f002967854b83bf266ec59a7a3',
        '19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',
        'X':'7a06bafab97c59a819f03633f0a6b7a2'},
        'c_elegans':
        {'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',
        'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',
        'MtDNA':'48983f530959780de0125f74a87d4fc1'},
        'dog':
        {'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',
        '4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',
        '7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',
        '10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',
        '13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',
        '16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',
        '19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',
        '22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',
        '25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',
        '28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',
        '31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',
        '34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',
        '37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}
    for genome in genomes:
        chrom_number = None
        # Expected number of TSB files per build (used for the "is it
        # installed yet" check below).
        if genome == 'GRCh37' or genome == 'GRCh38':
            chrom_number = 24
        elif genome == 'mm10' or genome == 'mm9':
            chrom_number = 21
        elif genome == 'rn6':
            chrom_number = 22
        chromosome_TSB_path = "references/chromosomes/tsb/" + genome + "/"
        transcript_files = "references/chromosomes/transcripts/" + genome + "/"
        print("[DEBUG] Chromosome tsb files found at: " + ref_dir + chromosome_TSB_path)
        # TSB generation needs the BioMart transcript tables; abort with
        # instructions if they are missing.
        if os.path.exists(transcript_files) == False or len(os.listdir(transcript_files)) < 1:
            print("Please download the transcript files before proceeding. You can download the files from 'http://www.ensembl.org/biomart/martview'.")
            print("Follow the format presented in the README file:\n\n\tGene stable ID  Transcript stable ID    Chromosome/scaffold name    Strand  Transcript start (bp)   Transcript end (bp)\n\n\n")
            sys.exit()
        if os.path.exists(chromosome_TSB_path) == False or len(os.listdir(chromosome_TSB_path)) < chrom_number:
            print("The transcriptional reference data for " + genome + " has not been saved. Creating these files now")
            os.system("python scripts/save_tsb_192.py -g " + genome)
        # Verify every TSB file against the checksum table; corrupted files
        # are deleted so a reinstall regenerates them.
        corrupt = False
        for files in os.listdir(chromosome_TSB_path):
            if "proportions" in files:
                continue
            if ".DS_Store" in files:
                continue
            chrom = files.split(".")
            chrom = chrom[0]
            check = md5(chromosome_TSB_path + files)
            if check_sum[genome][chrom] != check:
                corrupt = True
                os.remove(chromosome_TSB_path + files)
                print("[DEBUG] Chromosome " + chrom + " md5sum did not match => reference md5sum: " + str(check_sum[genome][chrom]) + " new file md5sum: " + str(check))
        if corrupt:
            print("The transcriptional reference data appears to be corrupted. Please reinstall the " + genome + " genome.")
            sys.exit()
        print("The transcriptional reference data for " + genome + " has been saved.")
def install_chromosomes_tsb_BED (genomes, custom, ref_dir):
    """Create the per-chromosome TSB BED files for each genome.

    A genome is regenerated unless its tsb_BED directory already holds at
    least 19 files.  *custom* is accepted for interface compatibility but
    is not used by this step.
    """
    for genome in genomes:
        bed_path = ref_dir + "chromosomes/tsb_BED/" + genome + "/"
        have_all = os.path.exists(bed_path) and len(os.listdir(bed_path)) >= 19
        if not have_all:
            os.system("python scripts/save_chrom_tsb_separate.py -g " + genome)
        print("The TSB BED files for " + genome + " have been saved.")
def benchmark (genome, ref_dir):
ref_dir = os.path.dirname(os.path.abspath(__file__))
vcf_path = ref_dir + "/references/vcf_files/" + genome + "_bench/"
start_time = time.time()
matGen.SigProfilerMatrixGeneratorFunc(genome + "_bench", genome, vcf_path)
end_time = time.time()
original_matrix_96 = ref_dir + "/scripts/Benchmark/" + genome + "_bench_orig_96.txt"
original_matrix_3072 = ref_dir + "/scripts/Benchmark/" + genome + "_bench_orig_3072.txt"
new_matrix_96 = vcf_path + "output/SBS/" + genome + "_bench.SBS96.all"
new_matrix_3072 = vcf_path + "output/SBS/" + genome + "_bench.SBS6144.all"
05e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',
'4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',
'7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',
'10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',
'13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',
'16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',
'19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',
'22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',
'MT':'dfd6db5743d399516d5c8dadee5bee78'},
'GRCh38':
{'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',
'4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',
'7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',
'10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',
'13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',
'16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',
'19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',
'22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',
'MT':'dfd6db5743d399516d5c8dadee5bee78'},
'mm9':
{'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',
'4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',
'7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',
'10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',
'13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',
'16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',
'19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',
'MT':'a1d56043ed8308908965dd080a4d0c8d'},
'mm10':
{'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',
'4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',
'7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',
'10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',
'13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',
'16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',
'19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',
'MT':'a1d56043ed8308908965dd080a4d0c8d'},
'rn6':
{'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',
'4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',
'7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',
'10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',
'13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',
'16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', '18':'f1cb84f002967854b83bf266ec59a7a3',
'19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',
'X':'7a06bafab97c59a819f03633f0a6b7a2'},
'c_elegans':
{'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',
'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',
'MtDNA':'48983f530959780de0125f74a87d4fc1'},
'dog':
{'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',
'4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',
'7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',
'10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',
'13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',
'16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',
'19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',
'22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',
'25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',
'28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',
'31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',
'34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',
'37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}
chromosome_fasta_path = ref_dir + "/references/chromosomes/tsb/"
print("Beginning installation. This may take up to 40 minutes to complete.")
if not rsync:
try:
if bash:
try:
os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + 'ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerMatrixGenerator/' + genome + '.tar.gz 2>> install.log' + "'")
except:
print("The UCSD ftp site is not responding...pulling from sanger ftp now.")
try:
os.system("bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log' + "'")
except:
print("The Sanger ftp site is not responding. Please check your internet connection/try again later.")
else:
os.system('wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log')
os.system("tar -xzf " + ref_dir + "/references/chromosomes/tsb/" + genome + ".tar.gz -C " + ref_dir + "/references/chromosomes/tsb/")
os.remove(ref_dir + "/references/chromosomes/tsb/" + genome + ".tar.gz")
except:
print("The ensembl ftp site is not currently responding.")
sys.exit()
else:
print("Direct download for RSYNC is not yet supported")
sys.exit()
chromosome_TSB_path = chromosome_fasta_path + genome + "/"
corrupt = False
for files in os.listdir(chromosome_TSB_path):
if "proportions" in files:
continue
if ".DS_Store" in files:
continue
chrom = files.split(".")
chrom = chrom[0]
check = md5(chromosome_TSB_path + files)
if check_sum[genome][chrom] != check:
corrupt = True
os.remove(chromosome_TSB_path + files)
print("[DEBUG] Chromosome " + chrom + " md5sum did not match => reference md5sum: " + str(check_sum[genome][chrom]) + " new file md5sum: " + str(check))
if corrupt:
print("The transcriptional reference data appears to be corrupted. Please reinstall the " + genome + " genome.")
sys.exit()
print("The transcriptional reference data for " + genome + " has been saved.")
else:
print("Beginning installation. This may take up to 20 minutes to complete.")
first_path = os.getcwd()
ref_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(ref_dir)
print("[DEBUG] Path to SigProfilerMatrixGenerator used for the install: ", ref_dir)
genomes = [genome]
if os.path.exists("install.log"):
os.remove("install.log")
install_chromosomes(genomes, ref_dir, custom, rsync, bash)
install_chromosomes_tsb (genomes, ref_dir, custom)
if os.path.exists("BRCA_example/"):
shutil.copy("BRCA_example/", "references/vcf_files/")
if os.path.exists("example_test"):
shutil.copy("example_test/", "references/vcf_files/")
if os.path.exists("context_distributions/"):
shutil.copy("context_distributions/", "references/chromosomes/")
print("All reference files have been created.")
if genome != "rn6" and genome != 'dog' and genome != 'c_elegans':
print("Verifying and benchmarking installation now...")
benchmark(genome, ref_dir)
print ("To proceed with matrix_generation, please provide the path to your vcf files and an appropriate output path.")
shutil.rmtree(chrom_string_dir)
print("Installation complete.")
os.chdir(first_path)
def main ():
    """Command-line entry point: build the references/ directory tree and
    install the chromosome and transcriptional reference files for the
    requested genome(s), then optionally benchmark the installation."""
    first_path= os.getcwd()
    # All relative paths below assume the package directory as CWD.
    os.chdir(first_path + "/sigProfilerMatrixGenerator/")
    genomes = ['mm9', 'mm10','GRCh37', 'GRCh38' ]  # defaults when -g is not given
    custom = False
    parser = argparse.ArgumentParser(description="Provide the necessary arguments to install the reference files.")
    parser.add_argument("-g", "--genome", nargs='?', help="Optional parameter instructs script to install the custom genome.")
    parser.add_argument("-ct", "--custom", help="Optional parameter instructs script to create the reference files for a custom genome", action='store_true')
    args = parser.parse_args()
    if args.genome:
        genomes = [args.genome]
    if args.custom:
        custom = True
    # Start each run with a fresh install log.
    if os.path.exists("install.log"):
        os.system("rm install.log")
    # Scaffold of the reference tree populated by the install steps.
    ref_dir = "references/"
    chrom_string_dir = ref_dir + "chromosomes/chrom_string/"
    chrom_fasta_dir = ref_dir + "chromosomes/fasta/"
    chrom_tsb_dir = ref_dir + "chromosomes/tsb/"
    matrix_dir = ref_dir + "matrix/"
    vcf_dir = ref_dir + "vcf_files/"
    bed_dir = ref_dir + "vcf_files/BED/"
    log_dir = "logs/"
    new_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]
    current_dir = os.getcwd()
    for dirs in new_dirs:
        if not os.path.exists(dirs):
            os.makedirs(dirs)
    # NOTE(review): install_chromosomes above is declared with five parameters
    # (genomes, ref_dir, custom, rsync, bash); this three-argument call looks
    # like an older revision of the script -- confirm which signature applies.
    install_chromosomes(genomes, ref_dir, custom)
    install_chromosomes_tsb (genomes, ref_dir, custom)
    # Relocate the bundled example inputs into the reference tree.
    if os.path.exists("BRCA_example/"):
        os.system("mv BRCA_example/ references/vcf_files/")
    if os.path.exists("example_test"):
        os.system("mv example_test/ references/vcf_files/")
    if os.path.exists("context_distributions/"):
        os.system("mv context_distributions/ references/chromosomes/")
    if os.path.exists(chrom_tsb_dir + "GRCh37/"):
        print("All reference files have been created.\nVerifying and benchmarking installation now...")
        # NOTE(review): benchmark above is declared as benchmark(genome, ref_dir);
        # this single-argument call also appears to predate that change -- verify.
        benchmark(ref_dir)
    else:
        print("All reference files have been created.")
    print ("Please place your vcf files for each sample into the 'references/vcf_files/[test]/[mutation_type]/' directory. Once you have done that, you can proceed with the matrix generation.")
    print("Installation complete.")
    os.chdir(first_path)
if __name__ == '__main__':
    main()
f72b5d1333df08c7bba72728c8f28fe54e5dda17 | 2,824 | py | Python | storyboard/tests/plugin/test_event_worker.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | storyboard/tests/plugin/test_event_worker.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | storyboard/tests/plugin/test_event_worker.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
import storyboard.db.api.base as db_api_base
import storyboard.plugin.event_worker as plugin_base
import storyboard.tests.base as base
class TestWorkerTaskBase(base.FunctionalTest):
    """Functional checks for WorkerTaskBase.resolve_resource_by_name."""

    def setUp(self):
        super(TestWorkerTaskBase, self).setUp()

    def test_resolve_by_name(self):
        '''Assert that resolve_resource_by_name works.'''
        worker = TestWorkerPlugin({})

        with base.HybridSessionManager():
            session = db_api_base.get_session()

            # Every supported resource type must resolve id 1 to a record
            # whose primary key is 1.
            for resource_name in ('task', 'project_group', 'project',
                                  'user', 'team', 'story', 'branch',
                                  'milestone'):
                resolved = worker.resolve_resource_by_name(session,
                                                           resource_name, 1)
                self.assertIsNotNone(resolved)
                self.assertEqual(1, resolved.id)
class TestWorkerPlugin(plugin_base.WorkerTaskBase):
    # Minimal concrete WorkerTaskBase used as a fixture by the tests above.

    def handle(self, session, author, method, url, path, query_string, status,
               resource, resource_id, sub_resource=None, sub_resource_id=None,
               resource_before=None, resource_after=None):
        """No-op event handler; the tests only exercise resource resolution."""
        pass

    def enabled(self):
        """Always report the plugin as enabled."""
        return True
| 37.653333 | 79 | 0.651558 |
import storyboard.db.api.base as db_api_base
import storyboard.plugin.event_worker as plugin_base
import storyboard.tests.base as base
class TestWorkerTaskBase(base.FunctionalTest):
    """Functional checks for WorkerTaskBase.resolve_resource_by_name."""

    def setUp(self):
        super(TestWorkerTaskBase, self).setUp()

    def test_resolve_by_name(self):
        """Resolve id 1 for every supported resource type and verify the
        returned record's primary key is 1."""
        worker = TestWorkerPlugin({})

        with base.HybridSessionManager():
            session = db_api_base.get_session()

            task = worker.resolve_resource_by_name(session, 'task', 1)
            self.assertIsNotNone(task)
            self.assertEqual(1, task.id)

            project_group = worker.resolve_resource_by_name(session,
                                                            'project_group', 1)
            self.assertIsNotNone(project_group)
            self.assertEqual(1, project_group.id)

            project = worker.resolve_resource_by_name(session, 'project', 1)
            self.assertIsNotNone(project)
            self.assertEqual(1, project.id)

            user = worker.resolve_resource_by_name(session, 'user', 1)
            self.assertIsNotNone(user)
            self.assertEqual(1, user.id)

            team = worker.resolve_resource_by_name(session, 'team', 1)
            self.assertIsNotNone(team)
            self.assertEqual(1, team.id)

            story = worker.resolve_resource_by_name(session, 'story', 1)
            self.assertIsNotNone(story)
            self.assertEqual(1, story.id)

            branch = worker.resolve_resource_by_name(session, 'branch', 1)
            self.assertIsNotNone(branch)
            self.assertEqual(1, branch.id)

            milestone = worker.resolve_resource_by_name(session,
                                                        'milestone', 1)
            self.assertIsNotNone(milestone)
            self.assertEqual(1, milestone.id)
class TestWorkerPlugin(plugin_base.WorkerTaskBase):
    # Minimal concrete WorkerTaskBase used as a fixture by the tests above.

    def handle(self, session, author, method, url, path, query_string, status,
               resource, resource_id, sub_resource=None, sub_resource_id=None,
               resource_before=None, resource_after=None):
        """No-op event handler; the tests only exercise resource resolution."""
        pass

    def enabled(self):
        """Always report the plugin as enabled."""
        return True
| true | true |
f72b5ecb16ba78f38ce59b844429ea0150cb7a47 | 8,108 | py | Python | airflow/www/api/experimental/endpoints.py | guiligan/incubator-airflow | b3c0ae003037ae6c652b177b9f86ecac84c792a5 | [
"Apache-2.0"
] | null | null | null | airflow/www/api/experimental/endpoints.py | guiligan/incubator-airflow | b3c0ae003037ae6c652b177b9f86ecac84c792a5 | [
"Apache-2.0"
] | null | null | null | airflow/www/api/experimental/endpoints.py | guiligan/incubator-airflow | b3c0ae003037ae6c652b177b9f86ecac84c792a5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import (
g, Blueprint, jsonify, request, url_for
)
import airflow.api
from airflow.api.common.experimental import delete_dag as delete
from airflow.api.common.experimental import pool as pool_api
from airflow.api.common.experimental import trigger_dag as trigger
from airflow.api.common.experimental.get_task import get_task
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.www.app import csrf
_log = LoggingMixin().log
requires_authentication = airflow.api.api_auth.requires_authentication
api_experimental = Blueprint('api_experimental', __name__)
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>/dag_runs', methods=['POST'])
@requires_authentication
def trigger_dag(dag_id):
    """
    Trigger a new dag run for a Dag with an execution date of now unless
    specified in the data.
    """
    payload = request.get_json(force=True)
    run_id = payload.get('run_id')
    conf = payload.get('conf')
    execution_date = payload.get('execution_date')
    if execution_date is not None:
        # The client sends the execution date as text; reject anything that
        # does not parse as an ISO-8601 timestamp.
        try:
            execution_date = timezone.parse(execution_date)
        except ValueError:
            error_message = (
                'Given execution date, {}, could not be identified '
                'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
                    execution_date))
            _log.info(error_message)
            bad_request = jsonify({'error': error_message})
            bad_request.status_code = 400
            return bad_request
    try:
        dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
    except AirflowException as err:
        _log.error(err)
        not_found = jsonify(error="{}".format(err))
        not_found.status_code = 404
        return not_found
    if getattr(g, 'user', None):
        _log.info("User {} created {}".format(g.user, dr))
    return jsonify(message="Created {}".format(dr))
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>', methods=['DELETE'])
@requires_authentication
def delete_dag(dag_id):
    """
    Delete all DB records related to the specified Dag.
    """
    try:
        count = delete.delete_dag(dag_id)
    except AirflowException as e:
        _log.error(e)
        failure = jsonify(error="{}".format(e))
        # An AirflowException may carry its own HTTP status; default to 500.
        failure.status_code = getattr(e, 'status', 500)
        return failure
    else:
        return jsonify(message="Removed {} record(s)".format(count), count=count)
@api_experimental.route('/test', methods=['GET'])
@requires_authentication
def test():
    """Health-check endpoint: confirms the experimental API is reachable."""
    health = jsonify(status='OK')
    return health
@api_experimental.route('/dags/<string:dag_id>/tasks/<string:task_id>', methods=['GET'])
@requires_authentication
def task_info(dag_id, task_id):
    """Returns a JSON with a task's public instance variables. """
    try:
        info = get_task(dag_id, task_id)
    except AirflowException as err:
        _log.info(err)
        not_found = jsonify(error="{}".format(err))
        not_found.status_code = 404
        return not_found
    # Expose only the public attributes, stringified so they are JSON-safe.
    public_fields = {}
    for attr_name, attr_value in vars(info).items():
        if not attr_name.startswith('_'):
            public_fields[attr_name] = str(attr_value)
    return jsonify(public_fields)
@api_experimental.route(
    '/dags/<string:dag_id>/dag_runs/<string:execution_date>/tasks/<string:task_id>',
    methods=['GET'])
@requires_authentication
def task_instance_info(dag_id, execution_date, task_id):
    """
    Returns a JSON with a task instance's public instance variables.
    The format for the exec_date is expected to be
    "YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will
    of course need to have been encoded for URL in the request.
    """
    # The execution date arrives URL-encoded as text; turn it into a datetime.
    try:
        execution_date = timezone.parse(execution_date)
    except ValueError:
        error_message = (
            'Given execution date, {}, could not be identified '
            'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
                execution_date))
        _log.info(error_message)
        bad_request = jsonify({'error': error_message})
        bad_request.status_code = 400
        return bad_request
    try:
        info = get_task_instance(dag_id, task_id, execution_date)
    except AirflowException as err:
        _log.info(err)
        not_found = jsonify(error="{}".format(err))
        not_found.status_code = 404
        return not_found
    # Expose only the public attributes, stringified so they are JSON-safe.
    public_fields = {}
    for attr_name, attr_value in vars(info).items():
        if not attr_name.startswith('_'):
            public_fields[attr_name] = str(attr_value)
    return jsonify(public_fields)
@api_experimental.route('/latest_runs', methods=['GET'])
@requires_authentication
def latest_dag_runs():
    """Returns the latest DagRun for each DAG formatted for the UI. """
    from airflow.models import DagRun

    def _serialize(run):
        # Shape one DagRun the way the UI's latest-runs widget expects.
        return {
            'dag_id': run.dag_id,
            'execution_date': run.execution_date.isoformat(),
            'start_date': run.start_date.isoformat() if run.start_date else '',
            'dag_run_url': url_for('airflow.graph', dag_id=run.dag_id,
                                   execution_date=run.execution_date),
        }

    items = [_serialize(run) for run in DagRun.get_latest_runs()
             if run.execution_date]
    # Old flask versions cannot jsonify a bare array, so wrap it in a dict.
    return jsonify(items=items)
@api_experimental.route('/pools/<string:name>', methods=['GET'])
@requires_authentication
def get_pool(name):
    """Get pool by a given name."""
    try:
        pool = pool_api.get_pool(name=name)
    except AirflowException as e:
        _log.error(e)
        failure = jsonify(error="{}".format(e))
        # An AirflowException may carry its own HTTP status; default to 500.
        failure.status_code = getattr(e, 'status', 500)
        return failure
    return jsonify(pool.to_json())
@api_experimental.route('/pools', methods=['GET'])
@requires_authentication
def get_pools():
    """Get all pools."""
    try:
        pools = pool_api.get_pools()
    except AirflowException as e:
        _log.error(e)
        failure = jsonify(error="{}".format(e))
        # An AirflowException may carry its own HTTP status; default to 500.
        failure.status_code = getattr(e, 'status', 500)
        return failure
    serialized = []
    for pool in pools:
        serialized.append(pool.to_json())
    return jsonify(serialized)
@csrf.exempt
@api_experimental.route('/pools', methods=['POST'])
@requires_authentication
def create_pool():
    """Create a pool."""
    params = request.get_json(force=True)
    try:
        pool = pool_api.create_pool(**params)
    except AirflowException as e:
        _log.error(e)
        failure = jsonify(error="{}".format(e))
        # An AirflowException may carry its own HTTP status; default to 500.
        failure.status_code = getattr(e, 'status', 500)
        return failure
    return jsonify(pool.to_json())
@csrf.exempt
@api_experimental.route('/pools/<string:name>', methods=['DELETE'])
@requires_authentication
def delete_pool(name):
    """Delete pool."""
    try:
        pool = pool_api.delete_pool(name=name)
    except AirflowException as e:
        _log.error(e)
        failure = jsonify(error="{}".format(e))
        # An AirflowException may carry its own HTTP status; default to 500.
        failure.status_code = getattr(e, 'status', 500)
        return failure
    return jsonify(pool.to_json())
| 32.302789 | 88 | 0.656265 |
from flask import (
g, Blueprint, jsonify, request, url_for
)
import airflow.api
from airflow.api.common.experimental import delete_dag as delete
from airflow.api.common.experimental import pool as pool_api
from airflow.api.common.experimental import trigger_dag as trigger
from airflow.api.common.experimental.get_task import get_task
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.www.app import csrf
_log = LoggingMixin().log
requires_authentication = airflow.api.api_auth.requires_authentication
api_experimental = Blueprint('api_experimental', __name__)
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>/dag_runs', methods=['POST'])
@requires_authentication
def trigger_dag(dag_id):
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
if getattr(g, 'user', None):
_log.info("User {} created {}".format(g.user, dr))
response = jsonify(message="Created {}".format(dr))
return response
@csrf.exempt
@api_experimental.route('/dags/<string:dag_id>', methods=['DELETE'])
@requires_authentication
def delete_dag(dag_id):
try:
count = delete.delete_dag(dag_id)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
return jsonify(message="Removed {} record(s)".format(count), count=count)
@api_experimental.route('/test', methods=['GET'])
@requires_authentication
def test():
return jsonify(status='OK')
@api_experimental.route('/dags/<string:dag_id>/tasks/<string:task_id>', methods=['GET'])
@requires_authentication
def task_info(dag_id, task_id):
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields)
@api_experimental.route(
'/dags/<string:dag_id>/dag_runs/<string:execution_date>/tasks/<string:task_id>',
methods=['GET'])
@requires_authentication
def task_instance_info(dag_id, execution_date, task_id):
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
info = get_task_instance(dag_id, task_id, execution_date)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = 404
return response
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields)
@api_experimental.route('/latest_runs', methods=['GET'])
@requires_authentication
def latest_dag_runs():
from airflow.models import DagRun
dagruns = DagRun.get_latest_runs()
payload = []
for dagrun in dagruns:
if dagrun.execution_date:
payload.append({
'dag_id': dagrun.dag_id,
'execution_date': dagrun.execution_date.isoformat(),
'start_date': ((dagrun.start_date or '') and
dagrun.start_date.isoformat()),
'dag_run_url': url_for('airflow.graph', dag_id=dagrun.dag_id,
execution_date=dagrun.execution_date)
})
return jsonify(items=payload)
@api_experimental.route('/pools/<string:name>', methods=['GET'])
@requires_authentication
def get_pool(name):
try:
pool = pool_api.get_pool(name=name)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
@api_experimental.route('/pools', methods=['GET'])
@requires_authentication
def get_pools():
try:
pools = pool_api.get_pools()
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify([p.to_json() for p in pools])
@csrf.exempt
@api_experimental.route('/pools', methods=['POST'])
@requires_authentication
def create_pool():
params = request.get_json(force=True)
try:
pool = pool_api.create_pool(**params)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
@csrf.exempt
@api_experimental.route('/pools/<string:name>', methods=['DELETE'])
@requires_authentication
def delete_pool(name):
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as e:
_log.error(e)
response = jsonify(error="{}".format(e))
response.status_code = getattr(e, 'status', 500)
return response
else:
return jsonify(pool.to_json())
| true | true |
f72b5fa86d6b83ca6337f2fcfbd2bd36f1181b33 | 356 | py | Python | src/subplot1.py | AnaharaYasuo/mlPractice | 1a3d110fdc6cf4084ee6b1268d215151de5939cb | [
"Apache-2.0"
] | null | null | null | src/subplot1.py | AnaharaYasuo/mlPractice | 1a3d110fdc6cf4084ee6b1268d215151de5939cb | [
"Apache-2.0"
] | null | null | null | src/subplot1.py | AnaharaYasuo/mlPractice | 1a3d110fdc6cf4084ee6b1268d215151de5939cb | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Demo: stacked subplots with sine on top and cosine below, clamped to
    # the same y-range so the curves are directly comparable.
    xs = np.linspace(-5, 5, 300)
    fig, axes = plt.subplots(2, 1)
    for axis, curve, colour in zip(axes, (np.sin(xs), np.cos(xs)), ("r", "k")):
        axis.set_ylim([-1.5, 1.5])
        axis.plot(xs, curve, color=colour)
plt.show() | 23.733333 | 39 | 0.564607 | import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Demo: stacked subplots with sine on top and cosine below, clamped to
    # the same y-range so the curves are directly comparable.
    xs = np.linspace(-5, 5, 300)
    fig, axes = plt.subplots(2, 1)
    for axis, curve, colour in zip(axes, (np.sin(xs), np.cos(xs)), ("r", "k")):
        axis.set_ylim([-1.5, 1.5])
        axis.plot(xs, curve, color=colour)
plt.show() | true | true |
f72b609af987bf7e917700aeb972f5133849bc61 | 6,985 | py | Python | log_caspase/model_378.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_caspase/model_378.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_caspase/model_378.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
# Monomer declarations: each species name is followed by the list of its
# binding sites used by the rules below (e.g. C8pro exposes sites for Fadd
# and C6A). NOTE(review): the naming (Ligand/Receptor/Fadd/C3/C6/C8/Xiap/
# Parp) suggests a caspase-cascade apoptosis model — confirm against the
# exporting PySB source.
Monomer('C6A', ['C8pro'])
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C3ub')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('Xiap', ['C3A'])
Monomer('C8A', ['C3pro'])
Monomer('C3pro', ['C8A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C3ub_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('Xiap_0', 94500.0)
Parameter('C8A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Parameter('Fadd_0', 130000.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C3ub_obs', C3ub())
Observable('C3A_obs', C3A())
Observable('Xiap_obs', Xiap())
Observable('C8A_obs', C8A())
Observable('C3pro_obs', C3pro())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Observable('Fadd_obs', Fadd())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=None) + C3pro(C8A=None) | C8A(C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=1) % C3pro(C8A=1) >> C8A(C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C3ub(), C3ub_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(Xiap(C3A=None), Xiap_0)
Initial(C8A(C3pro=None), C8A_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| 69.85 | 296 | 0.818039 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C3ub')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('Xiap', ['C3A'])
Monomer('C8A', ['C3pro'])
Monomer('C3pro', ['C8A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C3ub_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('Xiap_0', 94500.0)
Parameter('C8A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Parameter('Fadd_0', 130000.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C3ub_obs', C3ub())
Observable('C3A_obs', C3A())
Observable('Xiap_obs', Xiap())
Observable('C8A_obs', C8A())
Observable('C3pro_obs', C3pro())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Observable('Fadd_obs', Fadd())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=None) + C3pro(C8A=None) | C8A(C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=1) % C3pro(C8A=1) >> C8A(C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C3ub(), C3ub_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(Xiap(C3A=None), Xiap_0)
Initial(C8A(C3pro=None), C8A_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| true | true |
f72b6381fc06e7cffc979d54eb10670337ea3e1b | 35,546 | py | Python | sorn/utils.py | Saran-nns/sorn | 619772c508b88aa711780ab9155fe5d0aa5214eb | [
"MIT"
] | 19 | 2019-03-18T21:51:53.000Z | 2022-01-02T01:27:37.000Z | sorn/utils.py | Saran-nns/sorn | 619772c508b88aa711780ab9155fe5d0aa5214eb | [
"MIT"
] | 32 | 2019-03-10T23:55:22.000Z | 2022-01-04T19:28:45.000Z | sorn/utils.py | Saran-nns/sorn | 619772c508b88aa711780ab9155fe5d0aa5214eb | [
"MIT"
] | 4 | 2019-05-07T13:46:47.000Z | 2022-01-07T17:06:41.000Z | from __future__ import division
import numpy as np
from scipy.stats import norm
import random
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import curve_fit
from scipy import stats
import networkx as nx
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
class Initializer(object):
    """
    Helper class to initialize the input and weight matrices for the SORN.
    All public helpers below are static methods; the class is effectively a
    namespace and carries no instance state.
    """
    def __init__(self):
        # No instance state; kept so existing callers that instantiate
        # Initializer() keep working.
        pass
@staticmethod
def generate_strong_inp(length: int, reservoir_size: int):
"""Generate strong one-hot vector of input. Random neurons in the reservoir acts as inputs
Args:
length (int): Number of input neurons
Returns:
inp (array): Input vector of length equals the number of neurons in the reservoir
with randomly chosen neuron set active
idx (list): List of chosen input neurons """
inp = [0] * reservoir_size
x = [0] * length
idx = np.random.choice(length, np.random.randint(reservoir_size))
for i in idx:
x[i] = 1.0e4
inp[: len(x)] = x
return inp, idx
# Generate multi-node one-hot strong inputs
@staticmethod
def multi_one_hot_inp(ne: int, inputs: list, n_nodes_per_inp: int):
"""Generate multi(n_nodes_per_inp) one hot vector for each input.
For each input, set n_nodes_per_inp equals one and the rest of
neurons in the pool recieves no external stimuli
Args:
ne (int): Number of excitatory units in sorn
inputs (list): input labels
n_nodes_per_inp(int): Number of target units in pool that receives single input
Returns:
one_hot_vector for each label with length equals ne
"""
one_hot = np.zeros((ne, len(inputs)))
idxs = []
for _ in range(n_nodes_per_inp):
idxs.append(random.sample(range(0, ne), len(inputs)))
idxs = list(zip(*idxs))
j = 0 # Max(j) = len(inputs)
for idx_list in idxs:
for i in idx_list:
one_hot[i][j] = 1
j += 1
return one_hot, idxs
@staticmethod
def generate_gaussian_inputs(length: int, reservoir_size: int):
"""Generate external stimuli sampled from Gaussian distribution.
Randomly neurons in the reservoir receives this input at each timestep
Args:
length (int): Number of input neurons
Returns:
out (array): Input vector of length equals the number of neurons in the reservoir
with randomly chosen neuron set active
idx (int): List of chosen input neurons
"""
out = [0] * reservoir_size
x = [0] * length
idx = np.random.choice(length, np.random.randint(reservoir_size))
inp = np.random.normal(length)
for i in idx:
x[i] = inp[i]
out[: len(x)] = x
return out, idx
@staticmethod
def normalize_weight_matrix(weight_matrix: np.array):
# Applied only while initializing the weight. During simulation, Synaptic scaling applied on weight matrices
""" Normalize the weights in the matrix such that incoming connections to a neuron sum up to 1
Args:
weight_matrix (array): Incoming Weights from W_ee or W_ei or W_ie
Returns:
weight_matrix (array): Normalized weight matrix"""
normalized_weight_matrix = weight_matrix / np.sum(weight_matrix, axis=0)
return normalized_weight_matrix
    @staticmethod
    def generate_lambd_connections(
        synaptic_connection: str, ne: int, ni: int, lambd_w: int, lambd_std: int
    ):
        """Generate lambda incoming connections for excitatory neurons and outgoing connections per inhibitory neuron.

        Per-neuron connection counts are drawn from a normal distribution
        (mean ``lambd_w``, std ``lambd_std``) and the corresponding synaptic
        strengths are sampled uniformly from [0.0, 0.1).

        Args:
            synaptic_connection (str): Type of synaptic connection (EE, EI or IE).
                NOTE(review): only "EE" and "EI" are handled here; any other
                value (e.g. "IE") falls through and returns None implicitly.
            ne (int): Number of excitatory units
            ni (int): Number of inhibitory units
            lambd_w (int): Average number of incoming connections
            lambd_std (int): Standard deviation of the number of connections per neuron

        Returns:
            connection_weights (array): Weight matrix (ne x ne for "EE",
                ni x ne for "EI"); implicitly None for unhandled types
        """
        if synaptic_connection == "EE":
            """Choose random lamda connections per neuron"""
            # Draw normally distributed ne integers with mean lambd_w.
            # NOTE(review): norm.ppf can yield negative counts when lambd_std
            # is large relative to lambd_w; random.sample below would then
            # raise ValueError — confirm the intended parameter range.
            lambdas_incoming = norm.ppf(
                np.random.random(ne), loc=lambd_w, scale=lambd_std
            ).astype(int)
            # lambdas_outgoing = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)
            # List of neurons
            list_neurons = list(range(ne))
            # Connection weights
            connection_weights = np.zeros((ne, ne))
            # For each lambd value in the above list,
            # generate weights for incoming and outgoing connections
            # -------------Gaussian Distribution of weights --------------
            # weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution
            # Centered around 2 to make all values positive
            # ------------Uniform Distribution --------------------------
            # One flat pool of uniform weights, sliced per neuron below
            global_incoming_weights = np.random.uniform(0.0, 0.1, sum(lambdas_incoming))
            # Index Counter
            global_incoming_weights_idx = 0
            # Choose the neurons in order [0 to ne-1]
            for neuron in list_neurons:
                # Choose random unique (lambdas[neuron]) neurons from list_neurons
                possible_connections = list_neurons.copy()
                possible_connections.remove(
                    neuron
                )  # Remove the selected neuron from possible connections i!=j
                # Choose random presynaptic neurons
                possible_incoming_connections = random.sample(
                    possible_connections, lambdas_incoming[neuron]
                )
                # Slice this neuron's share out of the flat weight pool
                incoming_weights_neuron = global_incoming_weights[
                    global_incoming_weights_idx : global_incoming_weights_idx
                    + lambdas_incoming[neuron]
                ]
                # ---------- Update the connection weight matrix ------------
                # Update incoming connection weights for selected 'neuron'
                for incoming_idx, incoming_weight in enumerate(incoming_weights_neuron):
                    connection_weights[possible_incoming_connections[incoming_idx]][
                        neuron
                    ] = incoming_weight
                global_incoming_weights_idx += lambdas_incoming[neuron]
            return connection_weights
        if synaptic_connection == "EI":
            """Choose random lamda connections per neuron"""
            # Draw normally distributed ni integers with mean lambd_w
            lambdas = norm.ppf(
                np.random.random(ni), loc=lambd_w, scale=lambd_std
            ).astype(int)
            # List of neurons
            list_neurons = list(range(ni))  # Each i can connect with random ne neurons
            # Initializing connection weights variable
            connection_weights = np.zeros((ni, ne))
            # ------------Uniform Distribution -----------------------------
            global_outgoing_weights = np.random.uniform(0.0, 0.1, sum(lambdas))
            # Index Counter
            global_outgoing_weights_idx = 0
            # Choose the neurons in order [0 to ni-1]
            for neuron in list_neurons:
                # Choose random unique (lambdas[neuron]) neurons from list_neurons
                possible_connections = list(range(ne))
                possible_outgoing_connections = random.sample(
                    possible_connections, lambdas[neuron]
                )  # possible_outgoing connections to the neuron
                # Slice this neuron's share out of the flat weight pool
                outgoing_weights = global_outgoing_weights[
                    global_outgoing_weights_idx : global_outgoing_weights_idx
                    + lambdas[neuron]
                ]
                # ---------- Update the connection weight matrix ------------
                # Update outgoing connections for the neuron
                for outgoing_idx, outgoing_weight in enumerate(
                    outgoing_weights
                ):  # Update the columns in the connection matrix
                    connection_weights[neuron][
                        possible_outgoing_connections[outgoing_idx]
                    ] = outgoing_weight
                # Update the global weight values index
                global_outgoing_weights_idx += lambdas[neuron]
            return connection_weights
@staticmethod
def get_incoming_connection_dict(weights: np.array):
""" Get the non-zero entries in columns is the incoming connections for the neurons
Args:
weights (np.array): Connection/Synaptic weights
Returns:
dict : Dictionary of incoming connections to each neuron
"""
# Indices of nonzero entries in the columns
connection_dict = dict.fromkeys(range(1, len(weights) + 1), 0)
for i in range(len(weights[0])): # For each neuron
connection_dict[i] = list(np.nonzero(weights[:, i])[0])
return connection_dict
@staticmethod
def get_outgoing_connection_dict(weights: np.array):
"""Get the non-zero entries in rows is the outgoing connections for the neurons
Args:
weights (np.array): Connection/Synaptic weights
Returns:
dict : Dictionary of outgoing connections from each neuron
"""
# Indices of nonzero entries in the rows
connection_dict = dict.fromkeys(range(1, len(weights) + 1), 1)
for i in range(len(weights[0])): # For each neuron
connection_dict[i] = list(np.nonzero(weights[i, :])[0])
return connection_dict
@staticmethod
def prune_small_weights(weights: np.array, cutoff_weight: float):
"""Prune the connections with negative connection strength. The weights less than cutoff_weight set to 0
Args:
weights (np.array): Synaptic strengths
cutoff_weight (float): Lower weight threshold
Returns:
array: Connections weights with values less than cutoff_weight set to 0
"""
weights[weights <= cutoff_weight] = cutoff_weight
return weights
@staticmethod
def set_max_cutoff_weight(weights: np.array, cutoff_weight: float):
""" Set cutoff limit for the values in given array
Args:
weights (np.array): Synaptic strengths
cutoff_weight (float): Higher weight threshold
Returns:
array: Connections weights with values greater than cutoff_weight set to 1
"""
weights[weights > cutoff_weight] = cutoff_weight
return weights
@staticmethod
def get_unconnected_indexes(wee: np.array):
""" Helper function for Structural plasticity to randomly select the unconnected units
Args:
wee (array): Weight matrix
Returns:
list (indices): (row_idx,col_idx)"""
i, j = np.where(wee <= 0.0)
indices = list(zip(i, j))
self_conn_removed = []
for i, idxs in enumerate(indices):
if idxs[0] != idxs[1]:
self_conn_removed.append(indices[i])
return self_conn_removed
@staticmethod
def white_gaussian_noise(mu: float, sigma: float, t: int):
"""Generates white gaussian noise with mean mu, standard deviation sigma and
the noise length equals t
Args:
mu (float): Mean value of Gaussian noise
sigma (float): Standard deviation of Gaussian noise
t (int): Length of noise vector
Returns:
array: White gaussian noise of length t
"""
noise = np.random.normal(mu, sigma, t)
return np.expand_dims(noise, 1)
@staticmethod
def zero_sum_incoming_check(weights: np.array):
"""Make sure, each neuron in the pool has atleast 1 incoming connection
Args:
weights (array): Synaptic strengths
Returns:
array: Synaptic weights of neurons with atleast one positive (non-zero) incoming connection strength
"""
zero_sum_incomings = np.where(np.sum(weights, axis=0) == 0.0)
if len(zero_sum_incomings[-1]) == 0:
return weights
else:
for zero_sum_incoming in zero_sum_incomings[-1]:
rand_indices = np.random.randint(
int(weights.shape[0] * 0.2), size=2
)
rand_values = np.random.uniform(0.0, 0.1, 2)
for i, idx in enumerate(rand_indices):
weights[:, zero_sum_incoming][idx] = rand_values[i]
return weights
class Plotter(object):
    """Wrapper class to call plotting methods."""

    def __init__(self):
        pass

    @staticmethod
    def hist_incoming_conn(
        weights: np.array, bin_size: int, histtype: str, savefig: bool
    ):
        """Plot the histogram of number of presynaptic connections per neuron.

        Args:
            weights (array): Connection weights
            bin_size (int): Histogram bin size
            histtype (str): Same as histtype matplotlib (currently unused; kept for API compatibility)
            savefig (bool): If True plot will be saved as png file in the cwd

        Returns:
            plot (matplotlib.pyplot): plot object
        """
        num_incoming_weights = np.sum(np.array(weights) > 0, axis=0)
        plt.figure(figsize=(12, 5))
        plt.xlabel("Number of connections")
        plt.ylabel("Probability")
        # Fit a normal distribution to the data
        mu, std = norm.fit(num_incoming_weights)
        plt.hist(num_incoming_weights, bins=bin_size, density=True, alpha=0.6, color='b')
        # Overlay the fitted PDF
        xmin, xmax = plt.xlim()
        x = np.linspace(xmin, xmax, max(num_incoming_weights))
        p = norm.pdf(x, mu, std)
        plt.plot(x, p, 'k', linewidth=2)
        title = "Distribution of presynaptic connections: mu = %.2f, std = %.2f" % (mu, std)
        plt.title(title)
        if savefig:
            plt.savefig("hist_incoming_conn")
        return plt.show()

    @staticmethod
    def hist_outgoing_conn(
        weights: np.array, bin_size: int, histtype: str, savefig: bool
    ):
        """Plot the histogram of number of postsynaptic connections per neuron.

        Args:
            weights (array): Connection weights
            bin_size (int): Histogram bin size
            histtype (str): Same as histtype matplotlib (currently unused; kept for API compatibility)
            savefig (bool): If True plot will be saved as png file in the cwd

        Returns:
            plot object
        """
        num_outgoing_weights = np.sum(np.array(weights) > 0, axis=1)
        plt.figure(figsize=(12, 5))
        plt.xlabel("Number of connections")
        plt.ylabel("Probability")
        # Fit a normal distribution to the data
        mu, std = norm.fit(num_outgoing_weights)
        plt.hist(num_outgoing_weights, bins=bin_size, density=True, alpha=0.6, color='b')
        # Overlay the fitted PDF
        xmin, xmax = plt.xlim()
        x = np.linspace(xmin, xmax, max(num_outgoing_weights))
        p = norm.pdf(x, mu, std)
        plt.plot(x, p, 'k', linewidth=2)
        title = "Distribution of post synaptic connections: mu = %.2f, std = %.2f" % (mu, std)
        plt.title(title)
        if savefig:
            plt.savefig("hist_outgoing_conn")
        return plt.show()

    @staticmethod
    def network_connection_dynamics(
        connection_counts: np.array, savefig: bool
    ):
        """Plot the number of positive connections in the excitatory pool over time.

        Args:
            connection_counts (array): 1D array of number of connections in the network per time step
            savefig (bool): If True plot will be saved as png file in the cwd

        Returns:
            plot object
        """
        # Plot graph for entire simulation time period
        _, ax1 = plt.subplots(figsize=(12, 5))
        ax1.plot(connection_counts, label="Connection dynamics")
        plt.margins(x=0)
        ax1.set_xticks(ax1.get_xticks()[::2])
        ax1.set_title("Network connection dynamics")
        plt.ylabel("Number of active connections")
        plt.xlabel("Time step")
        plt.legend(loc="upper right")
        plt.tight_layout()
        if savefig:
            plt.savefig("connection_dynamics")
        return plt.show()

    @staticmethod
    def hist_firing_rate_network(spike_train: np.array, bin_size: int, savefig: bool):
        """Plot the histogram of firing rate (total number of neurons spiking at each time step).

        Args:
            spike_train (array): Array of spike trains
            bin_size (int): Histogram bin size
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        fr = np.count_nonzero(spike_train.tolist(), 1)
        # Filter zero entries (inactive time steps) in the firing rate list
        fr = list(filter(lambda a: a != 0, fr))
        plt.title("Distribution of population activity without inactive time steps")
        plt.xlabel("Spikes/time step")
        plt.ylabel("Count")
        plt.hist(fr, bin_size)
        if savefig:
            plt.savefig("hist_firing_rate_network.png")
        return plt.show()

    @staticmethod
    def scatter_plot(spike_train: np.array, savefig: bool):
        """Scatter plot of spike trains, overlaid with the network firing rate.

        Args:
            spike_train (list): Array of spike trains
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        # Convert the list of spike trains into an array
        spike_train = np.asarray(spike_train)
        # Get the (neuron, time) indices where spike_train is 1
        x, y = np.argwhere(spike_train.T == 1).T
        plt.figure(figsize=(8, 5))
        firing_rates = Statistics.firing_rate_network(spike_train).tolist()
        plt.plot(firing_rates, label="Firing rate")
        plt.legend(loc="upper left")
        plt.scatter(y, x, s=0.1, color="black")
        plt.title('Spike Trains')
        plt.xlabel("Time step")
        plt.ylabel("Neuron")
        plt.legend(loc="upper left")
        if savefig:
            plt.savefig("ScatterSpikeTrain.png")
        return plt.show()

    @staticmethod
    def raster_plot(spike_train: np.array, savefig: bool):
        """Raster plot of spike trains, overlaid with the network firing rate.

        Args:
            spike_train (array): Array of spike trains
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        # Convert the list of spike trains into an array
        spike_train = np.asarray(spike_train)
        plt.figure(figsize=(11, 6))
        firing_rates = Statistics.firing_rate_network(spike_train).tolist()
        plt.plot(firing_rates, label="Firing rate")
        plt.legend(loc="upper left")
        plt.title('Spike Trains')
        # Get the (neuron, time) indices where spike_train is 1
        x, y = np.argwhere(spike_train.T == 1).T
        plt.plot(y, x, "|r")
        plt.xlabel("Time step")
        plt.ylabel("Neuron")
        if savefig:
            plt.savefig("RasterSpikeTrain.png")
        return plt.show()

    @staticmethod
    def correlation(corr: np.array, savefig: bool):
        """Plot correlation between neurons as a masked heatmap.

        Args:
            corr (array): Correlation matrix
            savefig (bool): If true will save the plot at the current working directory

        Returns:
            matplotlib.pyplot: Neuron Correlation plot
        """
        # Generate a mask for the upper triangle.
        # BUGFIX: np.bool was removed in NumPy >= 1.24; use the builtin bool.
        mask = np.zeros_like(corr, dtype=bool)
        mask[np.triu_indices_from(mask)] = True
        f, ax = plt.subplots(figsize=(11, 9))
        # Custom diverging colormap
        cmap = sns.diverging_palette(220, 10, as_cmap=True)
        sns.heatmap(
            corr,
            mask=mask,
            cmap=cmap,
            xticklabels=5,
            yticklabels=5,
            vmax=0.1,
            center=0,
            square=False,
            linewidths=0.0,
            cbar_kws={"shrink": 0.9},
        )
        if savefig:
            plt.savefig("Correlation between neurons")
        return None

    @staticmethod
    def isi_exponential_fit(
        spike_train: np.array, neuron: int, bin_size: int, savefig: bool
    ):
        """Plot an exponential fit of the inter-spike interval distribution.

        Args:
            spike_train (array): Array of spike trains
            neuron (int): Target neuron
            bin_size (int): Spike train will be split into bins of size bin_size
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        isi = Statistics.spike_time_intervals(spike_train[:, neuron])
        y, x = np.histogram(sorted(isi), bins=bin_size)
        x = [int(i) for i in x]
        y = [float(i) for i in y]

        def exponential_func(y, a, b, c):
            # a * exp(-b * t) - c, the model fitted to the ISI histogram
            return a * np.exp(-b * np.array(y)) - c

        # Curve fit (skip the first bin edge, matching the scatter below)
        popt, _ = curve_fit(exponential_func, x[1:bin_size], y[1:bin_size])
        plt.plot(
            x[1:bin_size],
            exponential_func(x[1:bin_size], *popt),
            label="Exponential fit",
        )
        plt.title('Distribution of Inter Spike Intervals and Exponential Curve Fit')
        plt.scatter(x[1:bin_size], y[1:bin_size], s=2.0, color="black", label="ISI")
        plt.xlabel("ISI")
        plt.ylabel("Frequency")
        plt.legend()
        if savefig:
            plt.savefig("isi_exponential_fit")
        return plt.show()

    @staticmethod
    def weight_distribution(weights: np.array, bin_size: int, savefig: bool):
        """Plot the distribution of synaptic weights.

        Args:
            weights (array): Connection weights
            bin_size (int): Histogram bin size
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        weights = weights[
            weights >= 0.01
        ]  # Remove the weight values less than 0.01 # As reported in article SORN 2013
        y, x = np.histogram(weights, bins=bin_size)  # Create histogram with bin_size
        plt.title('Synaptic weight distribution')
        plt.scatter(x[:-1], y, s=2.0, c="black")
        plt.xlabel("Connection strength")
        plt.ylabel("Frequency")
        if savefig:
            plt.savefig("weight_distribution")
        return plt.show()

    @staticmethod
    def linear_lognormal_fit(weights: np.array, num_points: int, savefig: bool):
        """Lognormal curve fit on the connection weight distribution.

        Args:
            weights (array): Connection weights
            num_points (int): Number of points to be plotted in the x axis
            savefig (bool): If True, plot will be saved in the cwd

        Returns:
            plot object
        """
        weights = np.array(weights.tolist())
        weights = weights[weights >= 0.01]
        # Lognormal distribution parameters
        mu = float(np.mean(np.log(weights)))  # Mean of log(X)
        sigma = float(np.std(np.log(weights)))  # Standard deviation of log(X)
        shape = sigma  # Scipy's shape parameter
        scale = np.exp(mu)  # Scipy's scale parameter
        median = np.exp(mu)
        mode = np.exp(mu - sigma ** 2)  # Mode of a lognormal distribution
        mean = np.exp(mu + (sigma ** 2 / 2))  # Mean of a lognormal distribution
        x = np.linspace(
            np.min(weights), np.max(weights), num=num_points
        )
        pdf = stats.lognorm.pdf(
            x, shape, loc=0, scale=scale
        )
        plt.figure(figsize=(12, 4.5))
        plt.title('Curve fit on connection weight distribution')
        # Figure on linear scale
        plt.subplot(121)
        plt.plot(x, pdf)
        plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode")
        plt.vlines(
            mean,
            0,
            stats.lognorm.pdf(mean, shape, loc=0, scale=scale),
            linestyle="--",
            color="green",
            label="Mean",
        )
        plt.vlines(
            median,
            0,
            stats.lognorm.pdf(median, shape, loc=0, scale=scale),
            color="blue",
            label="Median",
        )
        # BUGFIX: the ymin= keyword was removed in Matplotlib >= 3.3; use bottom=.
        plt.ylim(bottom=0)
        plt.xlabel("Weight")
        plt.title("Linear scale")
        plt.legend()
        # Figure on logarithmic scale
        plt.subplot(122)
        plt.semilogx(x, pdf)
        plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode")
        plt.vlines(
            mean,
            0,
            stats.lognorm.pdf(mean, shape, loc=0, scale=scale),
            linestyle="--",
            color="green",
            label="Mean",
        )
        plt.vlines(
            median,
            0,
            stats.lognorm.pdf(median, shape, loc=0, scale=scale),
            color="blue",
            label="Median",
        )
        plt.ylim(bottom=0)
        plt.xlabel("Weight")
        plt.title("Logarithmic scale")
        plt.legend()
        if savefig:
            plt.savefig("LinearLognormalFit")
        return plt.show()

    @staticmethod
    def plot_network(corr: np.array, corr_thres: float, fig_name: str = None):
        """NetworkX graphical visualization of the network using the correlation matrix.

        Args:
            corr (array): Correlation between neurons
            corr_thres (array): Threshold to prune the connection. Smaller the threshold,
                                higher the density of connections
            fig_name (array, optional): Name of the figure. Defaults to None.

        Returns:
            matplotlib.pyplot: Plot instance
        """
        df = pd.DataFrame(corr)
        # Long-format edge list; keep only strong, non-self correlations
        links = df.stack().reset_index()
        links.columns = ["var1", "var2", "value"]
        links_filtered = links.loc[
            (links["value"] > corr_thres) & (links["var1"] != links["var2"])
        ]
        G = nx.from_pandas_edgelist(links_filtered, "var1", "var2")
        plt.figure(figsize=(50, 50))
        nx.draw(
            G,
            with_labels=True,
            node_color="orange",
            node_size=50,
            linewidths=5,
            font_size=10,
        )
        plt.text(0.1, 0.9, "%s" % corr_thres)
        plt.savefig("%s" % fig_name)
        plt.show()

    @staticmethod
    def hamming_distance(hamming_dist: list, savefig: bool):
        """Plot Hamming distance between true network states and perturbed network states.

        Args:
            hamming_dist (list): Hamming distance values
            savefig (bool): If True, save the fig at current working directory

        Returns:
            matplotlib.pyplot: Hamming distance between true and perturbed network states
        """
        plt.figure(figsize=(15, 6))
        plt.title("Hamming distance between actual and perturbed states")
        plt.xlabel("Time steps")
        plt.ylabel("Hamming distance")
        plt.plot(hamming_dist)
        if savefig:
            plt.savefig("HammingDistance")
        return plt.show()
class Statistics(object):
    """Wrapper class for statistical analysis methods."""

    def __init__(self):
        pass

    @staticmethod
    def firing_rate_neuron(spike_train: np.array, neuron: int, bin_size: int):
        """Measure the spike rate of a given neuron during the given time window.

        Args:
            spike_train (array): Array of spike trains (time x neurons)
            neuron (int): Target neuron in the reservoir
            bin_size (int): Divide the spike trains into bins of size bin_size

        Returns:
            tuple: (time_period, bin_size, spike_rate)
        """
        time_period = len(spike_train[:, 0])
        neuron_spike_train = spike_train[:, neuron]
        # Split the neuron's spike train into consecutive bins of bin_size
        samples_spike_train = [
            neuron_spike_train[i : i + bin_size]
            for i in range(0, len(neuron_spike_train), bin_size)
        ]
        spike_rate = 0.0
        # (loop variable renamed: it previously shadowed the spike_train argument)
        for sample in samples_spike_train:
            spike_rate += list(sample).count(1.0)
        spike_rate = spike_rate * bin_size / time_period
        return time_period, bin_size, spike_rate

    @staticmethod
    def firing_rate_network(spike_train: np.array):
        """Count the number of neurons spiking at each time step (network firing rate).

        Args:
            spike_train (array): Array of spike trains (time x neurons)

        Returns:
            array: Number of active neurons per time step
        """
        firing_rate = np.count_nonzero(spike_train.tolist(), 1)
        return firing_rate

    @staticmethod
    def scale_dependent_smoothness_measure(firing_rates: list):
        """Measure smoothness of the firing-rate series, dependent on its scale.

        Smaller values correspond to a smoother series.

        Args:
            firing_rates (list): List of number of active neurons per time step

        Returns:
            float: Standard deviation of the first differences of the series
        """
        diff = np.diff(firing_rates)
        sd_diff = np.std(diff)
        return sd_diff

    @staticmethod
    def scale_independent_smoothness_measure(firing_rates: list):
        """Measure smoothness of the firing-rate series, independent of its scale.

        Smaller values correspond to a smoother series.

        Args:
            firing_rates (list): List of number of active neurons per time step

        Returns:
            float: Coefficient of variation of the first differences
        """
        diff = np.diff(firing_rates)
        mean_diff = np.mean(diff)
        sd_diff = np.std(diff)
        coeff_var = sd_diff / abs(mean_diff)
        return coeff_var

    @staticmethod
    def autocorr(firing_rates: list, t: int = 2):
        """Autocorrelation of the firing-rate series at lag ``t``.

        Score interpretation:
        - scores near 1 imply a smoothly varying series
        - scores near 0 imply no overall linear relationship between a data
          point and the following one
        - scores near -1 suggest the series is jagged: a point above the mean
          is likely followed by one below it by about the same amount

        Args:
            firing_rates (list): Firing rates of the network
            t (int, optional): Lag / window size. Defaults to 2.

        Returns:
            array: Correlation matrix of the series against its lagged copy
        """
        return np.corrcoef(
            np.array(
                [
                    firing_rates[0 : len(firing_rates) - t],
                    firing_rates[t : len(firing_rates)],
                ]
            )
        )

    @staticmethod
    def avg_corr_coeff(spike_train: np.array):
        """Measure the average Pearson correlation coefficient between neurons.

        Args:
            spike_train (array): Neural activity (time x neurons)

        Returns:
            tuple: (correlation matrix, average correlation coefficient)
        """
        corr_mat = np.corrcoef(np.asarray(spike_train).T)
        # BUGFIX: the divisor was hard-coded to 200 (the default pool size),
        # giving wrong averages for any other network size; use the actual
        # number of neurons.
        n_neurons = corr_mat.shape[0]
        avg_corr = np.sum(corr_mat, axis=1) / n_neurons
        corr_coeff = (
            avg_corr.sum() / n_neurons
        )  # 2D to 1D and either upper or lower half of correlation matrix.
        return corr_mat, corr_coeff

    @staticmethod
    def spike_times(spike_train: np.array):
        """Get the time instants at which neurons spike.

        Args:
            spike_train (array): Spike trains of neurons

        Returns:
            tuple of arrays: Indices where the spike train equals 1.0
        """
        times = np.where(spike_train == 1.0)
        return times

    @staticmethod
    def spike_time_intervals(spike_train):
        """Generate inter-spike intervals from a spike train.

        Args:
            spike_train (array): Network activity (typically one neuron's train)

        Returns:
            array: Inter-spike intervals (differences of consecutive spike times)
        """
        spike_times = Statistics.spike_times(spike_train)
        # The last axis of the np.where output holds the time indices
        isi = np.diff(spike_times[-1])
        return isi

    @staticmethod
    def hamming_distance(actual_spike_train: np.array, perturbed_spike_train: np.array):
        """Hamming distance between true network states and perturbed network states.

        Args:
            actual_spike_train (np.array): True network's states
            perturbed_spike_train (np.array): Perturbed network's states

        Returns:
            list: Per-time-step Hamming distance between the two state arrays
        """
        hd = [
            np.count_nonzero(actual_spike_train[i] != perturbed_spike_train[i])
            for i in range(len(actual_spike_train))
        ]
        return hd

    @staticmethod
    def fanofactor(spike_train: np.array, neuron: int, window_size: int):
        """Investigate whether neuronal spike generation is a Poisson process.

        Args:
            spike_train (np.array): Spike train of neurons in the reservoir
            neuron (int): Target neuron in the pool
            window_size (int): Number of equal time windows to split the train into.
                NOTE: np.split requires the train length to be divisible by window_size.

        Returns:
            tuple: (mean firing rate, variance of firing rate, Fano factor)
        """
        # Choose activity of the target neuron
        neuron_act = spike_train[:, neuron]
        # Divide total observations into window_size equal time windows
        tws = np.split(neuron_act, window_size)
        fr = []
        for i in range(len(tws)):
            fr.append(np.count_nonzero(tws[i]))
        mean_firing_rate = np.mean(fr)
        variance_firing_rate = np.var(fr)
        fano_factor = variance_firing_rate / mean_firing_rate
        return mean_firing_rate, variance_firing_rate, fano_factor

    @staticmethod
    def spike_source_entropy(spike_train: np.array, num_neurons: int):
        """Measure the uncertainty about the origin of a spike using entropy.

        Args:
            spike_train (np.array): Spike train of neurons
            num_neurons (int): Number of neurons in the reservoir

        Returns:
            float: Normalized spike source entropy of the network
        """
        # Number of spikes from each neuron during the interval
        n_spikes = np.count_nonzero(spike_train, axis=0)
        p = n_spikes / np.count_nonzero(
            spike_train
        )  # Probability of each neuron generating the next spike
        # NOTE: every pi must be non-zero (a silent neuron yields log(0) here)
        sse = np.sum([pi * np.log(pi) for pi in p]) / np.log(
            1 / num_neurons
        )  # Spike source entropy, normalized by the maximum entropy log(num_neurons)
        return sse
| 30.537801 | 207 | 0.596213 | from __future__ import division
import numpy as np
from scipy.stats import norm
import random
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import curve_fit
from scipy import stats
import networkx as nx
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
class Initializer(object):
def __init__(self):
pass
@staticmethod
def generate_strong_inp(length: int, reservoir_size: int):
inp = [0] * reservoir_size
x = [0] * length
idx = np.random.choice(length, np.random.randint(reservoir_size))
for i in idx:
x[i] = 1.0e4
inp[: len(x)] = x
return inp, idx
@staticmethod
def multi_one_hot_inp(ne: int, inputs: list, n_nodes_per_inp: int):
one_hot = np.zeros((ne, len(inputs)))
idxs = []
for _ in range(n_nodes_per_inp):
idxs.append(random.sample(range(0, ne), len(inputs)))
idxs = list(zip(*idxs))
j = 0
for idx_list in idxs:
for i in idx_list:
one_hot[i][j] = 1
j += 1
return one_hot, idxs
@staticmethod
def generate_gaussian_inputs(length: int, reservoir_size: int):
out = [0] * reservoir_size
x = [0] * length
idx = np.random.choice(length, np.random.randint(reservoir_size))
inp = np.random.normal(length)
for i in idx:
x[i] = inp[i]
out[: len(x)] = x
return out, idx
@staticmethod
def normalize_weight_matrix(weight_matrix: np.array):
normalized_weight_matrix = weight_matrix / np.sum(weight_matrix, axis=0)
return normalized_weight_matrix
@staticmethod
def generate_lambd_connections(
synaptic_connection: str, ne: int, ni: int, lambd_w: int, lambd_std: int
):
if synaptic_connection == "EE":
lambdas_incoming = norm.ppf(
np.random.random(ne), loc=lambd_w, scale=lambd_std
).astype(int)
list_neurons = list(range(ne))
connection_weights = np.zeros((ne, ne))
ncoming_weights = np.random.uniform(0.0, 0.1, sum(lambdas_incoming))
global_incoming_weights_idx = 0
for neuron in list_neurons:
possible_connections = list_neurons.copy()
possible_connections.remove(
neuron
)
possible_incoming_connections = random.sample(
possible_connections, lambdas_incoming[neuron]
)
incoming_weights_neuron = global_incoming_weights[
global_incoming_weights_idx : global_incoming_weights_idx
+ lambdas_incoming[neuron]
]
for incoming_idx, incoming_weight in enumerate(incoming_weights_neuron):
connection_weights[possible_incoming_connections[incoming_idx]][
neuron
] = incoming_weight
global_incoming_weights_idx += lambdas_incoming[neuron]
return connection_weights
if synaptic_connection == "EI":
lambdas = norm.ppf(
np.random.random(ni), loc=lambd_w, scale=lambd_std
).astype(int)
list_neurons = list(range(ni))
connection_weights = np.zeros((ni, ne))
global_outgoing_weights = np.random.uniform(0.0, 0.1, sum(lambdas))
global_outgoing_weights_idx = 0
for neuron in list_neurons:
possible_connections = list(range(ne))
possible_outgoing_connections = random.sample(
possible_connections, lambdas[neuron]
)
outgoing_weights = global_outgoing_weights[
global_outgoing_weights_idx : global_outgoing_weights_idx
+ lambdas[neuron]
]
for outgoing_idx, outgoing_weight in enumerate(
outgoing_weights
):
connection_weights[neuron][
possible_outgoing_connections[outgoing_idx]
] = outgoing_weight
global_outgoing_weights_idx += lambdas[neuron]
return connection_weights
@staticmethod
def get_incoming_connection_dict(weights: np.array):
connection_dict = dict.fromkeys(range(1, len(weights) + 1), 0)
for i in range(len(weights[0])):
connection_dict[i] = list(np.nonzero(weights[:, i])[0])
return connection_dict
@staticmethod
def get_outgoing_connection_dict(weights: np.array):
connection_dict = dict.fromkeys(range(1, len(weights) + 1), 1)
for i in range(len(weights[0])):
connection_dict[i] = list(np.nonzero(weights[i, :])[0])
return connection_dict
@staticmethod
def prune_small_weights(weights: np.array, cutoff_weight: float):
weights[weights <= cutoff_weight] = cutoff_weight
return weights
@staticmethod
def set_max_cutoff_weight(weights: np.array, cutoff_weight: float):
weights[weights > cutoff_weight] = cutoff_weight
return weights
@staticmethod
def get_unconnected_indexes(wee: np.array):
i, j = np.where(wee <= 0.0)
indices = list(zip(i, j))
self_conn_removed = []
for i, idxs in enumerate(indices):
if idxs[0] != idxs[1]:
self_conn_removed.append(indices[i])
return self_conn_removed
@staticmethod
def white_gaussian_noise(mu: float, sigma: float, t: int):
noise = np.random.normal(mu, sigma, t)
return np.expand_dims(noise, 1)
@staticmethod
def zero_sum_incoming_check(weights: np.array):
zero_sum_incomings = np.where(np.sum(weights, axis=0) == 0.0)
if len(zero_sum_incomings[-1]) == 0:
return weights
else:
for zero_sum_incoming in zero_sum_incomings[-1]:
rand_indices = np.random.randint(
int(weights.shape[0] * 0.2), size=2
)
rand_values = np.random.uniform(0.0, 0.1, 2)
for i, idx in enumerate(rand_indices):
weights[:, zero_sum_incoming][idx] = rand_values[i]
return weights
class Plotter(object):
def __init__(self):
pass
@staticmethod
def hist_incoming_conn(
weights: np.array, bin_size: int, histtype: str, savefig: bool
):
num_incoming_weights = np.sum(np.array(weights) > 0, axis=0)
plt.figure(figsize=(12, 5))
plt.xlabel("Number of connections")
plt.ylabel("Probability")
mu, std = norm.fit(num_incoming_weights)
plt.hist(num_incoming_weights, bins=bin_size, density=True, alpha=0.6, color='b')
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, max(num_incoming_weights))
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Distribution of presynaptic connections: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
if savefig:
plt.savefig("hist_incoming_conn")
return plt.show()
@staticmethod
def hist_outgoing_conn(
weights: np.array, bin_size: int, histtype: str, savefig: bool
):
num_outgoing_weights = np.sum(np.array(weights) > 0, axis=1)
plt.figure(figsize=(12, 5))
plt.xlabel("Number of connections")
plt.ylabel("Probability")
mu, std = norm.fit(num_outgoing_weights)
plt.hist(num_outgoing_weights, bins=bin_size, density=True, alpha=0.6, color='b')
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, max(num_outgoing_weights))
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Distribution of post synaptic connections: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
if savefig:
plt.savefig("hist_outgoing_conn")
return plt.show()
@staticmethod
def network_connection_dynamics(
connection_counts: np.array, savefig: bool
):
_, ax1 = plt.subplots(figsize=(12, 5))
ax1.plot(connection_counts, label="Connection dynamics")
plt.margins(x=0)
ax1.set_xticks(ax1.get_xticks()[::2])
ax1.set_title("Network connection dynamics")
plt.ylabel("Number of active connections")
plt.xlabel("Time step")
plt.legend(loc="upper right")
plt.tight_layout()
if savefig:
plt.savefig("connection_dynamics")
return plt.show()
@staticmethod
def hist_firing_rate_network(spike_train: np.array, bin_size: int, savefig: bool):
fr = np.count_nonzero(spike_train.tolist(), 1)
fr = list(filter(lambda a: a != 0, fr))
plt.title("Distribution of population activity without inactive time steps")
plt.xlabel("Spikes/time step")
plt.ylabel("Count")
plt.hist(fr, bin_size)
if savefig:
plt.savefig("hist_firing_rate_network.png")
return plt.show()
@staticmethod
def scatter_plot(spike_train: np.array, savefig: bool):
spike_train = np.asarray(spike_train)
x, y = np.argwhere(spike_train.T == 1).T
plt.figure(figsize=(8, 5))
firing_rates = Statistics.firing_rate_network(spike_train).tolist()
plt.plot(firing_rates, label="Firing rate")
plt.legend(loc="upper left")
plt.scatter(y, x, s=0.1, color="black")
plt.title('Spike Trains')
plt.xlabel("Time step")
plt.ylabel("Neuron")
plt.legend(loc="upper left")
if savefig:
plt.savefig("ScatterSpikeTrain.png")
return plt.show()
@staticmethod
def raster_plot(spike_train: np.array, savefig: bool):
spike_train = np.asarray(spike_train)
plt.figure(figsize=(11, 6))
firing_rates = Statistics.firing_rate_network(spike_train).tolist()
plt.plot(firing_rates, label="Firing rate")
plt.legend(loc="upper left")
plt.title('Spike Trains')
x, y = np.argwhere(spike_train.T == 1).T
plt.plot(y, x, "|r")
plt.xlabel("Time step")
plt.ylabel("Neuron")
if savefig:
plt.savefig("RasterSpikeTrain.png")
return plt.show()
@staticmethod
def correlation(corr: np.array, savefig: bool):
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(
corr,
mask=mask,
cmap=cmap,
xticklabels=5,
yticklabels=5,
vmax=0.1,
center=0,
square=False,
linewidths=0.0,
cbar_kws={"shrink": 0.9},
)
if savefig:
plt.savefig("Correlation between neurons")
return None
@staticmethod
def isi_exponential_fit(
spike_train: np.array, neuron: int, bin_size: int, savefig: bool
):
isi = Statistics.spike_time_intervals(spike_train[:,neuron])
y, x = np.histogram(sorted(isi), bins=bin_size)
x = [int(i) for i in x]
y = [float(i) for i in y]
def exponential_func(y, a, b, c):
return a * np.exp(-b * np.array(y)) - c
popt, _ = curve_fit(exponential_func, x[1:bin_size], y[1:bin_size])
plt.plot(
x[1:bin_size],
exponential_func(x[1:bin_size], *popt),
label="Exponential fit",
)
plt.title('Distribution of Inter Spike Intervals and Exponential Curve Fit')
plt.scatter(x[1:bin_size], y[1:bin_size], s=2.0, color="black", label="ISI")
plt.xlabel("ISI")
plt.ylabel("Frequency")
plt.legend()
if savefig:
plt.savefig("isi_exponential_fit")
return plt.show()
@staticmethod
def weight_distribution(weights: np.array, bin_size: int, savefig: bool):
weights = weights[
weights >= 0.01
] ts, bins=bin_size)
plt.title('Synaptic weight distribution')
plt.scatter(x[:-1], y, s=2.0, c="black")
plt.xlabel("Connection strength")
plt.ylabel("Frequency")
if savefig:
plt.savefig("weight_distribution")
return plt.show()
@staticmethod
def linear_lognormal_fit(weights: np.array, num_points: int, savefig: bool):
weights = np.array(weights.tolist())
weights = weights[weights >= 0.01]
M = float(np.mean(weights))
s = float(np.std(weights))
mu = float(np.mean(np.log(weights)))
sigma = float(np.std(np.log(weights)))
shape = sigma
scale = np.exp(mu) # Scipy's scale parameter
median = np.exp(mu)
mode = np.exp(mu - sigma ** 2)
mean = np.exp(mu + (sigma ** 2 / 2))
x = np.linspace(
np.min(weights), np.max(weights), num=num_points
)
pdf = stats.lognorm.pdf(
x, shape, loc=0, scale=scale
)
plt.figure(figsize=(12, 4.5))
plt.title('Curve fit on connection weight distribution')
plt.subplot(121)
plt.plot(x, pdf)
plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode")
plt.vlines(
mean,
0,
stats.lognorm.pdf(mean, shape, loc=0, scale=scale),
linestyle="--",
color="green",
label="Mean",
)
plt.vlines(
median,
0,
stats.lognorm.pdf(median, shape, loc=0, scale=scale),
color="blue",
label="Median",
)
plt.ylim(ymin=0)
plt.xlabel("Weight")
plt.title("Linear scale")
plt.legend()
plt.subplot(122)
plt.semilogx(x, pdf)
plt.vlines(mode, 0, pdf.max(), linestyle=":", label="Mode")
plt.vlines(
mean,
0,
stats.lognorm.pdf(mean, shape, loc=0, scale=scale),
linestyle="--",
color="green",
label="Mean",
)
plt.vlines(
median,
0,
stats.lognorm.pdf(median, shape, loc=0, scale=scale),
color="blue",
label="Median",
)
plt.ylim(ymin=0)
plt.xlabel("Weight")
plt.title("Logarithmic scale")
plt.legend()
if savefig:
plt.savefig("LinearLognormalFit")
return plt.show()
@staticmethod
def plot_network(corr: np.array, corr_thres: float, fig_name: str = None):
df = pd.DataFrame(corr)
links = df.stack().reset_index()
links.columns = ["var1", "var2", "value"]
links_filtered = links.loc[
(links["value"] > corr_thres) & (links["var1"] != links["var2"])
]
G = nx.from_pandas_edgelist(links_filtered, "var1", "var2")
plt.figure(figsize=(50, 50))
nx.draw(
G,
with_labels=True,
node_color="orange",
node_size=50,
linewidths=5,
font_size=10,
)
plt.text(0.1, 0.9, "%s" % corr_thres)
plt.savefig("%s" % fig_name)
plt.show()
@staticmethod
def hamming_distance(hamming_dist: list, savefig: bool):
plt.figure(figsize=(15, 6))
plt.title("Hamming distance between actual and perturbed states")
plt.xlabel("Time steps")
plt.ylabel("Hamming distance")
plt.plot(hamming_dist)
if savefig:
plt.savefig("HammingDistance")
return plt.show()
class Statistics(object):
    """Statistical summaries of spike trains and connection weights."""

    def __init__(self):
        pass

    @staticmethod
    def firing_rate_neuron(spike_train: np.ndarray, neuron: int, bin_size: int):
        """Average firing rate of one neuron over fixed-size time bins.

        Args:
            spike_train: binary array of shape (time, neurons).
            neuron: column index of the neuron of interest.
            bin_size: number of time steps per bin.

        Returns:
            Tuple (time_period, bin_size, spike_rate).
        """
        time_period = len(spike_train[:, 0])
        neuron_spike_train = spike_train[:, neuron]
        # Chop the spike train into consecutive windows of `bin_size` steps.
        windows = [
            neuron_spike_train[i : i + bin_size]
            for i in range(0, len(neuron_spike_train), bin_size)
        ]
        spike_count = 0.0
        for window in windows:
            spike_count += list(window).count(1.0)
        # Total spikes normalised by the number of bins (time_period/bin_size).
        spike_rate = spike_count * bin_size / time_period
        return time_period, bin_size, spike_rate

    @staticmethod
    def firing_rate_network(spike_train: np.ndarray):
        """Number of neurons spiking at each time step (population rate)."""
        firing_rate = np.count_nonzero(spike_train.tolist(), 1)
        return firing_rate

    @staticmethod
    def scale_dependent_smoothness_measure(firing_rates: list):
        """Standard deviation of successive firing-rate differences."""
        diff = np.diff(firing_rates)
        sd_diff = np.std(diff)
        return sd_diff

    @staticmethod
    def scale_independent_smoothness_measure(firing_rates: list):
        """Coefficient of variation of successive firing-rate differences."""
        diff = np.diff(firing_rates)
        mean_diff = np.mean(diff)
        sd_diff = np.std(diff)
        coeff_var = sd_diff / abs(mean_diff)
        return coeff_var

    @staticmethod
    def autocorr(firing_rates: list, t: int = 2):
        """Correlation matrix of the series against itself shifted by lag t."""
        return np.corrcoef(
            np.array(
                [
                    firing_rates[0 : len(firing_rates) - t],
                    firing_rates[t : len(firing_rates)],
                ]
            )
        )

    @staticmethod
    def avg_corr_coeff(spike_train: np.ndarray):
        """Pairwise correlation matrix of neurons and its average coefficient.

        NOTE(review): the divisor 200 hard-codes the network size; confirm it
        always equals spike_train.shape[1] for the networks analysed here.
        """
        corr_mat = np.corrcoef(np.asarray(spike_train).T)
        avg_corr = np.sum(corr_mat, axis=1) / 200
        corr_coeff = avg_corr.sum() / 200
        return corr_mat, corr_coeff

    @staticmethod
    def spike_times(spike_train: np.ndarray):
        """Indices at which the 1-D spike train equals 1.0."""
        times = np.where(spike_train == 1.0)
        return times

    @staticmethod
    def spike_time_intervals(spike_train):
        """Inter-spike intervals: differences between consecutive spike times."""
        spike_times = Statistics.spike_times(spike_train)
        isi = np.diff(spike_times[-1])
        return isi

    @staticmethod
    def hamming_distance(actual_spike_train: np.ndarray, perturbed_spike_train: np.ndarray):
        """Per-time-step Hamming distance between two spike rasters."""
        hd = [
            np.count_nonzero(actual_spike_train[i] != perturbed_spike_train[i])
            for i in range(len(actual_spike_train))
        ]
        return hd

    @staticmethod
    def fanofactor(spike_train: np.ndarray, neuron: int, window_size: int):
        """Fano factor (variance/mean of windowed spike counts) of one neuron.

        NOTE(review): np.split(act, window_size) splits into `window_size`
        equal windows (the name suggests a window *length*) and requires the
        train length to be divisible by it — confirm intended semantics.
        """
        neuron_act = spike_train[:, neuron]
        tws = np.split(neuron_act, window_size)
        # Spike count within each window.
        fr = [np.count_nonzero(w) for w in tws]
        mean_firing_rate = np.mean(fr)
        variance_firing_rate = np.var(fr)
        fano_factor = variance_firing_rate / mean_firing_rate
        return mean_firing_rate, variance_firing_rate, fano_factor

    @staticmethod
    def spike_source_entropy(spike_train: np.ndarray, num_neurons: int):
        """Normalised spike-source entropy of the network.

        Reconstructed from a corrupted source line: the entropy of the
        per-neuron spike-origin probabilities, normalised by log(1/num_neurons)
        so that uniform spiking yields 1.0.
        """
        # Probability that a given spike originated from each neuron.
        n_spikes = np.count_nonzero(spike_train, axis=0)
        p = n_spikes / np.count_nonzero(spike_train)
        sse = np.sum([pi * np.log(pi) for pi in p]) / np.log(
            1 / num_neurons
        )  # Spike source entropy
        return sse
| true | true |
f72b65415eb00899b5a39a72574ea82cbc1d04c6 | 21,954 | py | Python | Lib/site-packages/django/conf/global_settings.py | Lucas11200/LocaPy | 5d1f214c091aa3703b2ff7d3c0713a91ed4a1f48 | [
"bzip2-1.0.6"
] | 42 | 2019-03-01T09:51:13.000Z | 2021-07-22T12:22:49.000Z | Lib/site-packages/django/conf/global_settings.py | Lucas11200/LocaPy | 5d1f214c091aa3703b2ff7d3c0713a91ed4a1f48 | [
"bzip2-1.0.6"
] | 31 | 2018-08-26T14:01:16.000Z | 2018-10-19T07:35:57.000Z | virtual/lib/python3.6/site-packages/django/conf/global_settings.py | eyern/instagram_clone | c18da15b35d28d91c3f63904af9d5da4e8e3e8ae | [
"MIT"
] | 145 | 2019-03-14T18:54:45.000Z | 2022-03-04T20:25:31.000Z | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged; marks the string for later translation."""
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
    ('af', gettext_noop('Afrikaans')),
    ('ar', gettext_noop('Arabic')),
    ('ast', gettext_noop('Asturian')),
    ('az', gettext_noop('Azerbaijani')),
    ('bg', gettext_noop('Bulgarian')),
    ('be', gettext_noop('Belarusian')),
    ('bn', gettext_noop('Bengali')),
    ('br', gettext_noop('Breton')),
    ('bs', gettext_noop('Bosnian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('dsb', gettext_noop('Lower Sorbian')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('en-au', gettext_noop('Australian English')),
    ('en-gb', gettext_noop('British English')),
    ('eo', gettext_noop('Esperanto')),
    ('es', gettext_noop('Spanish')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('es-co', gettext_noop('Colombian Spanish')),
    ('es-mx', gettext_noop('Mexican Spanish')),
    ('es-ni', gettext_noop('Nicaraguan Spanish')),
    ('es-ve', gettext_noop('Venezuelan Spanish')),
    ('et', gettext_noop('Estonian')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('fy', gettext_noop('Frisian')),
    ('ga', gettext_noop('Irish')),
    ('gd', gettext_noop('Scottish Gaelic')),
    ('gl', gettext_noop('Galician')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('hsb', gettext_noop('Upper Sorbian')),
    ('hu', gettext_noop('Hungarian')),
    ('ia', gettext_noop('Interlingua')),
    ('id', gettext_noop('Indonesian')),
    ('io', gettext_noop('Ido')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('kab', gettext_noop('Kabyle')),
    ('kk', gettext_noop('Kazakh')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('ko', gettext_noop('Korean')),
    ('lb', gettext_noop('Luxembourgish')),
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('mr', gettext_noop('Marathi')),
    ('my', gettext_noop('Burmese')),
    ('nb', gettext_noop('Norwegian Bokmål')),
    ('ne', gettext_noop('Nepali')),
    ('nl', gettext_noop('Dutch')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('os', gettext_noop('Ossetic')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('sw', gettext_noop('Swahili')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('tt', gettext_noop('Tatar')),
    ('udm', gettext_noop('Udmurt')),
    ('uk', gettext_noop('Ukrainian')),
    ('ur', gettext_noop('Urdu')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-hans', gettext_noop('Simplified Chinese')),
    ('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
# NOTE(review): DEFAULT_CONTENT_TYPE is deprecated in newer Django releases —
# confirm the Django version this file targets before relying on it.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
    '%H:%M:%S', # '14:30:59'
    '%H:%M:%S.%f', # '14:30:59.000200'
    '%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
# Age of the CSRF cookie, in seconds (52 weeks, i.e. about one year).
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| 34.518868 | 101 | 0.701603 |
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_EMAIL = 'root@localhost'
DATABASES = {}
DATABASE_ROUTERS = []
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_LOCALTIME = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
INSTALLED_APPS = []
TEMPLATES = []
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_SUBJECT_PREFIX = '[Django] '
APPEND_SLASH = True
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
FILE_UPLOAD_TEMP_DIR = None
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
PERMISSIONS = None
FORMAT_MODULE_PATH = None
_FORMAT = 'N j, Y'
TIME_FORMAT = 'N j, Y, P'
_FORMAT = 'P'
_MONTH_FORMAT = 'F Y'
H_DAY_FORMAT = 'F j'
T_DATE_FORMAT = 'm/d/Y'
T_DATETIME_FORMAT = 'm/d/Y P'
TS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',
'%b %d %Y', '%b %d, %Y',
'%d %b %Y', '%d %b, %Y',
'%B %d %Y', '%B %d, %Y',
'%d %B %Y', '%d %B, %Y',
]
TS = [
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M',
]
ORMATS = [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d',
'%m/%d/%Y %H:%M:%S',
'%m/%d/%Y %H:%M:%S.%f',
'%m/%d/%Y %H:%M',
'%m/%d/%Y',
'%m/%d/%y %H:%M:%S',
'%m/%d/%y %H:%M:%S.%f',
'%m/%d/%y %H:%M',
'%m/%d/%y',
]
FIRST_DAY_OF_WEEK = 0
DECIMAL_SEPARATOR = '.'
USE_THOUSAND_SEPARATOR = False
NUMBER_GROUPING = 0
THOUSAND_SEPARATOR = ','
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
SESSION_FILE_PATH = None
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
ckends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
= 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
ERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| true | true |
f72b65ae791790eb0c6dae59632f170bfaa9b26d | 984 | py | Python | setup.py | prafulbagai/uwsgi-sloth | b19b9a7e6a0b8edfdc94bfbe9f7a0030ab95db03 | [
"Apache-2.0"
] | 127 | 2015-01-02T11:57:22.000Z | 2022-03-03T02:23:54.000Z | setup.py | prafulbagai/uwsgi-sloth | b19b9a7e6a0b8edfdc94bfbe9f7a0030ab95db03 | [
"Apache-2.0"
] | 8 | 2015-06-15T12:10:13.000Z | 2019-07-21T23:01:18.000Z | setup.py | prafulbagai/uwsgi-sloth | b19b9a7e6a0b8edfdc94bfbe9f7a0030ab95db03 | [
"Apache-2.0"
] | 20 | 2015-01-06T03:27:25.000Z | 2020-09-04T03:53:46.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from setuptools import setup, find_packages
# **Python version check**
if sys.version_info < (3, 5):
error = """
uwsgi-sloth only supports Python 3.5 and above.
If you are using Python 2.7, please install "uwsgi-sloth<3.0.0" instead.
"""
print(error, file=sys.stderr)
sys.exit(1)
setup(
name='uwsgi-sloth',
version='3.0.2',
description='A simple uwsgi access log analyzer',
long_description=open('README.rst').read(),
author='piglei',
author_email='piglei2007@gmail.com',
url='https://github.com/piglei/uwsgi-sloth',
keywords='uwsgi log analyzer',
license='Apache License, Version 2.0',
packages=find_packages(),
package_data={"": ['templates/*.html', 'sample.conf']},
classifiers=[
"Programming Language :: Python :: 3",
],
install_requires=[
'jinja2',
'configobj'
],
scripts=['uwsgi_sloth/uwsgi-sloth']
)
| 24.6 | 72 | 0.648374 |
from __future__ import print_function
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 5):
error = """
uwsgi-sloth only supports Python 3.5 and above.
If you are using Python 2.7, please install "uwsgi-sloth<3.0.0" instead.
"""
print(error, file=sys.stderr)
sys.exit(1)
setup(
name='uwsgi-sloth',
version='3.0.2',
description='A simple uwsgi access log analyzer',
long_description=open('README.rst').read(),
author='piglei',
author_email='piglei2007@gmail.com',
url='https://github.com/piglei/uwsgi-sloth',
keywords='uwsgi log analyzer',
license='Apache License, Version 2.0',
packages=find_packages(),
package_data={"": ['templates/*.html', 'sample.conf']},
classifiers=[
"Programming Language :: Python :: 3",
],
install_requires=[
'jinja2',
'configobj'
],
scripts=['uwsgi_sloth/uwsgi-sloth']
)
| true | true |
f72b65b992c41b19e9209c007df907202066b8d1 | 2,817 | py | Python | weatherScraper/spiders/weatherbot.py | aabedi/weatherScraper | 069d07c19fbbba93aad5499cd1bb400accbcf644 | [
"MIT"
] | null | null | null | weatherScraper/spiders/weatherbot.py | aabedi/weatherScraper | 069d07c19fbbba93aad5499cd1bb400accbcf644 | [
"MIT"
] | null | null | null | weatherScraper/spiders/weatherbot.py | aabedi/weatherScraper | 069d07c19fbbba93aad5499cd1bb400accbcf644 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from weatherScraper.items import TempData
from weatherScraper.items import InputData
import scrapy
class WeatherbotSpider(scrapy.Spider):
name = 'weatherbot'
allowed_domains = ['www.wunderground.com']
start_urls = ['http://www.wunderground.com/history/']
def __init__(self, code='', month='', day='', year='', *args, **kwargs): # this will allow spider arguments
super(WeatherbotSpider, self).__init__(*args, **kwargs)
global user_input
user_input = InputData()
user_input['code'] = code
user_input['month'] = month
user_input['day'] = day
user_input['year'] = year
def parse(self, response):
return scrapy.FormRequest.from_response(
response,
formnumber=1, # formnumber set to 1 because location and date are the second form on history page
formdata={'code': user_input['code'],
'month': user_input['month'],
'day': user_input['day'],
'year': user_input['year']},
callback=self.after_post
)
def after_post(self, response):
# check input successful before moving on
if "location you entered was not found" in response.body:
self.logger.error("Location not valid")
return
temperatures = TempData()
# Extract each temperature needed using corresponding css tags
temperatures['actual_mean_temp'] = response.css('#historyTable tr:nth-child(2) .wx-value::text').extract()
temperatures['avg_mean_temp'] = response.css('tr:nth-child(2) .indent~ td+ td .wx-value::text').extract()
temperatures['actual_max_temp'] = response.css('tr:nth-child(3) .indent+ td .wx-value::text').extract()
temperatures['avg_max_temp'] = response.css('#historyTable tr:nth-child(3) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_max_temp'] = response.css('tr:nth-child(3) td:nth-child(4) .wx-value::text').extract()
temperatures['actual_min_temp'] = response.css('tr:nth-child(4) .indent+ td .wx-value::text').extract()
temperatures['avg_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(4) .wx-value::text')\
.extract()
# Check if Fahrenheit or Celsius, then append correct unit
if 'C' in response.css('tr:nth-child(3) .indent+ td .wx-unit::text'):
for key, value in temperatures.iteritems():
value.append('C')
else:
for key, value in temperatures.iteritems():
value.append('F')
yield temperatures
| 48.568966 | 120 | 0.619453 |
from weatherScraper.items import TempData
from weatherScraper.items import InputData
import scrapy
class WeatherbotSpider(scrapy.Spider):
name = 'weatherbot'
allowed_domains = ['www.wunderground.com']
start_urls = ['http://www.wunderground.com/history/']
def __init__(self, code='', month='', day='', year='', *args, **kwargs):
super(WeatherbotSpider, self).__init__(*args, **kwargs)
global user_input
user_input = InputData()
user_input['code'] = code
user_input['month'] = month
user_input['day'] = day
user_input['year'] = year
def parse(self, response):
return scrapy.FormRequest.from_response(
response,
formnumber=1,
formdata={'code': user_input['code'],
'month': user_input['month'],
'day': user_input['day'],
'year': user_input['year']},
callback=self.after_post
)
def after_post(self, response):
if "location you entered was not found" in response.body:
self.logger.error("Location not valid")
return
temperatures = TempData()
temperatures['actual_mean_temp'] = response.css('#historyTable tr:nth-child(2) .wx-value::text').extract()
temperatures['avg_mean_temp'] = response.css('tr:nth-child(2) .indent~ td+ td .wx-value::text').extract()
temperatures['actual_max_temp'] = response.css('tr:nth-child(3) .indent+ td .wx-value::text').extract()
temperatures['avg_max_temp'] = response.css('#historyTable tr:nth-child(3) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_max_temp'] = response.css('tr:nth-child(3) td:nth-child(4) .wx-value::text').extract()
temperatures['actual_min_temp'] = response.css('tr:nth-child(4) .indent+ td .wx-value::text').extract()
temperatures['avg_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(4) .wx-value::text')\
.extract()
if 'C' in response.css('tr:nth-child(3) .indent+ td .wx-unit::text'):
for key, value in temperatures.iteritems():
value.append('C')
else:
for key, value in temperatures.iteritems():
value.append('F')
yield temperatures
| true | true |
f72b65ccd73696678a74b7a0c74f72dda1f6b69c | 3,974 | py | Python | flink-ml-framework/python/setup.py | yangqi199808/dl-on-flink | 3b2ab15ce06e877f90997f5950df44bc30b88b29 | [
"Apache-2.0"
] | null | null | null | flink-ml-framework/python/setup.py | yangqi199808/dl-on-flink | 3b2ab15ce06e877f90997f5950df44bc30b88b29 | [
"Apache-2.0"
] | null | null | null | flink-ml-framework/python/setup.py | yangqi199808/dl-on-flink | 3b2ab15ce06e877f90997f5950df44bc30b88b29 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The flink-ai-extended Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'flink_ml_framework/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load flink_ml_framework version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
if platform.system() == "Linux":
build_args += ['-lpthread']
env = os.environ.copy()
env[
'CXXFLAGS'] = '{} -D_GLIBCXX_USE_CXX11_ABI=0 -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
setup(
name='flink_ml_framework',
version=VERSION,
include_package_data=True,
packages=find_packages(),
ext_modules=[CMakeExtension('flink_ml_framework/flink_ml_framework')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
url='https://github.com/flink-extended/dl-on-flink',
license='https://www.apache.org/licenses/LICENSE-2.0'
)
| 36.458716 | 89 | 0.605435 |
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'flink_ml_framework/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load flink_ml_framework version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
if platform.system() == "Linux":
build_args += ['-lpthread']
env = os.environ.copy()
env[
'CXXFLAGS'] = '{} -D_GLIBCXX_USE_CXX11_ABI=0 -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
setup(
name='flink_ml_framework',
version=VERSION,
include_package_data=True,
packages=find_packages(),
ext_modules=[CMakeExtension('flink_ml_framework/flink_ml_framework')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
url='https://github.com/flink-extended/dl-on-flink',
license='https://www.apache.org/licenses/LICENSE-2.0'
)
| true | true |
f72b65fef7e833abe364975fab274dd40f6a20f9 | 359 | py | Python | predict.py | TomG4/flair | 2db057ec60c25d55f69622a6a6881aedae13be49 | [
"MIT"
] | null | null | null | predict.py | TomG4/flair | 2db057ec60c25d55f69622a6a6881aedae13be49 | [
"MIT"
] | null | null | null | predict.py | TomG4/flair | 2db057ec60c25d55f69622a6a6881aedae13be49 | [
"MIT"
] | null | null | null | #Test
from flair.data import Sentence
from flair.models import SequenceTagger
tagger: SequenceTagger = SequenceTagger.load("ner")
sentence: Sentence = Sentence("George Washington went to Washington .")
tagger.predict(sentence)
print("Analysing the sentence %s" % sentence)
print("\nThe following NER tags are found: \n")
print(sentence.to_tagged_string())
| 27.615385 | 71 | 0.78273 |
from flair.data import Sentence
from flair.models import SequenceTagger
tagger: SequenceTagger = SequenceTagger.load("ner")
sentence: Sentence = Sentence("George Washington went to Washington .")
tagger.predict(sentence)
print("Analysing the sentence %s" % sentence)
print("\nThe following NER tags are found: \n")
print(sentence.to_tagged_string())
| true | true |
f72b66e8e4c78293736c0c00b94f7ce5be92fa9b | 35,288 | py | Python | owscapable/csw.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | 1 | 2016-02-01T12:55:13.000Z | 2016-02-01T12:55:13.000Z | owscapable/csw.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | 1 | 2015-06-23T14:07:50.000Z | 2015-06-23T14:07:50.000Z | owscapable/csw.py | b-cube/OwsCapable | a01815418fe982434503d6542cb18e1ac8989684 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" CSW request and response processor """
from __future__ import (absolute_import, division, print_function)
import base64
import inspect
import warnings
import StringIO
import random
from urllib import urlencode
from urllib2 import Request, urlopen
from owscapable.util import OrderedDict
from owscapable.etree import etree
from owscapable import fes
from owscapable import util
from owscapable import ows
from owscapable.iso import MD_Metadata
from owscapable.fgdc import Metadata
from owscapable.dif import DIF
from owscapable.namespaces import Namespaces
from owscapable.util import cleanup_namespaces, bind_url, add_namespaces
# Default MIME type requested for CSW responses.
outputformat = 'application/xml'
def get_namespaces():
    """Return the prefix -> URI namespace mapping used to build CSW requests."""
    n = Namespaces()
    return n.get_namespaces()
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'
# xsi:schemaLocation value: "<csw namespace URI> <schema document URL>"
schema_location = '%s %s' % (namespaces['csw'], schema)
class CatalogueServiceWeb:
    """Client for an OGC Catalogue Service for the Web (CSW) endpoint.

    Builds CSW requests, sends them via ``_invoke`` and parses the
    responses into Python objects.
    """
    def __init__(self, url, xml=None, lang='en-US', version='2.0.2', timeout=10, skip_caps=False,
                 username=None, password=None):
        """
        Construct and process a GetCapabilities request

        Parameters
        ----------
        - url: the URL of the CSW
        - xml: an already-fetched GetCapabilities response document to
          parse instead of issuing a request (optional)
        - lang: the language (default is 'en-US')
        - version: version (default is '2.0.2')
        - timeout: timeout in seconds
        - skip_caps: whether to skip GetCapabilities processing on init (default is False)
        - username: username for HTTP basic authentication
        - password: password for HTTP basic authentication
        """
        self.url = url
        self.lang = lang
        self.version = version
        self.timeout = timeout
        self.username = username
        self.password = password
        self.service = 'CSW'
        # Set by _invoke to an ows.ExceptionReport when a request fails;
        # None means the last exchange succeeded.
        self.exceptionreport = None
        self.owscommon = ows.OwsCommon('1.0.0')
        if not skip_caps: # process GetCapabilities
            if xml:
                # load from the response to get _exml (no network traffic)
                self._parse_response(xml)
            else:
                # construct request and fetch the capabilities document
                data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}
                self.request = '%s%s' % (bind_url(self.url), urlencode(data))
                self._invoke()
            if self.exceptionreport is None:
                # ServiceIdentification
                val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))
                self.identification=ows.ServiceIdentification(val,self.owscommon.namespace)
                # ServiceProvider
                val = self._exml.find(util.nspath_eval('ows:ServiceProvider', namespaces))
                self.provider=ows.ServiceProvider(val,self.owscommon.namespace)
                # ServiceOperations metadata
                self.operations=[]
                for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
                    self.operations.append(ows.OperationsMetadata(elem, self.owscommon.namespace))
                # contents is always None; kept for harmonization with
                # other OWS service clients that expose layer contents
                self.contents = None
                # FilterCapabilities
                val = self._exml.find(util.nspath_eval('ogc:Filter_Capabilities', namespaces))
                self.filters=fes.FilterCapabilities(val)
def describerecord(self, typename='csw:Record', format=outputformat):
"""
Construct and process DescribeRecord request
Parameters
----------
- typename: the typename to describe (default is 'csw:Record')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
node0 = self._setrootelement('csw:DescribeRecord')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set('outputFormat', format)
node0.set('schemaLanguage', namespaces['xs2'])
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename
self.request = node0
self._invoke()
# parse result
# TODO: process the XML Schema (you're on your own for now with self.response)
    def getdomain(self, dname, dtype='parameter'):
        """
        Construct and process a GetDomain request

        Parameters
        ----------
        - dname: the value of the Parameter or Property to query
        - dtype: whether to query a parameter (parameter) or property (property)

        On success, self.results holds 'type', the parameter/property name,
        and the list of domain 'values'.
        """
        # construct request
        dtypename = 'ParameterName'
        node0 = self._setrootelement('csw:GetDomain')
        node0.set('service', self.service)
        node0.set('version', self.version)
        node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
        if dtype == 'property':
            dtypename = 'PropertyName'
        etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname
        self.request = node0
        self._invoke()
        if self.exceptionreport is None:
            self.results = {}
            # the 'type' attribute of csw:DomainValues
            val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')
            self.results['type'] = util.testXMLValue(val, True)
            val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))
            self.results[dtype] = util.testXMLValue(val)
            # get the list of values associated with the Domain
            self.results['values'] = []
            for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):
                self.results['values'].append(util.testXMLValue(f))
    def getrecords(self, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None, esn='summary', sortby=None, outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
        """
        Construct and process a GetRecords request

        .. deprecated:: use :meth:`getrecords2` instead

        Parameters
        ----------
        - qtype: type of resource to query (i.e. service, dataset)
        - keywords: list of keywords
        - typenames: the typeNames to query against (default is csw:Record)
        - propertyname: the PropertyName to Filter against
        - bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
        - esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
        - sortby: property to sort results on
        - outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
        - format: the outputFormat (default is 'application/xml')
        - startposition: requests a slice of the result set, starting at this position (default is 0)
        - maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
        - cql: common query language text. Note this overrides bbox, qtype, keywords
        - xml: raw XML request. Note this overrides all other options
        - resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
        """
        warnings.warn("""Please use the updated 'getrecords2' method instead of 'getrecords'.
        The 'getrecords' method will be upgraded to use the 'getrecords2' parameters
        in a future version of OWSLib.""")
        if xml is not None:
            # raw XML request supplied; recover esn from it for parsing
            self.request = etree.fromstring(xml)
            val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
            if val is not None:
                esn = util.testXMLValue(val)
        else:
            # construct request
            node0 = self._setrootelement('csw:GetRecords')
            if etree.__name__ != 'lxml.etree': # apply nsmap manually
                node0.set('xmlns:ows', namespaces['ows'])
                node0.set('xmlns:gmd', namespaces['gmd'])
                node0.set('xmlns:dif', namespaces['dif'])
                node0.set('xmlns:fgdc', namespaces['fgdc'])
            node0.set('outputSchema', outputschema)
            node0.set('outputFormat', format)
            node0.set('version', self.version)
            node0.set('resultType', resulttype)
            node0.set('service', self.service)
            if startposition > 0:
                node0.set('startPosition', str(startposition))
            node0.set('maxRecords', str(maxrecords))
            node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
            node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
            node1.set('typeNames', typenames)
            etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
            self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)
            if sortby is not None:
                fes.setsortby(node1, sortby)
            self.request = node0
        self._invoke()
        if self.exceptionreport is None:
            self.results = {}
            # process search results attributes
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
            self.results['matches'] = int(util.testXMLValue(val, True))
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
            self.results['returned'] = int(util.testXMLValue(val, True))
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
            self.results['nextrecord'] = int(util.testXMLValue(val, True))
            # process list of matching records
            self.records = OrderedDict()
            self._parserecords(outputschema, esn)
def getrecordbyid(self, id=[], esn='full', outputschema=namespaces['csw'], format=outputformat):
"""
Construct and process a GetRecordById request
Parameters
----------
- id: the list of Ids
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
data = {
'service': self.service,
'version': self.version,
'request': 'GetRecordById',
'outputFormat': format,
'outputSchema': outputschema,
'elementsetname': esn,
'id': ','.join(id),
}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
self.results = {}
self.records = OrderedDict()
self._parserecords(outputschema, esn)
    def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record', esn='summary', outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
        """
        Construct and process a GetRecords request

        Parameters
        ----------
        - constraints: the list of constraints (OgcExpression from owslib.fes module)
        - sortby: an OGC SortBy object (SortBy from owslib.fes module)
        - typenames: the typeNames to query against (default is csw:Record)
        - esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
        - outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
        - format: the outputFormat (default is 'application/xml')
        - startposition: requests a slice of the result set, starting at this position (default is 0)
        - maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
        - cql: common query language text. Note this overrides bbox, qtype, keywords
        - xml: raw XML request. Note this overrides all other options
        - resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
        """
        if xml is not None:
            # raw XML request supplied; recover esn from it for parsing
            self.request = etree.fromstring(xml)
            val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
            if val is not None:
                esn = util.testXMLValue(val)
        else:
            # construct request
            node0 = self._setrootelement('csw:GetRecords')
            if etree.__name__ != 'lxml.etree': # apply nsmap manually
                node0.set('xmlns:ows', namespaces['ows'])
                node0.set('xmlns:gmd', namespaces['gmd'])
                node0.set('xmlns:dif', namespaces['dif'])
                node0.set('xmlns:fgdc', namespaces['fgdc'])
            node0.set('outputSchema', outputschema)
            node0.set('outputFormat', format)
            node0.set('version', self.version)
            node0.set('service', self.service)
            node0.set('resultType', resulttype)
            if startposition > 0:
                node0.set('startPosition', str(startposition))
            node0.set('maxRecords', str(maxrecords))
            node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
            node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
            node1.set('typeNames', typenames)
            etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
            if any([len(constraints) > 0, cql is not None]):
                node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))
                node2.set('version', '1.1.0')
                flt = fes.FilterRequest()
                if len(constraints) > 0:
                    node2.append(flt.setConstraintList(constraints))
                # Now add a CQL filter if passed in
                elif cql is not None:
                    etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql
            if sortby is not None and isinstance(sortby, fes.SortBy):
                node1.append(sortby.toXML())
            self.request = node0
        self._invoke()
        if self.exceptionreport is None:
            self.results = {}
            # process search results attributes
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
            self.results['matches'] = int(util.testXMLValue(val, True))
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
            self.results['returned'] = int(util.testXMLValue(val, True))
            val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
            if val is not None:
                self.results['nextrecord'] = int(util.testXMLValue(val, True))
            else:
                warnings.warn("""CSW Server did not supply a nextRecord value (it is optional), so the client
                should page through the results in another way.""")
                # For more info, see:
                # https://github.com/geopython/OWSLib/issues/100
                self.results['nextrecord'] = None
            # process list of matching records
            self.records = OrderedDict()
            self._parserecords(outputschema, esn)
def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None, bbox=None, keywords=[], cql=None, identifier=None):
"""
Construct and process a Transaction request
Parameters
----------
- ttype: the type of transaction 'insert, 'update', 'delete'
- typename: the typename to describe (default is 'csw:Record')
- record: the XML record to insert
- propertyname: the RecordProperty/PropertyName to Filter against
- propertyvalue: the RecordProperty Value to Filter against (for updates)
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- keywords: list of keywords
- cql: common query language text. Note this overrides bbox, qtype, keywords
- identifier: record identifier. Note this overrides bbox, qtype, keywords, cql
"""
# construct request
node0 = self._setrootelement('csw:Transaction')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
validtransactions = ['insert', 'update', 'delete']
if ttype not in validtransactions: # invalid transaction
raise RuntimeError('Invalid transaction \'%s\'.' % ttype)
node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))
if ttype != 'update':
node1.set('typeName', typename)
if ttype == 'insert':
if record is None:
raise RuntimeError('Nothing to insert.')
node1.append(etree.fromstring(record))
if ttype == 'update':
if record is not None:
node1.append(etree.fromstring(record))
else:
if propertyname is not None and propertyvalue is not None:
node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))
etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname
etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, identifier)
if ttype == 'delete':
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
self._parsetransactionsummary()
self._parseinsertresult()
    def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):
        """
        Construct and process a Harvest request

        Parameters
        ----------
        - source: a URI to harvest
        - resourcetype: namespace identifying the type of resource
        - resourceformat: MIME type of the resource
        - harvestinterval: frequency of harvesting, in ISO8601
        - responsehandler: endpoint that CSW should respond to with response
        """
        # construct request
        node0 = self._setrootelement('csw:Harvest')
        node0.set('version', self.version)
        node0.set('service', self.service)
        node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
        etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source
        etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype
        if resourceformat is not None:
            etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat
        if harvestinterval is not None:
            etree.SubElement(node0, util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval
        if responsehandler is not None:
            etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler
        self.request = node0
        self._invoke()
        self.results = {}
        if self.exceptionreport is None:
            # an Acknowledgement means the server will harvest asynchronously
            val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))
            if util.testXMLValue(val) is not None:
                ts = val.attrib.get('timeStamp')
                self.timestamp = util.testXMLValue(ts, True)
                id = val.find(util.nspath_eval('csw:RequestId', namespaces))
                self.id = util.testXMLValue(id)
            else:
                # synchronous response: parse the transaction summary now
                self._parsetransactionsummary()
                self._parseinsertresult()
def get_operation_by_name(self, name):
"""Return a named operation"""
for item in self.operations:
if item.name.lower() == name.lower():
return item
raise KeyError("No operation named %s" % name)
def getService_urls(self, service_string=None):
"""
Return easily identifiable URLs for all service types
Parameters
----------
- service_string: a URI to lookup
"""
urls=[]
for key,rec in self.records.iteritems():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
def _parseinsertresult(self):
self.results['insertresults'] = []
for i in self._exml.findall(util.nspath_eval('csw:InsertResult', namespaces)):
for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):
self.results['insertresults'].append(util.testXMLValue(j))
    def _parserecords(self, outputschema, esn):
        # Populate self.records, keyed on record identifier; the metadata
        # wrapper class depends on the negotiated outputSchema.
        if outputschema == namespaces['gmd']: # iso 19139
            for i in self._exml.findall('.//'+util.nspath_eval('gmd:MD_Metadata', namespaces)) or self._exml.findall('.//'+util.nspath_eval('gmi:MI_Metadata', namespaces)):
                val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = MD_Metadata(i)
        elif outputschema == namespaces['fgdc']: # fgdc csdgm
            for i in self._exml.findall('.//metadata'):
                val = i.find('idinfo/datasetid')
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = Metadata(i)
        elif outputschema == namespaces['dif']: # nasa dif
            for i in self._exml.findall('.//'+util.nspath_eval('dif:DIF', namespaces)):
                val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = DIF(i)
        else: # process default (Dublin Core csw:Record et al.)
            for i in self._exml.findall('.//'+util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):
                val = i.find(util.nspath_eval('dc:identifier', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = CswRecord(i)
def _parsetransactionsummary(self):
val = self._exml.find(util.nspath_eval('csw:TransactionSummary', namespaces))
if val is not None:
rid = val.attrib.get('requestId')
self.results['requestid'] = util.testXMLValue(rid, True)
ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))
self.results['inserted'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))
self.results['updated'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))
self.results['deleted'] = int(util.testXMLValue(ts))
def _setesnel(self, esn):
""" Set the element name to parse depending on the ElementSetName requested """
el = 'Record'
if esn == 'brief':
el = 'BriefRecord'
if esn == 'summary':
el = 'SummaryRecord'
return el
def _setidentifierkey(self, el):
if el is None:
return 'owslib_random_%i' % random.randint(1,65536)
else:
return el
def _setrootelement(self, el):
if etree.__name__ == 'lxml.etree': # apply nsmap
return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)
else:
return etree.Element(util.nspath_eval(el, namespaces))
    def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None, identifier=None):
        # Append a csw:Constraint element to *parent* when any constraint
        # input was supplied; precedence is identifier > cql > OGC Filter.
        if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:
            node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))
            node0.set('version', '1.1.0')
            if identifier is not None: # set identifier filter, overrides all other parameters
                flt = fes.FilterRequest()
                node0.append(flt.set(identifier=identifier))
            elif cql is not None: # send raw CQL query
                # CQL passed, overrides all other parameters
                node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))
                node1.text = cql
            else: # construct a Filter request
                flt = fes.FilterRequest()
                node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname,bbox=bbox))
    def _invoke(self):
        """Send self.request to the server and parse the reply.

        A string request is sent as GET KVP; an Element tree is serialized
        and POSTed. The raw reply lands in self.response and is then parsed
        by _parse_response.
        """
        # do HTTP request
        if isinstance(self.request, basestring): # GET KVP
            req = Request(self.request)
            if self.username is not None and self.password is not None:
                # HTTP basic auth; strip the trailing newline encodestring adds
                base64string = base64.encodestring('%s:%s' % (self.username, self.password))[:-1]
                req.add_header('Authorization', 'Basic %s' % base64string)
            self.response = urlopen(req, timeout=self.timeout).read()
        else:
            xml_post_url = self.url
            # Get correct POST URL based on Operation list.
            # If skip_caps=True, then self.operations has not been set, so use
            # default URL.
            if hasattr(self, 'operations'):
                # map the calling method name onto the advertised operation
                caller = inspect.stack()[1][3]
                if caller == 'getrecords2': caller = 'getrecords'
                try:
                    op = self.get_operation_by_name(caller)
                    post_verbs = filter(lambda x: x.get('type').lower() == 'post', op.methods)
                    if len(post_verbs) > 1:
                        # Filter by constraints. We must match a PostEncoding of "XML"
                        try:
                            xml_post_url = next(x for x in filter(list, ([pv.get('url') for const in pv.get('constraints') if const.name.lower() == "postencoding" and 'xml' in map(lambda x: x.lower(), const.values)] for pv in post_verbs)))[0]
                        except StopIteration:
                            # Well, just use the first one.
                            xml_post_url = post_verbs[0].get('url')
                    elif len(post_verbs) == 1:
                        xml_post_url = post_verbs[0].get('url')
                except: # no such luck, just go with xml_post_url
                    # NOTE(review): deliberately best-effort; any failure in
                    # operation lookup falls back to the base URL
                    pass
            self.request = cleanup_namespaces(self.request)
            # Add any namespaces used in the "typeNames" attribute of the
            # csw:Query element to the query's xml namespaces.
            for query in self.request.findall(util.nspath_eval('csw:Query', namespaces)):
                ns = query.get("typeNames", None)
                if ns is not None:
                    # Pull out "gmd" from something like "gmd:MD_Metadata" from the list
                    # of typenames
                    ns_keys = [x.split(':')[0] for x in ns.split(' ')]
                    self.request = add_namespaces(self.request, ns_keys)
            self.request = util.element_to_string(self.request, encoding='utf-8')
            self.response = util.http_post(xml_post_url, self.request, self.lang, self.timeout, self.username, self.password)
        self._parse_response(self.response)
    def _parse_response(self, response):
        '''Parse an in-memory XML string (from a file object or _invoke) into
        self._exml, validating that it is a known CSW/OWS response and
        raising ows.ExceptionReport for server-side errors.
        '''
        # parse result to see if it's XML
        self._exml = etree.parse(StringIO.StringIO(response))
        # it's XML. Attempt to decipher whether the XML response is CSW-ish
        valid_xpaths = [
            util.nspath_eval('ows:ExceptionReport', namespaces),
            util.nspath_eval('csw:Capabilities', namespaces),
            util.nspath_eval('csw:DescribeRecordResponse', namespaces),
            util.nspath_eval('csw:GetDomainResponse', namespaces),
            util.nspath_eval('csw:GetRecordsResponse', namespaces),
            util.nspath_eval('csw:GetRecordByIdResponse', namespaces),
            util.nspath_eval('csw:HarvestResponse', namespaces),
            util.nspath_eval('csw:TransactionResponse', namespaces)
        ]
        if self._exml.getroot().tag not in valid_xpaths:
            raise RuntimeError('Document is XML, but not CSW-ish')
        # check if it's an OGC Exception
        val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))
        if val is not None:
            raise ows.ExceptionReport(self._exml, self.owscommon.namespace)
        else:
            self.exceptionreport = None
class CswRecord(object):
""" Process csw:Record, csw:BriefRecord, csw:SummaryRecord """
def __init__(self, record):
if hasattr(record, 'getroot'): # standalone document
self.xml = etree.tostring(record.getroot())
else: # part of a larger document
self.xml = etree.tostring(record)
# check to see if Dublin Core record comes from
# rdf:RDF/rdf:Description container
# (child content model is identical)
self.rdf = False
rdf = record.find(util.nspath_eval('rdf:Description', namespaces))
if rdf is not None:
self.rdf = True
record = rdf
# some CSWs return records with multiple identifiers based on
# different schemes. Use the first dc:identifier value to set
# self.identifier, and set self.identifiers as a list of dicts
val = record.find(util.nspath_eval('dc:identifier', namespaces))
self.identifier = util.testXMLValue(val)
self.identifiers = []
for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):
d = {}
d['scheme'] = i.attrib.get('scheme')
d['identifier'] = i.text
self.identifiers.append(d)
val = record.find(util.nspath_eval('dc:type', namespaces))
self.type = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:title', namespaces))
self.title = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:alternative', namespaces))
self.alternative = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:isPartOf', namespaces))
self.ispartof = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:abstract', namespaces))
self.abstract = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:date', namespaces))
self.date = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:created', namespaces))
self.created = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:issued', namespaces))
self.issued = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:relation', namespaces))
self.relation = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:temporal', namespaces))
self.temporal = util.testXMLValue(val)
self.uris = [] # list of dicts
for i in record.findall(util.nspath_eval('dc:URI', namespaces)):
uri = {}
uri['protocol'] = util.testXMLValue(i.attrib.get('protocol'), True)
uri['name'] = util.testXMLValue(i.attrib.get('name'), True)
uri['description'] = util.testXMLValue(i.attrib.get('description'), True)
uri['url'] = util.testXMLValue(i)
self.uris.append(uri)
self.references = [] # list of dicts
for i in record.findall(util.nspath_eval('dct:references', namespaces)):
ref = {}
ref['scheme'] = util.testXMLValue(i.attrib.get('scheme'), True)
ref['url'] = util.testXMLValue(i)
self.references.append(ref)
val = record.find(util.nspath_eval('dct:modified', namespaces))
self.modified = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:creator', namespaces))
self.creator = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:publisher', namespaces))
self.publisher = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:coverage', namespaces))
self.coverage = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:contributor', namespaces))
self.contributor = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:language', namespaces))
self.language = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:source', namespaces))
self.source = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:rightsHolder', namespaces))
self.rightsholder = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:accessRights', namespaces))
self.accessrights = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:license', namespaces))
self.license = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:format', namespaces))
self.format = util.testXMLValue(val)
self.subjects = []
for i in record.findall(util.nspath_eval('dc:subject', namespaces)):
self.subjects.append(util.testXMLValue(i))
self.rights = []
for i in record.findall(util.nspath_eval('dc:rights', namespaces)):
self.rights.append(util.testXMLValue(i))
val = record.find(util.nspath_eval('dct:spatial', namespaces))
self.spatial = util.testXMLValue(val)
val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))
if val is not None:
self.bbox = ows.BoundingBox(val, namespaces['ows'])
else:
self.bbox = None
val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))
if val is not None:
self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows'])
else:
self.bbox_wgs84 = None
| 43.351351 | 268 | 0.611766 |
from __future__ import (absolute_import, division, print_function)
import base64
import inspect
import warnings
import StringIO
import random
from urllib import urlencode
from urllib2 import Request, urlopen
from owscapable.util import OrderedDict
from owscapable.etree import etree
from owscapable import fes
from owscapable import util
from owscapable import ows
from owscapable.iso import MD_Metadata
from owscapable.fgdc import Metadata
from owscapable.dif import DIF
from owscapable.namespaces import Namespaces
from owscapable.util import cleanup_namespaces, bind_url, add_namespaces
# default outputFormat for all CSW requests
outputformat = 'application/xml'
def get_namespaces():
    # Build the prefix -> URI namespace map used throughout this module.
    n = Namespaces()
    return n.get_namespaces()
namespaces = get_namespaces()
# XML Schema for the CSW 2.0.2 Discovery operations; combined with the csw
# namespace URI to form the xsi:schemaLocation value stamped on requests.
schema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'
schema_location = '%s %s' % (namespaces['csw'], schema)
class CatalogueServiceWeb:
def __init__(self, url, xml=None, lang='en-US', version='2.0.2', timeout=10, skip_caps=False,
username=None, password=None):
self.url = url
self.lang = lang
self.version = version
self.timeout = timeout
self.username = username
self.password = password
self.service = 'CSW'
self.exceptionreport = None
self.owscommon = ows.OwsCommon('1.0.0')
if not skip_caps:
if xml:
self._parse_response(xml)
else:
data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))
self.identification=ows.ServiceIdentification(val,self.owscommon.namespace)
val = self._exml.find(util.nspath_eval('ows:ServiceProvider', namespaces))
self.provider=ows.ServiceProvider(val,self.owscommon.namespace)
self.operations=[]
for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
self.operations.append(ows.OperationsMetadata(elem, self.owscommon.namespace))
self.contents = None
val = self._exml.find(util.nspath_eval('ogc:Filter_Capabilities', namespaces))
self.filters=fes.FilterCapabilities(val)
def describerecord(self, typename='csw:Record', format=outputformat):
node0 = self._setrootelement('csw:DescribeRecord')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set('outputFormat', format)
node0.set('schemaLanguage', namespaces['xs2'])
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename
self.request = node0
self._invoke()
def getdomain(self, dname, dtype='parameter'):
# construct request
dtypename = 'ParameterName'
node0 = self._setrootelement('csw:GetDomain')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
if dtype == 'property':
dtypename = 'PropertyName'
etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')
self.results['type'] = util.testXMLValue(val, True)
val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))
self.results[dtype] = util.testXMLValue(val)
# get the list of values associated with the Domain
self.results['values'] = []
for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):
self.results['values'].append(util.testXMLValue(f))
def getrecords(self, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None, esn='summary', sortby=None, outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
warnings.warn("""Please use the updated 'getrecords2' method instead of 'getrecords'.
The 'getrecords' method will be upgraded to use the 'getrecords2' parameters
in a future version of OWSLib.""")
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('resultType', resulttype)
node0.set('service', self.service)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)
if sortby is not None:
fes.setsortby(node1, sortby)
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
self.results['nextrecord'] = int(util.testXMLValue(val, True))
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
def getrecordbyid(self, id=[], esn='full', outputschema=namespaces['csw'], format=outputformat):
# construct request
data = {
'service': self.service,
'version': self.version,
'request': 'GetRecordById',
'outputFormat': format,
'outputSchema': outputschema,
'elementsetname': esn,
'id': ','.join(id),
}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
self.results = {}
self.records = OrderedDict()
self._parserecords(outputschema, esn)
def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record', esn='summary', outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('service', self.service)
node0.set('resultType', resulttype)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
if any([len(constraints) > 0, cql is not None]):
node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))
node2.set('version', '1.1.0')
flt = fes.FilterRequest()
if len(constraints) > 0:
node2.append(flt.setConstraintList(constraints))
# Now add a CQL filter if passed in
elif cql is not None:
etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql
if sortby is not None and isinstance(sortby, fes.SortBy):
node1.append(sortby.toXML())
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
if val is not None:
self.results['nextrecord'] = int(util.testXMLValue(val, True))
else:
warnings.warn("""CSW Server did not supply a nextRecord value (it is optional), so the client
should page through the results in another way.""")
# For more info, see:
# https://github.com/geopython/OWSLib/issues/100
self.results['nextrecord'] = None
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None, bbox=None, keywords=[], cql=None, identifier=None):
# construct request
node0 = self._setrootelement('csw:Transaction')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
validtransactions = ['insert', 'update', 'delete']
if ttype not in validtransactions: # invalid transaction
raise RuntimeError('Invalid transaction \'%s\'.' % ttype)
node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))
if ttype != 'update':
node1.set('typeName', typename)
if ttype == 'insert':
if record is None:
raise RuntimeError('Nothing to insert.')
node1.append(etree.fromstring(record))
if ttype == 'update':
if record is not None:
node1.append(etree.fromstring(record))
else:
if propertyname is not None and propertyvalue is not None:
node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))
etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname
etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, identifier)
if ttype == 'delete':
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
self._parsetransactionsummary()
self._parseinsertresult()
def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):
# construct request
node0 = self._setrootelement('csw:Harvest')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source
etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype
if resourceformat is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat
if harvestinterval is not None:
etree.SubElement(node0, util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval
if responsehandler is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))
if util.testXMLValue(val) is not None:
ts = val.attrib.get('timeStamp')
self.timestamp = util.testXMLValue(ts, True)
id = val.find(util.nspath_eval('csw:RequestId', namespaces))
self.id = util.testXMLValue(id)
else:
self._parsetransactionsummary()
self._parseinsertresult()
def get_operation_by_name(self, name):
for item in self.operations:
if item.name.lower() == name.lower():
return item
raise KeyError("No operation named %s" % name)
def getService_urls(self, service_string=None):
urls=[]
for key,rec in self.records.iteritems():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
def _parseinsertresult(self):
self.results['insertresults'] = []
for i in self._exml.findall(util.nspath_eval('csw:InsertResult', namespaces)):
for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):
self.results['insertresults'].append(util.testXMLValue(j))
    def _parserecords(self, outputschema, esn):
        """Populate ``self.records`` from the parsed response in ``self._exml``.

        The record wrapper class is chosen by *outputschema*: ISO 19139
        (``MD_Metadata``), FGDC CSDGM (``Metadata``), NASA DIF (``DIF``) or
        the default Dublin Core ``CswRecord``.  Dictionary keys are record
        identifiers, or random surrogate keys when an identifier is absent.
        """
        if outputschema == namespaces['gmd']:  # iso 19139
            # falls back to gmi:MI_Metadata elements only when no
            # gmd:MD_Metadata elements are found at all
            for i in self._exml.findall('.//'+util.nspath_eval('gmd:MD_Metadata', namespaces)) or self._exml.findall('.//'+util.nspath_eval('gmi:MI_Metadata', namespaces)):
                val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = MD_Metadata(i)
        elif outputschema == namespaces['fgdc']:  # fgdc csdgm
            # FGDC documents carry no namespace, hence the bare paths
            for i in self._exml.findall('.//metadata'):
                val = i.find('idinfo/datasetid')
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = Metadata(i)
        elif outputschema == namespaces['dif']:  # nasa dif
            for i in self._exml.findall('.//'+util.nspath_eval('dif:DIF', namespaces)):
                val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = DIF(i)
        else:  # process default
            # element name depends on the requested ElementSetName
            for i in self._exml.findall('.//'+util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):
                val = i.find(util.nspath_eval('dc:identifier', namespaces))
                identifier = self._setidentifierkey(util.testXMLValue(val))
                self.records[identifier] = CswRecord(i)
def _parsetransactionsummary(self):
val = self._exml.find(util.nspath_eval('csw:TransactionSummary', namespaces))
if val is not None:
rid = val.attrib.get('requestId')
self.results['requestid'] = util.testXMLValue(rid, True)
ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))
self.results['inserted'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))
self.results['updated'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))
self.results['deleted'] = int(util.testXMLValue(ts))
def _setesnel(self, esn):
el = 'Record'
if esn == 'brief':
el = 'BriefRecord'
if esn == 'summary':
el = 'SummaryRecord'
return el
def _setidentifierkey(self, el):
if el is None:
return 'owslib_random_%i' % random.randint(1,65536)
else:
return el
def _setrootelement(self, el):
if etree.__name__ == 'lxml.etree': # apply nsmap
return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)
else:
return etree.Element(util.nspath_eval(el, namespaces))
def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None, identifier=None):
if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:
node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))
node0.set('version', '1.1.0')
if identifier is not None: # set identifier filter, overrides all other parameters
flt = fes.FilterRequest()
node0.append(flt.set(identifier=identifier))
elif cql is not None: # send raw CQL query
# CQL passed, overrides all other parameters
node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))
node1.text = cql
else: # construct a Filter request
flt = fes.FilterRequest()
node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname,bbox=bbox))
def _invoke(self):
# do HTTP request
if isinstance(self.request, basestring): # GET KVP
req = Request(self.request)
if self.username is not None and self.password is not None:
base64string = base64.encodestring('%s:%s' % (self.username, self.password))[:-1]
req.add_header('Authorization', 'Basic %s' % base64string)
self.response = urlopen(req, timeout=self.timeout).read()
else:
xml_post_url = self.url
# Get correct POST URL based on Operation list.
# If skip_caps=True, then self.operations has not been set, so use
# default URL.
if hasattr(self, 'operations'):
caller = inspect.stack()[1][3]
if caller == 'getrecords2': caller = 'getrecords'
try:
op = self.get_operation_by_name(caller)
post_verbs = filter(lambda x: x.get('type').lower() == 'post', op.methods)
if len(post_verbs) > 1:
# Filter by constraints. We must match a PostEncoding of "XML"
try:
xml_post_url = next(x for x in filter(list, ([pv.get('url') for const in pv.get('constraints') if const.name.lower() == "postencoding" and 'xml' in map(lambda x: x.lower(), const.values)] for pv in post_verbs)))[0]
except StopIteration:
# Well, just use the first one.
xml_post_url = post_verbs[0].get('url')
elif len(post_verbs) == 1:
xml_post_url = post_verbs[0].get('url')
except: # no such luck, just go with xml_post_url
pass
self.request = cleanup_namespaces(self.request)
# Add any namespaces used in the "typeNames" attribute of the
# csw:Query element to the query's xml namespaces.
for query in self.request.findall(util.nspath_eval('csw:Query', namespaces)):
ns = query.get("typeNames", None)
if ns is not None:
ns_keys = [x.split(':')[0] for x in ns.split(' ')]
self.request = add_namespaces(self.request, ns_keys)
self.request = util.element_to_string(self.request, encoding='utf-8')
self.response = util.http_post(xml_post_url, self.request, self.lang, self.timeout, self.username, self.password)
self._parse_response(self.response)
def _parse_response(self, response):
self._exml = etree.parse(StringIO.StringIO(response))
# it's XML. Attempt to decipher whether the XML response is CSW-ish """
valid_xpaths = [
util.nspath_eval('ows:ExceptionReport', namespaces),
util.nspath_eval('csw:Capabilities', namespaces),
util.nspath_eval('csw:DescribeRecordResponse', namespaces),
util.nspath_eval('csw:GetDomainResponse', namespaces),
util.nspath_eval('csw:GetRecordsResponse', namespaces),
util.nspath_eval('csw:GetRecordByIdResponse', namespaces),
util.nspath_eval('csw:HarvestResponse', namespaces),
util.nspath_eval('csw:TransactionResponse', namespaces)
]
if self._exml.getroot().tag not in valid_xpaths:
raise RuntimeError('Document is XML, but not CSW-ish')
# check if it's an OGC Exception
val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))
if val is not None:
raise ows.ExceptionReport(self._exml, self.owscommon.namespace)
else:
self.exceptionreport = None
class CswRecord(object):
    """A Dublin Core metadata record parsed from a ``csw:Record`` element."""

    def __init__(self, record):
        if hasattr(record, 'getroot'):  # standalone document
            self.xml = etree.tostring(record.getroot())
        else:  # part of a larger document
            self.xml = etree.tostring(record)

        # check to see if Dublin Core record comes from
        # rdf:RDF/rdf:Description container
        # (child content model is identical)
        self.rdf = False
        rdf = record.find(util.nspath_eval('rdf:Description', namespaces))
        if rdf is not None:
            self.rdf = True
            record = rdf

        def find_text(path):
            # text of the first element matching *path*, or None when absent
            return util.testXMLValue(record.find(util.nspath_eval(path, namespaces)))

        # some CSWs return records with multiple identifiers based on
        # different schemes.  Use the first dc:identifier value to set
        # self.identifier, and set self.identifiers as a list of dicts
        self.identifier = find_text('dc:identifier')
        self.identifiers = []
        for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):
            self.identifiers.append({'scheme': i.attrib.get('scheme'), 'identifier': i.text})

        # single-valued text elements, expressed once as
        # (attribute name, element path) pairs instead of twenty-odd copies
        # of the same find/testXMLValue boilerplate
        for attr, path in (
            ('type', 'dc:type'),
            ('title', 'dc:title'),
            ('alternative', 'dct:alternative'),
            ('ispartof', 'dct:isPartOf'),
            ('abstract', 'dct:abstract'),
            ('date', 'dc:date'),
            ('created', 'dct:created'),
            ('issued', 'dct:issued'),
            ('relation', 'dc:relation'),
            ('temporal', 'dct:temporal'),
            ('modified', 'dct:modified'),
            ('creator', 'dc:creator'),
            ('publisher', 'dc:publisher'),
            ('coverage', 'dc:coverage'),
            ('contributor', 'dc:contributor'),
            ('language', 'dc:language'),
            ('source', 'dc:source'),
            ('rightsholder', 'dct:rightsHolder'),
            ('accessrights', 'dct:accessRights'),
            ('license', 'dct:license'),
            ('format', 'dc:format'),
            ('spatial', 'dct:spatial'),
        ):
            setattr(self, attr, find_text(path))

        self.uris = []  # list of dicts
        for i in record.findall(util.nspath_eval('dc:URI', namespaces)):
            self.uris.append({
                'protocol': util.testXMLValue(i.attrib.get('protocol'), True),
                'name': util.testXMLValue(i.attrib.get('name'), True),
                'description': util.testXMLValue(i.attrib.get('description'), True),
                'url': util.testXMLValue(i),
            })

        self.references = []  # list of dicts
        for i in record.findall(util.nspath_eval('dct:references', namespaces)):
            self.references.append({
                'scheme': util.testXMLValue(i.attrib.get('scheme'), True),
                'url': util.testXMLValue(i),
            })

        # multi-valued text elements
        self.subjects = [util.testXMLValue(i) for i in record.findall(util.nspath_eval('dc:subject', namespaces))]
        self.rights = [util.testXMLValue(i) for i in record.findall(util.nspath_eval('dc:rights', namespaces))]

        # bounding boxes (None when the record supplies none)
        val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))
        self.bbox = ows.BoundingBox(val, namespaces['ows']) if val is not None else None
        val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))
        self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows']) if val is not None else None
| true | true |
f72b67fe680c8d3c4ade97a3c8636404858cf558 | 648 | py | Python | setup.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | [
"MIT"
] | null | null | null | setup.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | [
"MIT"
] | null | null | null | setup.py | sufiyanghori/sensu-plugin-python | 6682163a2a2219e8132b4c9e1dd53663fa477ae5 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='sensu_plugin',
version='0.7.0',
author='Sensu-Plugins and Contributors',
author_email='sensu-users@googlegroups.com',
packages=['sensu_plugin', 'sensu_plugin.tests'],
scripts=[],
url='https://github.com/sensu-plugins/sensu-plugin-python',
license='LICENSE.txt',
description='A framework for writing Python sensu plugins.',
long_description="""
""",
install_requires=[
'argparse',
'requests'
],
tests_require=[
'pycodestyle',
'pylint',
'coverage',
'nose',
'pytest',
'mock'
],
)
| 23.142857 | 64 | 0.594136 | from distutils.core import setup
setup(
name='sensu_plugin',
version='0.7.0',
author='Sensu-Plugins and Contributors',
author_email='sensu-users@googlegroups.com',
packages=['sensu_plugin', 'sensu_plugin.tests'],
scripts=[],
url='https://github.com/sensu-plugins/sensu-plugin-python',
license='LICENSE.txt',
description='A framework for writing Python sensu plugins.',
long_description="""
""",
install_requires=[
'argparse',
'requests'
],
tests_require=[
'pycodestyle',
'pylint',
'coverage',
'nose',
'pytest',
'mock'
],
)
| true | true |
f72b685d506ef171b93f3f1fddeda1a0a511663e | 8,723 | py | Python | tests/test_joint_logprob.py | kc611/aeppl | d24eee80a7448c48b55a8ec41aec150d1dd9d6a7 | [
"MIT"
] | null | null | null | tests/test_joint_logprob.py | kc611/aeppl | d24eee80a7448c48b55a8ec41aec150d1dd9d6a7 | [
"MIT"
] | null | null | null | tests/test_joint_logprob.py | kc611/aeppl | d24eee80a7448c48b55a8ec41aec150d1dd9d6a7 | [
"MIT"
] | null | null | null | import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats.distributions as sp
from aesara.graph.basic import Apply, ancestors, equal_computations
from aesara.graph.op import Op
from aesara.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
AdvancedSubtensor,
AdvancedSubtensor1,
IncSubtensor,
Subtensor,
)
from aeppl.abstract import MeasurableVariable
from aeppl.joint_logprob import joint_logprob
from aeppl.logprob import _logprob, logprob
from aeppl.utils import rvs_to_value_vars, walk_model
from tests.utils import assert_no_rvs
def test_joint_logprob_basic():
    """`joint_logprob` of one RV reduces to `logprob`; hierarchical models
    sum per-RV factors with inner RVs swapped for their value variables."""
    # A simple check for when `joint_logprob` is the same as `logprob`
    a = at.random.uniform(0.0, 1.0)
    a.name = "a"
    a_value_var = a.clone()
    a_logp = joint_logprob({a: a_value_var}, sum=False)
    a_logp_exp = logprob(a, a_value_var)
    assert equal_computations([a_logp], [a_logp_exp])
    # Let's try a hierarchical model
    sigma = at.random.invgamma(0.5, 0.5)
    Y = at.random.normal(0.0, sigma)
    sigma_value_var = sigma.clone()
    y_value_var = Y.clone()
    total_ll = joint_logprob({Y: y_value_var, sigma: sigma_value_var}, sum=False)
    # We need to replace the reference to `sigma` in `Y` with its value
    # variable
    ll_Y = logprob(Y, y_value_var)
    (ll_Y,), _ = rvs_to_value_vars(
        [ll_Y],
        initial_replacements={sigma: sigma_value_var},
    )
    total_ll_exp = logprob(sigma, sigma_value_var) + ll_Y
    assert equal_computations([total_ll], [total_ll_exp])
    # Now, make sure we can compute a joint log-probability for a hierarchical
    # model with some non-`RandomVariable` nodes
    c = at.random.normal()
    c.name = "c"
    b_l = c * a + 2.0
    b = at.random.uniform(b_l, b_l + 1.0)
    b.name = "b"
    b_value_var = b.clone()
    c_value_var = c.clone()
    b_logp = joint_logprob({a: a_value_var, b: b_value_var, c: c_value_var})
    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert_no_rvs(b_logp)
    # ...but all three value variables must feed into it
    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
def test_joint_logprob_multi_obs():
    """Joint log-probability of independent RVs is the sum of the parts."""
    a = at.random.uniform(0.0, 1.0)
    b = at.random.normal(0.0, 1.0)
    a_val = a.clone()
    b_val = b.clone()
    logp = joint_logprob({a: a_val, b: b_val}, sum=False)
    logp_exp = logprob(a, a_val) + logprob(b, b_val)
    assert equal_computations([logp], [logp_exp])
    x = at.random.normal(0, 1)
    y = at.random.normal(x, 1)
    x_val = x.clone()
    y_val = y.clone()
    # NOTE(review): `logp` and `exp_logp` below are built from the *same*
    # call, so the assertion is tautological; one side was probably meant to
    # use `sum=False` (or a manually-summed expression).  Confirm the intent.
    logp = joint_logprob({x: x_val, y: y_val})
    exp_logp = joint_logprob({x: x_val, y: y_val})
    assert equal_computations([logp], [exp_logp])
def test_joint_logprob_diff_dims():
    """The log-probability graph evaluates correctly when the mapped RVs
    have different dimensionalities (vector prior, projected likelihood)."""
    design = at.matrix("M")
    x = at.random.normal(0, 1, size=design.shape[1], name="X")
    y = at.random.normal(design.dot(x), 1, name="Y")

    x_vv = x.clone()
    x_vv.name = "x"
    y_vv = y.clone()
    y_vv.name = "y"
    logp = joint_logprob({x: x_vv, y: y_vv})

    # concrete draws for every input of the graph
    design_val = np.random.normal(size=(10, 3))
    x_val = np.random.normal(size=(3,))
    y_val = np.random.normal(size=(10,))
    logp_val = logp.eval({design: design_val, x_vv: x_val, y_vv: y_val})

    # reference value computed with scipy on the same draws
    prior_ll = sp.norm.logpdf(x_val, 0, 1).sum()
    like_ll = sp.norm.logpdf(y_val, design_val.dot(x_val), 1).sum()
    assert prior_ll + like_ll == pytest.approx(logp_val)
@pytest.mark.parametrize(
    "indices, size",
    [
        # basic slice
        (slice(0, 2), 5),
        # boolean mask
        (np.r_[True, True, False, False, True], 5),
        # integer "fancy" index
        (np.r_[0, 1, 4], 5),
        # multi-dimensional coordinate arrays
        ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)),
    ],
)
def test_joint_logprob_incsubtensor(indices, size):
    """Make sure we can compute a joint log-probability for ``Y[idx] = data`` where ``Y`` is univariate."""
    rng = np.random.RandomState(232)
    mu = np.power(10, np.arange(np.prod(size))).reshape(size)
    sigma = 0.001
    # "observed" entries at the indexed positions
    data = rng.normal(mu[indices], 1.0)
    y_val = rng.normal(mu, sigma, size=size)
    Y_rv = at.random.normal(mu, sigma, size=size)
    Y_rv.name = "Y"
    y_value_var = Y_rv.clone()
    y_value_var.name = "y"
    # ``Y[idx] = data`` must lower to one of Aesara's inc-subtensor ops
    Y_sst = at.set_subtensor(Y_rv[indices], data)
    assert isinstance(
        Y_sst.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)
    )
    Y_sst_logp = joint_logprob({Y_rv: y_value_var, Y_sst: None}, sum=False)
    obs_logps = Y_sst_logp.eval({y_value_var: y_val})
    # reference: density of y with the indexed entries replaced by the data
    y_val_idx = y_val.copy()
    y_val_idx[indices] = data
    exp_obs_logps = sp.norm.logpdf(y_val_idx, mu, sigma)
    np.testing.assert_almost_equal(obs_logps, exp_obs_logps)
def test_joint_logprob_subtensor():
    """Make sure we can compute a joint log-probability for ``Y[I]`` where ``Y`` and ``I`` are random variables."""
    size = 5
    mu_base = np.power(10, np.arange(np.prod(size))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    # A_rv and I_rv deliberately share one RNG object, so their
    # construction/draw order matters for reproducibility
    rng = aesara.shared(np.random.RandomState(232), borrow=True)
    A_rv = at.random.normal(mu, sigma, rng=rng)
    A_rv.name = "A"
    p = 0.5
    I_rv = at.random.bernoulli(p, size=size, rng=rng)
    I_rv.name = "I"
    # per-column row selection; must lower to one of Aesara's subtensor ops
    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]
    assert isinstance(
        A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1)
    )
    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"
    I_value_var = I_rv.type()
    I_value_var.name = "I_value"
    A_idx_logp = joint_logprob({A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False)
    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)
    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])
    decimals = 6 if aesara.config.floatX == "float64" else 4
    test_val_rng = np.random.RandomState(3238)
    # compare against scipy on several independent draws
    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size, random_state=test_val_rng).astype(I_rv.dtype)
        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma)
        A_idx_value = norm_sp.rvs(random_state=test_val_rng).astype(A_idx.dtype)
        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)
        logp_vals = logp_vals_fn(A_idx_value, I_value)
        np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
def test_persist_inputs():
    """Inputs and value variables must appear in the log-prob graph
    unchanged (`joint_logprob` must not clone them unnecessarily)."""
    x = at.scalar("x")
    beta_rv = at.random.normal(0, 1, name="beta")
    Y_rv = at.random.normal(beta_rv * x, 1, name="y")

    beta_vv = beta_rv.type()
    y_vv = Y_rv.clone()

    # the original input variable must survive into the log-prob graph
    first_logp = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv})
    assert x in ancestors([first_logp])

    # a value variable that is itself a graph must also survive un-cloned
    y_vv_graph = y_vv * 2
    second_logp = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv_graph})
    assert y_vv_graph in ancestors([second_logp])
def test_ignore_logprob():
    """An RV tagged with `ignore_logprob` contributes no factor: the result
    matches a model where that RV is replaced by its value variable."""
    x = at.scalar("x")
    beta_rv = at.random.normal(0, 1, name="beta")
    beta_rv.tag.ignore_logprob = True
    y_rv = at.random.normal(beta_rv * x, 1, name="y")

    beta = beta_rv.type()
    y = y_rv.type()
    actual = joint_logprob({beta_rv: beta, y_rv: y})

    # reference: the same likelihood built directly on the value variable
    y_rv_ref = at.random.normal(beta * x, 1, name="y")
    expected = joint_logprob({y_rv_ref: y})
    assert equal_computations([actual], [expected])
def test_ignore_logprob_multiout():
    """`ignore_logprob` on every output of a multi-output measurable op
    removes all of its factors, so `joint_logprob` yields None."""
    class MyMultiOut(Op):
        # minimal two-output Op used only as a measurable stand-in
        @staticmethod
        def impl(a, b):
            res1 = 2 * a
            res2 = 2 * b
            return [res1, res2]
        def make_node(self, a, b):
            return Apply(self, [a, b], [a.type(), b.type()])
        def perform(self, node, inputs, outputs):
            res1, res2 = self.impl(inputs[0], inputs[1])
            outputs[0][0] = res1
            outputs[1][0] = res2
    # NOTE(review): these registrations are global and re-run on every test
    # invocation; harmless today, but worth confirming if tests are reordered.
    MeasurableVariable.register(MyMultiOut)
    @_logprob.register(MyMultiOut)
    def logprob_MyMultiOut(op, value, *inputs, name=None, **kwargs):
        return at.zeros_like(value)
    Y_1_rv, Y_2_rv = MyMultiOut()(at.vector(), at.vector())
    # suppress both outputs' factors
    Y_1_rv.tag.ignore_logprob = True
    Y_2_rv.tag.ignore_logprob = True
    y_1_vv = Y_1_rv.clone()
    y_2_vv = Y_2_rv.clone()
    logp_exp = joint_logprob({Y_1_rv: y_1_vv, Y_2_rv: y_2_vv})
    assert logp_exp is None
def test_multiple_rvs_to_same_value_raises():
    """Mapping two distinct RVs onto one value variable is ambiguous and
    must raise a ValueError naming the offending value variable."""
    first_rv = at.random.normal(name="x1")
    second_rv = at.random.normal(name="x2")
    shared_value = first_rv.type()
    shared_value.name = "x"

    expected_msg = "More than one logprob factor was assigned to the value var x"
    with pytest.raises(ValueError, match=expected_msg):
        joint_logprob({first_rv: shared_value, second_rv: shared_value})
| 28.6 | 115 | 0.652872 | import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats.distributions as sp
from aesara.graph.basic import Apply, ancestors, equal_computations
from aesara.graph.op import Op
from aesara.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
AdvancedSubtensor,
AdvancedSubtensor1,
IncSubtensor,
Subtensor,
)
from aeppl.abstract import MeasurableVariable
from aeppl.joint_logprob import joint_logprob
from aeppl.logprob import _logprob, logprob
from aeppl.utils import rvs_to_value_vars, walk_model
from tests.utils import assert_no_rvs
def test_joint_logprob_basic():
    """`joint_logprob` of one RV reduces to `logprob`; hierarchical models
    sum per-RV factors with inner RVs swapped for their value variables."""
    # a single RV: joint_logprob must equal plain logprob
    a = at.random.uniform(0.0, 1.0)
    a.name = "a"
    a_value_var = a.clone()
    a_logp = joint_logprob({a: a_value_var}, sum=False)
    a_logp_exp = logprob(a, a_value_var)
    assert equal_computations([a_logp], [a_logp_exp])
    # a hierarchical model
    sigma = at.random.invgamma(0.5, 0.5)
    Y = at.random.normal(0.0, sigma)
    sigma_value_var = sigma.clone()
    y_value_var = Y.clone()
    total_ll = joint_logprob({Y: y_value_var, sigma: sigma_value_var}, sum=False)
    # We need to replace the reference to `sigma` in `Y` with its value
    # variable
    ll_Y = logprob(Y, y_value_var)
    (ll_Y,), _ = rvs_to_value_vars(
        [ll_Y],
        initial_replacements={sigma: sigma_value_var},
    )
    total_ll_exp = logprob(sigma, sigma_value_var) + ll_Y
    assert equal_computations([total_ll], [total_ll_exp])
    # Now, make sure we can compute a joint log-probability for a hierarchical
    # model with some non-`RandomVariable` nodes
    c = at.random.normal()
    c.name = "c"
    b_l = c * a + 2.0
    b = at.random.uniform(b_l, b_l + 1.0)
    b.name = "b"
    b_value_var = b.clone()
    c_value_var = c.clone()
    b_logp = joint_logprob({a: a_value_var, b: b_value_var, c: c_value_var})
    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert_no_rvs(b_logp)
    # ...but all three value variables must feed into it
    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
def test_joint_logprob_multi_obs():
    """Joint log-probability of independent RVs is the sum of the parts."""
    a = at.random.uniform(0.0, 1.0)
    b = at.random.normal(0.0, 1.0)
    a_val = a.clone()
    b_val = b.clone()
    logp = joint_logprob({a: a_val, b: b_val}, sum=False)
    logp_exp = logprob(a, a_val) + logprob(b, b_val)
    assert equal_computations([logp], [logp_exp])
    x = at.random.normal(0, 1)
    y = at.random.normal(x, 1)
    x_val = x.clone()
    y_val = y.clone()
    # NOTE(review): `logp` and `exp_logp` below are built from the *same*
    # call, so the assertion is tautological; one side was probably meant to
    # use `sum=False` (or a manually-summed expression).  Confirm the intent.
    logp = joint_logprob({x: x_val, y: y_val})
    exp_logp = joint_logprob({x: x_val, y: y_val})
    assert equal_computations([logp], [exp_logp])
def test_joint_logprob_diff_dims():
    """The log-probability graph evaluates correctly when the mapped RVs
    have different dimensionalities (vector prior, projected likelihood)."""
    design = at.matrix("M")
    x = at.random.normal(0, 1, size=design.shape[1], name="X")
    y = at.random.normal(design.dot(x), 1, name="Y")

    x_vv = x.clone()
    x_vv.name = "x"
    y_vv = y.clone()
    y_vv.name = "y"
    logp = joint_logprob({x: x_vv, y: y_vv})

    # concrete draws for every input of the graph
    design_val = np.random.normal(size=(10, 3))
    x_val = np.random.normal(size=(3,))
    y_val = np.random.normal(size=(10,))
    logp_val = logp.eval({design: design_val, x_vv: x_val, y_vv: y_val})

    # reference value computed with scipy on the same draws
    prior_ll = sp.norm.logpdf(x_val, 0, 1).sum()
    like_ll = sp.norm.logpdf(y_val, design_val.dot(x_val), 1).sum()
    assert prior_ll + like_ll == pytest.approx(logp_val)
@pytest.mark.parametrize(
    "indices, size",
    [
        # basic slice
        (slice(0, 2), 5),
        # boolean mask
        (np.r_[True, True, False, False, True], 5),
        # integer "fancy" index
        (np.r_[0, 1, 4], 5),
        # multi-dimensional coordinate arrays
        ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)),
    ],
)
def test_joint_logprob_incsubtensor(indices, size):
    """Joint log-probability of ``Y[idx] = data`` for univariate ``Y``."""
    rng = np.random.RandomState(232)
    mu = np.power(10, np.arange(np.prod(size))).reshape(size)
    sigma = 0.001
    # "observed" entries at the indexed positions
    data = rng.normal(mu[indices], 1.0)
    y_val = rng.normal(mu, sigma, size=size)
    Y_rv = at.random.normal(mu, sigma, size=size)
    Y_rv.name = "Y"
    y_value_var = Y_rv.clone()
    y_value_var.name = "y"
    # ``Y[idx] = data`` must lower to one of Aesara's inc-subtensor ops
    Y_sst = at.set_subtensor(Y_rv[indices], data)
    assert isinstance(
        Y_sst.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)
    )
    Y_sst_logp = joint_logprob({Y_rv: y_value_var, Y_sst: None}, sum=False)
    obs_logps = Y_sst_logp.eval({y_value_var: y_val})
    # reference: density of y with the indexed entries replaced by the data
    y_val_idx = y_val.copy()
    y_val_idx[indices] = data
    exp_obs_logps = sp.norm.logpdf(y_val_idx, mu, sigma)
    np.testing.assert_almost_equal(obs_logps, exp_obs_logps)
def test_joint_logprob_subtensor():
    """Joint log-probability of ``Y[I]`` where ``Y`` and ``I`` are RVs."""
    size = 5
    mu_base = np.power(10, np.arange(np.prod(size))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    # A_rv and I_rv deliberately share one RNG object, so their
    # construction/draw order matters for reproducibility
    rng = aesara.shared(np.random.RandomState(232), borrow=True)
    A_rv = at.random.normal(mu, sigma, rng=rng)
    A_rv.name = "A"
    p = 0.5
    I_rv = at.random.bernoulli(p, size=size, rng=rng)
    I_rv.name = "I"
    # per-column row selection; must lower to one of Aesara's subtensor ops
    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]
    assert isinstance(
        A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1)
    )
    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"
    I_value_var = I_rv.type()
    I_value_var.name = "I_value"
    A_idx_logp = joint_logprob({A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False)
    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)
    # the compiled graph should not contain any RandomVariables
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])
    decimals = 6 if aesara.config.floatX == "float64" else 4
    test_val_rng = np.random.RandomState(3238)
    # compare against scipy on several independent draws
    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size, random_state=test_val_rng).astype(I_rv.dtype)
        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma)
        A_idx_value = norm_sp.rvs(random_state=test_val_rng).astype(A_idx.dtype)
        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)
        logp_vals = logp_vals_fn(A_idx_value, I_value)
        np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
def test_persist_inputs():
    """Deterministic inputs and derived value expressions must remain in the
    log-probability graph instead of being cloned away."""
    x = at.scalar("x")
    b_rv = at.random.normal(0, 1, name="beta")
    y_rv = at.random.normal(b_rv * x, 1, name="y")

    b_val = b_rv.type()
    y_val = y_rv.clone()

    # The non-random input ``x`` stays an ancestor of the logp graph.
    logp = joint_logprob({b_rv: b_val, y_rv: y_val})
    assert x in ancestors([logp])

    # Same guarantee when the value variable is itself a derived expression.
    y_val_scaled = y_val * 2
    logp_scaled = joint_logprob({b_rv: b_val, y_rv: y_val_scaled})
    assert y_val_scaled in ancestors([logp_scaled])
def test_ignore_logprob():
    """An RV tagged ``ignore_logprob`` contributes no logp factor: the result
    equals the logp of a graph with the RV substituted by its value."""
    x = at.scalar("x")
    b_rv = at.random.normal(0, 1, name="beta")
    b_rv.tag.ignore_logprob = True
    y_rv = at.random.normal(b_rv * x, 1, name="y")

    b_val = b_rv.type()
    y_val = y_rv.type()
    logp = joint_logprob({b_rv: b_val, y_rv: y_val})

    # Reference graph: beta replaced directly by its value variable.
    y_rv_ref = at.random.normal(b_val * x, 1, name="y")
    logp_ref = joint_logprob({y_rv_ref: y_val})
    assert equal_computations([logp], [logp_ref])
def test_ignore_logprob_multiout():
    """When every output of a multi-output measurable Op is tagged with
    ``ignore_logprob``, ``joint_logprob`` has nothing to measure and must
    return ``None``."""

    class MyMultiOut(Op):
        # Minimal two-input / two-output Op used only as a measurable stand-in.
        @staticmethod
        def impl(a, b):
            res1 = 2 * a
            res2 = 2 * b
            return [res1, res2]

        def make_node(self, a, b):
            return Apply(self, [a, b], [a.type(), b.type()])

        def perform(self, node, inputs, outputs):
            res1, res2 = self.impl(inputs[0], inputs[1])
            outputs[0][0] = res1
            outputs[1][0] = res2

    # Register the Op as measurable and give it a trivial logprob so that,
    # without the ignore flags, it *would* participate in the joint logp.
    MeasurableVariable.register(MyMultiOut)

    @_logprob.register(MyMultiOut)
    def logprob_MyMultiOut(op, value, *inputs, name=None, **kwargs):
        return at.zeros_like(value)

    Y_1_rv, Y_2_rv = MyMultiOut()(at.vector(), at.vector())
    # Both outputs are flagged as ignored ...
    Y_1_rv.tag.ignore_logprob = True
    Y_2_rv.tag.ignore_logprob = True
    y_1_vv = Y_1_rv.clone()
    y_2_vv = Y_2_rv.clone()
    logp_exp = joint_logprob({Y_1_rv: y_1_vv, Y_2_rv: y_2_vv})
    # ... so no log-probability graph remains at all.
    assert logp_exp is None
def test_multiple_rvs_to_same_value_raises():
    """Mapping one value variable onto two distinct RVs is ambiguous and
    must raise ``ValueError``."""
    rv_a = at.random.normal(name="x1")
    rv_b = at.random.normal(name="x2")
    shared_val = rv_a.type()
    shared_val.name = "x"

    expected = "More than one logprob factor was assigned to the value var x"
    with pytest.raises(ValueError, match=expected):
        joint_logprob({rv_a: shared_val, rv_b: shared_val})
| true | true |
f72b69a62107d6763a33183b36f8ec377f171f30 | 2,003 | py | Python | sgmock/fixture/setup.py | blurstudio/sgmock | 6b5b949cbd4bc16db8060f1b07bb8113a624e9e4 | [
"BSD-3-Clause"
] | null | null | null | sgmock/fixture/setup.py | blurstudio/sgmock | 6b5b949cbd4bc16db8060f1b07bb8113a624e9e4 | [
"BSD-3-Clause"
] | null | null | null | sgmock/fixture/setup.py | blurstudio/sgmock | 6b5b949cbd4bc16db8060f1b07bb8113a624e9e4 | [
"BSD-3-Clause"
] | null | null | null | import random
from .base import Fixture
# From various online generators.
project_names = [
'Waiting for Johnson',
'Helping Delilah',
'Finding Gump',
'Double Danger',
'Master of Surrender',
'Compulsive Winter',
'Inner Space',
]
# All of the WesternX sequence codes from previous films.
sequence_names = [
'AB', 'BA', 'BB', 'BD', 'BE', 'BL', 'BS', 'BU', 'BX', 'CD', 'CF', 'CT',
'DB', 'DC', 'DD', 'DR', 'ED', 'FX', 'GB', 'GC', 'GP', 'GR', 'HH', 'HT',
'IC', 'IP', 'JK', 'JS', 'LP', 'MB', 'MD', 'MP', 'MS', 'NP', 'NS', 'OS',
'PB', 'PJ', 'PM', 'PR', 'PV', 'RB', 'RD', 'RF', 'RG', 'RT', 'SD', 'SE',
'SL', 'SM', 'SN', 'SP', 'SS', 'SX', 'UB', 'VX', 'WR', 'ZD'
]
asset_specs = [
('Character', 'Cow'),
('Character', 'Dog'),
('Character', 'Monkey'),
('Character', 'Pig'),
('Character', 'Camel'),
('Character', 'Snake'),
('Environment', 'Moon'),
('Environment', 'Mars'),
('Environment', 'Space'),
('Environment', 'Forest'),
('Environment', 'Volcano'),
]
def full(sg):
    """Populate the mock Shotgun connection *sg* with a full project tree.

    Creates 4 projects, each with 3-8 sequences of 3-8 shots (5 tasks per
    shot) and 5-9 assets (3 tasks per asset).

    BUG FIX: the loops used Python 2's ``xrange``, which is a ``NameError``
    on Python 3; ``range`` behaves identically for these small counts.

    NOTE(review): this mutates the module-level name pools in place
    (shuffle/pop), so repeated calls draw different — and eventually
    exhausted — project/sequence names.
    """
    fix = Fixture(sg)
    steps = fix.default_steps()
    random.shuffle(project_names)
    random.shuffle(sequence_names)
    for proj_i in range(4):
        proj = fix.Project(project_names.pop())
        for seq_i in range(random.randint(3, 8)):
            seq = proj.Sequence(sequence_names.pop())
            for shot_i in range(random.randint(3, 8)):
                shot = seq.Shot('%s_%03d' % (seq['code'], shot_i + 1))
                for step_code in ('Online', 'MM', 'Anm', 'Light', 'Comp'):
                    shot.Task('Do %s Work' % step_code, steps[step_code])
        random.shuffle(asset_specs)
        for asset_i in range(random.randint(5, 9)):
            type_, code = asset_specs[asset_i]
            asset = proj.Asset(code, type_)
            for step_code in ('Art', 'Model', 'Rig'):
                asset.Task('Do %s Work' % step_code, steps[step_code])
| 28.614286 | 75 | 0.51323 | import random
from .base import Fixture
project_names = [
'Waiting for Johnson',
'Helping Delilah',
'Finding Gump',
'Double Danger',
'Master of Surrender',
'Compulsive Winter',
'Inner Space',
]
sequence_names = [
'AB', 'BA', 'BB', 'BD', 'BE', 'BL', 'BS', 'BU', 'BX', 'CD', 'CF', 'CT',
'DB', 'DC', 'DD', 'DR', 'ED', 'FX', 'GB', 'GC', 'GP', 'GR', 'HH', 'HT',
'IC', 'IP', 'JK', 'JS', 'LP', 'MB', 'MD', 'MP', 'MS', 'NP', 'NS', 'OS',
'PB', 'PJ', 'PM', 'PR', 'PV', 'RB', 'RD', 'RF', 'RG', 'RT', 'SD', 'SE',
'SL', 'SM', 'SN', 'SP', 'SS', 'SX', 'UB', 'VX', 'WR', 'ZD'
]
asset_specs = [
('Character', 'Cow'),
('Character', 'Dog'),
('Character', 'Monkey'),
('Character', 'Pig'),
('Character', 'Camel'),
('Character', 'Snake'),
('Environment', 'Moon'),
('Environment', 'Mars'),
('Environment', 'Space'),
('Environment', 'Forest'),
('Environment', 'Volcano'),
]
def full(sg):
fix = Fixture(sg)
steps = fix.default_steps()
random.shuffle(project_names)
random.shuffle(sequence_names)
for proj_i in xrange(4):
proj = fix.Project(project_names.pop())
for seq_i in xrange(random.randint(3, 8)):
seq = proj.Sequence(sequence_names.pop())
for shot_i in xrange(random.randint(3, 8)):
shot = seq.Shot('%s_%03d' % (seq['code'], shot_i + 1))
for step_code in ('Online', 'MM', 'Anm', 'Light', 'Comp'):
shot.Task('Do %s Work' % step_code, steps[step_code])
random.shuffle(asset_specs)
for asset_i in xrange(random.randint(5, 9)):
type_, code = asset_specs[asset_i]
asset = proj.Asset(code, type_)
for step_code in ('Art', 'Model', 'Rig'):
asset.Task('Do %s Work' % step_code, steps[step_code])
| true | true |
f72b6b42f15d1adf79085531fb38312487b1ca0c | 3,816 | py | Python | FairnessTest/models/Seq2Seq_WER/opts.py | zgahhblhc/DialogueFairness | dc00eb8aad7145cdd01f69c99fd79f741b9aa8d4 | [
"MIT"
] | 3 | 2020-12-10T02:20:44.000Z | 2022-02-23T18:03:30.000Z | FairnessTest/models/Seq2Seq_WER/opts.py | zgahhblhc/DialogueFairness | dc00eb8aad7145cdd01f69c99fd79f741b9aa8d4 | [
"MIT"
] | null | null | null | FairnessTest/models/Seq2Seq_WER/opts.py | zgahhblhc/DialogueFairness | dc00eb8aad7145cdd01f69c99fd79f741b9aa8d4 | [
"MIT"
] | 4 | 2020-11-03T18:33:24.000Z | 2022-02-23T18:04:01.000Z | opt = {'task': 'twitter',
'download_path': '/mnt/home/liuhaoc1/ParlAI/downloads',
'datatype': 'train',
'image_mode': 'raw',
'numthreads': 1,
'hide_labels': False,
'batchsize': 32,
'batch_sort': True,
'context_length': -1,
'include_labels': True,
'datapath': '/mnt/home/liuhaoc1/ParlAI/data',
'model': 'legacy:seq2seq:0',
'model_file': '/mnt/home/liuhaoc1/ParlAI/models/seq_twitter_reg_0.25/seq_twitter_reg_0.25',
'dict_class': '',
'evaltask': None,
'display_examples': False,
'num_epochs': -1,
'max_train_time': 205200.0,
# 'validation_every_n_secs': 600.0,
'save_every_n_secs': -1,
'save_after_valid': True,
# 'validation_max_exs': -1,
# 'validation_patience': 18,
# 'validation_metric': 'ppl',
# 'validation_metric_mode': 'min',
# 'validation_cutoff': 1.0,
'dict_build_first': True,
'load_from_checkpoint': True,
'tensorboard_log': False,
'tensorboard_tag': None,
'tensorboard_metrics': None,
'tensorboard_comment': '',
'dict_maxexs': -1,
'dict_include_valid': False,
'dict_include_test': False,
'log_every_n_secs': 15.0,
'image_size': 256,
'image_cropsize': 224,
'init_model': None,
'hiddensize': 1024,
'embeddingsize': 300,
'numlayers': 3,
'learningrate': 1.0,
'dropout': 0.0,
'gradient_clip': 0.1,
'bidirectional': False, 'attention': 'none',
'attention_length': 48, 'attention_time': 'post',
'no_cuda': False, 'gpu': -1,
'rank_candidates': False, 'truncate': 150,
'rnn_class': 'lstm', 'decoder': 'same',
'lookuptable': 'enc_dec', 'optimizer': 'sgd',
'momentum': 0.9, 'embedding_type': 'random',
'numsoftmax': 1, 'report_freq': 0.001,
'history_replies': 'label_else_model',
'person_tokens': False,
'dict_file': '',
'dict_initpath': None,
'dict_language': 'english',
'dict_max_ngram_size': -1, 'dict_minfreq': 0,
'dict_maxtokens': 30000, 'dict_nulltoken': '__NULL__',
'dict_starttoken': '__START__', 'dict_endtoken': '__END__',
'dict_unktoken': '__UNK__', 'dict_tokenizer': 're', 'dict_lower': True,
'parlai_home': '/mnt/home/liuhaoc1/ParlAI',
# 'override': {'task': 'twitter', 'max_train_time': '205200', 'model': 'seq2seq', 'numsoftmax': '1', 'hiddensize': '1024', 'embeddingsize': '300', 'attention': 'none', 'numlayers': '3', 'rnn_class': 'lstm', 'learningrate': '1',
# 'dropout': '0.0',
# 'gradient_clip': '0.1', 'lookuptable': 'enc_dec', 'optimizer': 'sgd', 'embedding_type': 'glove', 'momentum': '0.9', 'batchsize': '32', 'batch_sort': 'True', 'truncate': '150', 'validation_every_n_secs': '600', 'validation_metric': 'ppl', 'validation_metric_mode': 'min', 'validation_patience': '18', 'save_after_valid': 'True', 'load_from_checkpoint': 'True', 'dict_lower': 'True', 'dict_maxtokens': '30000', 'log_every_n_secs': '15', 'model_file': '/mnt/home/liuhaoc1/ParlAI/models/seq_twitter_aug/seq_twitter_aug'},
'starttime': 'Jun15_16-53', 'show_advanced_args': False,
'pytorch_teacher_task': None, 'pytorch_teacher_dataset': None,
'pytorch_datapath': None, 'numworkers': 4, 'pytorch_preprocess': False,
'pytorch_teacher_batch_sort': False, 'batch_sort_cache_type': 'pop',
'batch_length_range': 5, 'shuffle': False, 'batch_sort_field': 'text',
'pytorch_context_length': -1, 'pytorch_include_labels': True, 'num_examples': -1,
'metrics': 'all', 'beam_size': 1, 'beam_log_freq': 0.0, 'topk': 1,
'softmax_layer_bias': False, 'bpe_debug': False, 'dict_textfields': 'text,labels'} | 51.567568 | 539 | 0.60587 | opt = {'task': 'twitter',
'download_path': '/mnt/home/liuhaoc1/ParlAI/downloads',
'datatype': 'train',
'image_mode': 'raw',
'numthreads': 1,
'hide_labels': False,
'batchsize': 32,
'batch_sort': True,
'context_length': -1,
'include_labels': True,
'datapath': '/mnt/home/liuhaoc1/ParlAI/data',
'model': 'legacy:seq2seq:0',
'model_file': '/mnt/home/liuhaoc1/ParlAI/models/seq_twitter_reg_0.25/seq_twitter_reg_0.25',
'dict_class': '',
'evaltask': None,
'display_examples': False,
'num_epochs': -1,
'max_train_time': 205200.0,
'save_every_n_secs': -1,
'save_after_valid': True,
'dict_build_first': True,
'load_from_checkpoint': True,
'tensorboard_log': False,
'tensorboard_tag': None,
'tensorboard_metrics': None,
'tensorboard_comment': '',
'dict_maxexs': -1,
'dict_include_valid': False,
'dict_include_test': False,
'log_every_n_secs': 15.0,
'image_size': 256,
'image_cropsize': 224,
'init_model': None,
'hiddensize': 1024,
'embeddingsize': 300,
'numlayers': 3,
'learningrate': 1.0,
'dropout': 0.0,
'gradient_clip': 0.1,
'bidirectional': False, 'attention': 'none',
'attention_length': 48, 'attention_time': 'post',
'no_cuda': False, 'gpu': -1,
'rank_candidates': False, 'truncate': 150,
'rnn_class': 'lstm', 'decoder': 'same',
'lookuptable': 'enc_dec', 'optimizer': 'sgd',
'momentum': 0.9, 'embedding_type': 'random',
'numsoftmax': 1, 'report_freq': 0.001,
'history_replies': 'label_else_model',
'person_tokens': False,
'dict_file': '',
'dict_initpath': None,
'dict_language': 'english',
'dict_max_ngram_size': -1, 'dict_minfreq': 0,
'dict_maxtokens': 30000, 'dict_nulltoken': '__NULL__',
'dict_starttoken': '__START__', 'dict_endtoken': '__END__',
'dict_unktoken': '__UNK__', 'dict_tokenizer': 're', 'dict_lower': True,
'parlai_home': '/mnt/home/liuhaoc1/ParlAI',
'starttime': 'Jun15_16-53', 'show_advanced_args': False,
'pytorch_teacher_task': None, 'pytorch_teacher_dataset': None,
'pytorch_datapath': None, 'numworkers': 4, 'pytorch_preprocess': False,
'pytorch_teacher_batch_sort': False, 'batch_sort_cache_type': 'pop',
'batch_length_range': 5, 'shuffle': False, 'batch_sort_field': 'text',
'pytorch_context_length': -1, 'pytorch_include_labels': True, 'num_examples': -1,
'metrics': 'all', 'beam_size': 1, 'beam_log_freq': 0.0, 'topk': 1,
'softmax_layer_bias': False, 'bpe_debug': False, 'dict_textfields': 'text,labels'} | true | true |
f72b6b557eea5c628cc1be578fe43d5224c205f4 | 6,820 | py | Python | ivy_tests/test_ivy/test_functional/test_core/test_nest.py | mattbarrett98/ivy | a706e59b907c0f78edb819959cc2035ebf48946f | [
"Apache-2.0"
] | null | null | null | ivy_tests/test_ivy/test_functional/test_core/test_nest.py | mattbarrett98/ivy | a706e59b907c0f78edb819959cc2035ebf48946f | [
"Apache-2.0"
] | null | null | null | ivy_tests/test_ivy/test_functional/test_core/test_nest.py | mattbarrett98/ivy | a706e59b907c0f78edb819959cc2035ebf48946f | [
"Apache-2.0"
] | null | null | null | """
Collection of tests for unified general functions
"""
# global
import copy
import pytest
# local
import ivy
import ivy.functional.backends.numpy
# Helpers #
# --------#
def _snai(n, idx, v):
if len(idx) == 1:
n[idx[0]] = v
else:
_snai(n[idx[0]], idx[1:], v)
def _mnai(n, idx, fn):
if len(idx) == 1:
n[idx[0]] = fn(n[idx[0]])
else:
_mnai(n[idx[0]], idx[1:], fn)
# Tests #
# ------#
# index_nest
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
    "index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
def test_index_nest(nest, index, device, call):
    """``ivy.index_nest`` must match indexing the nest one level at a time."""
    ret = ivy.index_nest(nest, index)
    # Build the expected value by manual successive indexing.
    true_ret = nest
    for i in index:
        true_ret = true_ret[i]
    assert ret == true_ret
# set_nest_at_index
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
    "index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
@pytest.mark.parametrize("value", [1])
def test_set_nest_at_index(nest, index, value, device, call):
    """``ivy.set_nest_at_index`` must mutate the nest exactly like the
    reference helper ``_snai``."""
    nest_copy = copy.deepcopy(nest)
    ivy.set_nest_at_index(nest, index, value)
    _snai(nest_copy, index, value)
    assert nest == nest_copy
# map_nest_at_index
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
    "index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0)]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2, lambda x: x**2])
def test_map_nest_at_index(nest, index, fn, device, call):
    """``ivy.map_nest_at_index`` must mutate the nest exactly like the
    reference helper ``_mnai``."""
    nest_copy = copy.deepcopy(nest)
    ivy.map_nest_at_index(nest, index, fn)
    _mnai(nest_copy, index, fn)
    assert nest == nest_copy
# multi_index_nest
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
    "multi_indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
def test_multi_index_nest(nest, multi_indices, device, call):
    """``ivy.multi_index_nest`` must return, in order, the element at each
    of the given index paths."""
    rets = ivy.multi_index_nest(nest, multi_indices)
    # Expected values via manual successive indexing per path.
    true_rets = list()
    for indices in multi_indices:
        true_ret = nest
        for i in indices:
            true_ret = true_ret[i]
        true_rets.append(true_ret)
    assert rets == true_rets
# set_nest_at_indices
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
    "indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
@pytest.mark.parametrize("values", [(1, 2)])
def test_set_nest_at_indices(nest, indices, values, device, call):
    """``ivy.set_nest_at_indices`` must mutate the nest exactly like applying
    the reference helper ``_snai`` at each (index, value) pair."""
    nest_copy = copy.deepcopy(nest)
    ivy.set_nest_at_indices(nest, indices, values)

    def snais(n, idxs, vs):
        # Plain loop: the original used a list comprehension purely for its
        # side effects, building and discarding a list of Nones.
        for index, value in zip(idxs, vs):
            _snai(n, index, value)

    snais(nest_copy, indices, values)
    assert nest == nest_copy
# map_nest_at_indices
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
    "indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0))]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2, lambda x: x**2])
def test_map_nest_at_indices(nest, indices, fn, device, call):
    """``ivy.map_nest_at_indices`` must mutate the nest exactly like applying
    the reference helper ``_mnai`` at every index."""
    nest_copy = copy.deepcopy(nest)
    ivy.map_nest_at_indices(nest, indices, fn)

    def mnais(n, idxs, f):
        # The original signature declared an unused ``vs`` parameter and
        # silently read ``fn`` from the closure; take the function explicitly
        # and use a plain loop instead of a side-effect comprehension.
        for index in idxs:
            _mnai(n, index, f)

    mnais(nest_copy, indices, fn)
    assert nest == nest_copy
# nested_indices_where
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_indices_where(nest, device, call):
    """``ivy.nested_indices_where`` must return the index paths of exactly
    the leaves satisfying the predicate (here: value < 5), in order."""
    indices = ivy.nested_indices_where(nest, lambda x: x < 5)
    assert indices[0] == ["a", 0, 0]
    assert indices[1] == ["a", 1, 0]
    assert indices[2] == ["b", "c", 0, 0, 0]
    assert indices[3] == ["b", "c", 0, 1, 0]
# nested_indices_where_w_nest_checks
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_indices_where_w_nest_checks(nest, device, call):
    """With ``check_nests=True`` the predicate is also applied to the nests
    themselves, so list containers match alongside qualifying leaves."""
    indices = ivy.nested_indices_where(
        nest, lambda x: isinstance(x, list) or (isinstance(x, int) and x < 5), True
    )
    # Leaves and their enclosing lists are reported depth-first.
    assert indices[0] == ["a", 0, 0]
    assert indices[1] == ["a", 0]
    assert indices[2] == ["a", 1, 0]
    assert indices[3] == ["a", 1]
    assert indices[4] == ["a"]
    assert indices[5] == ["b", "c", 0, 0, 0]
    assert indices[6] == ["b", "c", 0, 0]
    assert indices[7] == ["b", "c", 0, 1, 0]
    assert indices[8] == ["b", "c", 0, 1]
    assert indices[9] == ["b", "c", 0]
    assert indices[10] == ["b", "c", 1, 0]
    assert indices[11] == ["b", "c", 1, 1]
    assert indices[12] == ["b", "c", 1]
    assert indices[13] == ["b", "c"]
# all_nested_indices
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices(nest, device, call):
    """``ivy.all_nested_indices`` must list every leaf's index path,
    depth-first."""
    indices = ivy.all_nested_indices(nest)
    assert indices[0] == ["a", 0, 0]
    assert indices[1] == ["a", 1, 0]
    assert indices[2] == ["b", "c", 0, 0, 0]
    assert indices[3] == ["b", "c", 0, 1, 0]
    assert indices[4] == ["b", "c", 1, 0, 0]
    assert indices[5] == ["b", "c", 1, 1, 0]
# all_nested_indices_w_nest_checks
@pytest.mark.parametrize(
    "nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices_w_nest_checks(nest, device, call):
    """With nest checks enabled, every intermediate container's index path is
    reported in addition to every leaf's, depth-first."""
    indices = ivy.all_nested_indices(nest, True)
    assert indices[0] == ["a", 0, 0]
    assert indices[1] == ["a", 0]
    assert indices[2] == ["a", 1, 0]
    assert indices[3] == ["a", 1]
    assert indices[4] == ["a"]
    assert indices[5] == ["b", "c", 0, 0, 0]
    assert indices[6] == ["b", "c", 0, 0]
    assert indices[7] == ["b", "c", 0, 1, 0]
    assert indices[8] == ["b", "c", 0, 1]
    assert indices[9] == ["b", "c", 0]
    assert indices[10] == ["b", "c", 1, 0, 0]
    assert indices[11] == ["b", "c", 1, 0]
    assert indices[12] == ["b", "c", 1, 1, 0]
    assert indices[13] == ["b", "c", 1, 1]
    assert indices[14] == ["b", "c", 1]
    assert indices[15] == ["b", "c"]
    assert indices[16] == ["b"]
# copy_nest
def test_copy_nest(device, call):
    """``ivy.copy_nest`` must produce a shallow structural copy: new
    containers, but the same array objects inside them."""
    nest = {
        "a": [ivy.array([0]), ivy.array([1])],
        "b": {"c": [ivy.array([[2], [4]]), ivy.array([[6], [8]])]},
    }
    nest_copy = ivy.copy_nest(nest)

    # copied nests
    assert nest["a"] is not nest_copy["a"]
    assert nest["b"] is not nest_copy["b"]
    assert nest["b"]["c"] is not nest_copy["b"]["c"]

    # non-copied arrays
    assert nest["a"][0] is nest_copy["a"][0]
    assert nest["a"][1] is nest_copy["a"][1]
    assert nest["b"]["c"][0] is nest_copy["b"]["c"][0]
    assert nest["b"]["c"][1] is nest_copy["b"]["c"][1]
| 29.396552 | 87 | 0.532111 |
import copy
import pytest
import ivy
import ivy.functional.backends.numpy
def _snai(n, idx, v):
if len(idx) == 1:
n[idx[0]] = v
else:
_snai(n[idx[0]], idx[1:], v)
def _mnai(n, idx, fn):
if len(idx) == 1:
n[idx[0]] = fn(n[idx[0]])
else:
_mnai(n[idx[0]], idx[1:], fn)
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
def test_index_nest(nest, index, device, call):
ret = ivy.index_nest(nest, index)
true_ret = nest
for i in index:
true_ret = true_ret[i]
assert ret == true_ret
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
@pytest.mark.parametrize("value", [1])
def test_set_nest_at_index(nest, index, value, device, call):
nest_copy = copy.deepcopy(nest)
ivy.set_nest_at_index(nest, index, value)
_snai(nest_copy, index, value)
assert nest == nest_copy
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0)]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2, lambda x: x**2])
def test_map_nest_at_index(nest, index, fn, device, call):
nest_copy = copy.deepcopy(nest)
ivy.map_nest_at_index(nest, index, fn)
_mnai(nest_copy, index, fn)
assert nest == nest_copy
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
"multi_indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
def test_multi_index_nest(nest, multi_indices, device, call):
rets = ivy.multi_index_nest(nest, multi_indices)
true_rets = list()
for indices in multi_indices:
true_ret = nest
for i in indices:
true_ret = true_ret[i]
true_rets.append(true_ret)
assert rets == true_rets
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
@pytest.mark.parametrize("values", [(1, 2)])
def test_set_nest_at_indices(nest, indices, values, device, call):
nest_copy = copy.deepcopy(nest)
ivy.set_nest_at_indices(nest, indices, values)
def snais(n, idxs, vs):
[_snai(n, index, value) for index, value in zip(idxs, vs)]
snais(nest_copy, indices, values)
assert nest == nest_copy
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0))]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2, lambda x: x**2])
def test_map_nest_at_indices(nest, indices, fn, device, call):
nest_copy = copy.deepcopy(nest)
ivy.map_nest_at_indices(nest, indices, fn)
def mnais(n, idxs, vs):
[_mnai(n, index, fn) for index in idxs]
mnais(nest_copy, indices, fn)
assert nest == nest_copy
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_indices_where(nest, device, call):
indices = ivy.nested_indices_where(nest, lambda x: x < 5)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 1, 0]
assert indices[2] == ["b", "c", 0, 0, 0]
assert indices[3] == ["b", "c", 0, 1, 0]
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_indices_where_w_nest_checks(nest, device, call):
indices = ivy.nested_indices_where(
nest, lambda x: isinstance(x, list) or (isinstance(x, int) and x < 5), True
)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 0]
assert indices[2] == ["a", 1, 0]
assert indices[3] == ["a", 1]
assert indices[4] == ["a"]
assert indices[5] == ["b", "c", 0, 0, 0]
assert indices[6] == ["b", "c", 0, 0]
assert indices[7] == ["b", "c", 0, 1, 0]
assert indices[8] == ["b", "c", 0, 1]
assert indices[9] == ["b", "c", 0]
assert indices[10] == ["b", "c", 1, 0]
assert indices[11] == ["b", "c", 1, 1]
assert indices[12] == ["b", "c", 1]
assert indices[13] == ["b", "c"]
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices(nest, device, call):
indices = ivy.all_nested_indices(nest)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 1, 0]
assert indices[2] == ["b", "c", 0, 0, 0]
assert indices[3] == ["b", "c", 0, 1, 0]
assert indices[4] == ["b", "c", 1, 0, 0]
assert indices[5] == ["b", "c", 1, 1, 0]
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices_w_nest_checks(nest, device, call):
indices = ivy.all_nested_indices(nest, True)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 0]
assert indices[2] == ["a", 1, 0]
assert indices[3] == ["a", 1]
assert indices[4] == ["a"]
assert indices[5] == ["b", "c", 0, 0, 0]
assert indices[6] == ["b", "c", 0, 0]
assert indices[7] == ["b", "c", 0, 1, 0]
assert indices[8] == ["b", "c", 0, 1]
assert indices[9] == ["b", "c", 0]
assert indices[10] == ["b", "c", 1, 0, 0]
assert indices[11] == ["b", "c", 1, 0]
assert indices[12] == ["b", "c", 1, 1, 0]
assert indices[13] == ["b", "c", 1, 1]
assert indices[14] == ["b", "c", 1]
assert indices[15] == ["b", "c"]
assert indices[16] == ["b"]
def test_copy_nest(device, call):
nest = {
"a": [ivy.array([0]), ivy.array([1])],
"b": {"c": [ivy.array([[2], [4]]), ivy.array([[6], [8]])]},
}
nest_copy = ivy.copy_nest(nest)
assert nest["a"] is not nest_copy["a"]
assert nest["b"] is not nest_copy["b"]
assert nest["b"]["c"] is not nest_copy["b"]["c"]
assert nest["a"][0] is nest_copy["a"][0]
assert nest["a"][1] is nest_copy["a"][1]
assert nest["b"]["c"][0] is nest_copy["b"]["c"][0]
assert nest["b"]["c"][1] is nest_copy["b"]["c"][1]
| true | true |
f72b6e15d5c16951ab125cba7909154559f185ff | 1,850 | py | Python | app/i18n.py | CircuitsBots/discord-i18n | a0832a464566850eeda9bb386d7528d2d63a8fd8 | [
"MIT"
] | 1 | 2021-05-24T15:37:55.000Z | 2021-05-24T15:37:55.000Z | app/i18n.py | CircuitsBots/discord-i18n | a0832a464566850eeda9bb386d7528d2d63a8fd8 | [
"MIT"
] | 1 | 2021-05-26T12:47:11.000Z | 2021-05-26T13:31:31.000Z | app/i18n.py | CircuitsBots/discord-i18n | a0832a464566850eeda9bb386d7528d2d63a8fd8 | [
"MIT"
] | null | null | null | import contextvars
import gettext
import os.path
from glob import glob
from app.t_string import TString
BASE_DIR = ""
LOCALE_DEFAULT = "en_US"
LOCALE_DIR = "locale"
locales = frozenset(
map(
os.path.basename,
filter(os.path.isdir, glob(os.path.join(BASE_DIR, LOCALE_DIR, "*"))),
)
)
gettext_translations = {
locale: gettext.translation(
"bot",
languages=(locale,),
localedir=os.path.join(BASE_DIR, LOCALE_DIR),
)
for locale in locales
}
gettext_translations["en_US"] = gettext.NullTranslations()
locales |= {"en_US"}
def use_current_gettext(*args, **kwargs) -> str:
    """Translate using the catalogue for the locale in ``current_locale``.

    Falls back to the plain ``gettext`` module when no catalogues were
    loaded, and to the ``LOCALE_DEFAULT`` catalogue for unknown locales.

    :return: The gettext for the current locale
    :rtype: str
    """
    if not gettext_translations:
        return gettext.gettext(*args, **kwargs)

    active = current_locale.get()
    translations = gettext_translations.get(active)
    if translations is None:
        translations = gettext_translations[LOCALE_DEFAULT]
    return translations.gettext(*args, **kwargs)
def translate(string: str) -> str:
    """Translate *string* immediately for the current locale.

    :param string: The text that needs translation
    :type string: str
    :return: The translated text
    :rtype: str
    """
    # str() forces the TString to resolve right away.
    return str(TString(string, use_current_gettext))
def lazy_translate(string: str) -> TString:
    """Wrap *string* so that translation happens only when it is rendered.

    :param string: The text that needs translation
    :type string: str
    :return: The TString object that can be translated later
    :rtype: TString
    """
    return TString(string, use_current_gettext)
# Context variable holding the active locale for the current execution context.
current_locale: contextvars.ContextVar = contextvars.ContextVar("i18n")


def set_current_locale():
    """Sets the locale to the LOCALE_DEFAULT."""
    current_locale.set(LOCALE_DEFAULT)


# Initialise at import time so ``current_locale.get()`` never raises LookupError.
set_current_locale()
| 23.125 | 77 | 0.687027 | import contextvars
import gettext
import os.path
from glob import glob
from app.t_string import TString
BASE_DIR = ""
LOCALE_DEFAULT = "en_US"
LOCALE_DIR = "locale"
locales = frozenset(
map(
os.path.basename,
filter(os.path.isdir, glob(os.path.join(BASE_DIR, LOCALE_DIR, "*"))),
)
)
gettext_translations = {
locale: gettext.translation(
"bot",
languages=(locale,),
localedir=os.path.join(BASE_DIR, LOCALE_DIR),
)
for locale in locales
}
gettext_translations["en_US"] = gettext.NullTranslations()
locales |= {"en_US"}
def use_current_gettext(*args, **kwargs) -> str:
if not gettext_translations:
return gettext.gettext(*args, **kwargs)
locale = current_locale.get()
return gettext_translations.get(
locale, gettext_translations[LOCALE_DEFAULT]
).gettext(*args, **kwargs)
def translate(string: str) -> str:
tstring = TString(string, use_current_gettext)
return str(tstring)
def lazy_translate(string: str) -> TString:
tstring = TString(string, use_current_gettext)
return tstring
current_locale: contextvars.ContextVar = contextvars.ContextVar("i18n")
def set_current_locale():
current_locale.set(LOCALE_DEFAULT)
set_current_locale()
| true | true |
f72b6e7d0faa806aa74633ee75bd77534ef86dea | 4,079 | py | Python | Saniti/main.py | ChamRoshi/Saniti | ddff09e60df1a2046e79e4356f07573f30210f22 | [
"Apache-2.0"
] | null | null | null | Saniti/main.py | ChamRoshi/Saniti | ddff09e60df1a2046e79e4356f07573f30210f22 | [
"Apache-2.0"
] | null | null | null | Saniti/main.py | ChamRoshi/Saniti | ddff09e60df1a2046e79e4356f07573f30210f22 | [
"Apache-2.0"
] | null | null | null | # import nltk
# import gensim
# import pandas
import string
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk import WordNetLemmatizer
from nltk import pos_tag
from nltk.stem import PorterStemmer
from gensim.models.doc2vec import TaggedDocument
from gensim.corpora import Dictionary
from gensim.models.phrases import Phrases, Phraser
"""
TODO
stemming - DONE
lemmatizing - DONE
pos filter
tfidf splitter
w2v theme relevence
w2v weightings
frequency filtering (found more than twice)
RESEARCH
kwargumenrts - ad hoc arguments for theme relevence
"""
class saniti:
    """Configurable text-sanitisation pipeline built on NLTK and Gensim.

    A pipeline is a list of step names; each step maps a list of documents to
    a new list of documents (or, for the ``out_*`` steps, a final output
    structure).  Keyword arguments given to ``process`` are forwarded to
    every step, which picks out the ones it understands.
    """

    def __init__(self, text=[], pipeline=[], **kwargs):
        # NOTE: the mutable defaults are only ever read, never mutated, so
        # the shared-default pitfall does not apply here; they are kept for
        # backward compatibility (``out_tag_doc`` compares against ``[]``).
        # Registry of step name -> bound method; ``process`` dispatches on it.
        self.processes = {"token": self.token,
                          "depunct": self.depunct,
                          "unempty": self.unempty,
                          "out_tag_doc": self.out_tag_doc,
                          "out_corp_dict": self.out_corp_dic,
                          "lemma": self.lemma,
                          "destop": self.destop,
                          "posfilter": self.posfilter,
                          "phrase": self.phrase_gen,
                          "stem": self.stem}
        self.pipeline = pipeline
        self.original_text = text
        if text != []:
            # Eagerly run the pipeline when text is supplied at construction.
            self.text = self.process(text, self.pipeline, **kwargs)

    def process(self, text, pipeline, **kwargs):
        """Run ``text`` through each named step of ``pipeline`` in order."""
        self.text = text
        for step in pipeline:
            text = self.processes[step](text, **kwargs)
        return text

    def destop(self, text, **kwargs):
        """Remove English stop words from each tokenised document."""
        # Hoist the stop-word list into a set once: O(1) membership instead
        # of scanning the list for every token.
        stops = set(stopwords.words("english"))
        return [[word for word in doc if word not in stops] for doc in text]

    def token(self, text, **kwargs):
        """Tokenise each raw string; ``tokenizer=`` overrides nltk.word_tokenize."""
        tokenizer = kwargs.get("tokenizer", word_tokenize)
        return [tokenizer(doc) for doc in text]

    def depunct(self, text, **kwargs):
        """Strip punctuation characters from every token.

        BUG FIX: the original tested ``"puct" in kwargs`` (typo) but read
        ``kwargs["punct"]``, so a caller-supplied ``punct=`` was silently
        ignored.
        """
        punct = kwargs.get("punct", string.punctuation)
        table = str.maketrans("", "", punct)
        return [[tok.translate(table) for tok in doc] for doc in text]

    def unempty(self, text, **kwargs):
        """Drop tokens left empty (e.g. after punctuation stripping)."""
        return [[tok for tok in doc if tok != ""] for doc in text]

    def lemma(self, text, **kwargs):
        """Lemmatise every token; ``lemmatizer=`` overrides WordNetLemmatizer."""
        lemmatizer = kwargs.get("lemmatizer") or WordNetLemmatizer()
        return [[lemmatizer.lemmatize(w) for w in doc] for doc in text]

    def phrase_gen(self, text, **kwargs):
        """Merge frequent collocations into single ``a_b`` tokens.

        ``common_terms=`` overrides the connector-word list (default:
        English stop words).
        """
        common_terms = kwargs.get("common_terms", stopwords.words("english"))
        phraser = Phraser(Phrases(text, common_terms=common_terms))
        return [phraser[doc] for doc in text]

    def stem(self, text, **kwargs):
        """Stem every token; ``stemmer=`` overrides PorterStemmer."""
        stemmer = kwargs.get("stemmer") or PorterStemmer()
        return [[stemmer.stem(word) for word in doc] for doc in text]

    def posfilter(self, text, **kwargs):
        """Keep only tokens whose POS tag is in ``pos_only`` (default NN/VB).

        A leftover debug ``print(text)`` from the original was removed.
        """
        pos_tagger = kwargs.get("pos_tagger", pos_tag)
        pos_only = kwargs.get("pos_only", ["NN", "VB"])
        # Empty documents are passed through untouched (tagging [] is useless).
        return [
            [word for word, tag in pos_tagger(doc) if tag in pos_only]
            if doc != [] else doc
            for doc in text
        ]

    def out_corp_dic(self, text, **kwargs):
        """Build a Gensim dictionary and bag-of-words corpus from ``text``."""
        dictionary = Dictionary(text)
        corpus = [dictionary.doc2bow(doc) for doc in text]
        return {"dictionary": dictionary, "corpus": corpus}

    def out_tag_doc(self, text, **kwargs):
        """Wrap each document in a Gensim ``TaggedDocument``.

        Tags default to the original input strings when available, otherwise
        to the whitespace-joined tokens of each document.

        BUG FIX: the original ``except`` handler printed the undefined name
        ``x`` (a NameError inside the handler) and was a bare ``except``;
        it now reports the offending tag and the actual error.
        """
        tags = kwargs.get("tags", [])
        if tags == []:
            if self.original_text != []:
                tags = self.original_text
            else:
                tags = [" ".join(doc) for doc in text]
        tagged = []
        for tokens, tag in zip(text, tags):
            try:
                tagged.append(TaggedDocument(tokens, [tag]))
            except Exception as err:
                print(f"disambig {tag}: {err}")
        return tagged
if __name__ == "__main__":
    # Demo 1: run a pipeline eagerly at construction time.
    original_text = ["I like to moves it, move its", "I likeing to move it!", "the of"]
    text = saniti(original_text, ["token", "destop", "depunct", "unempty", "phrase"])
    print(text.text)
    # Demo 2: reuse one saniti instance as a stateless processor.
    sani1 = saniti()
    text = sani1.process(original_text, ["token", "destop", "depunct", "unempty", "lemma", "out_tag_doc"])
    print(text)
| 21.356021 | 109 | 0.664379 |
import string
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk import WordNetLemmatizer
from nltk import pos_tag
from nltk.stem import PorterStemmer
from gensim.models.doc2vec import TaggedDocument
from gensim.corpora import Dictionary
from gensim.models.phrases import Phrases, Phraser
class saniti:
    """Composable text-sanitisation pipeline built on nltk/gensim.

    A corpus flowing through the pipeline is a list of documents.  Before
    the "token" stage each document is a raw string; afterwards it is a
    list of token strings.  Stages are selected by name and run in order::

        saniti(["some text"], ["token", "destop", "unempty"]).text

    Every stage accepts **kwargs so callers can inject their own tokenizer,
    lemmatizer, stemmer, POS tagger, tags, etc.
    """

    def __init__(self, text=None, pipeline=None, **kwargs):
        """Optionally process *text* through *pipeline* immediately.

        Fix: the original used mutable default arguments (text=[],
        pipeline=[]), which are shared across calls/instances.
        """
        # Registry mapping pipeline step names to bound stage methods.
        self.processes = {"token": self.token,
                          "depunct": self.depunct,
                          "unempty": self.unempty,
                          "out_tag_doc": self.out_tag_doc,
                          "out_corp_dict": self.out_corp_dic,
                          "lemma": self.lemma,
                          "destop": self.destop,
                          "posfilter": self.posfilter,
                          "phrase": self.phrase_gen,
                          "stem": self.stem}
        self.pipeline = [] if pipeline is None else pipeline
        self.original_text = [] if text is None else text
        if self.original_text:
            # One-shot mode: the result is available as .text right away.
            self.text = self.process(self.original_text, self.pipeline, **kwargs)

    def process(self, text, pipeline, **kwargs):
        """Run *text* through the named *pipeline* stages, returning the result."""
        self.text = text  # keep the latest input around, as the original did
        for step_name in pipeline:
            text = self.processes[step_name](text, **kwargs)
        return text

    def destop(self, text, **kwargs):
        """Drop English stopwords from every document."""
        # Perf fix: the original called stopwords.words("english") once per
        # token (an O(n*m) list scan); hoist it and use a set.
        stops = set(stopwords.words("english"))
        return [[word for word in doc if word not in stops] for doc in text]

    def token(self, text, **kwargs):
        """Split each raw-string document into a list of tokens."""
        if "tokenizer" in kwargs:
            tokenizer = kwargs["tokenizer"]
        else:
            tokenizer = word_tokenize  # nltk default
        return [tokenizer(doc) for doc in text]

    def depunct(self, text, **kwargs):
        """Strip punctuation characters from every token.

        kwargs["punct"] may override the character set (defaults to
        string.punctuation).
        """
        # Bug fix: the original tested for the misspelled key "puct" but then
        # read kwargs["punct"], so a supplied "punct" was silently ignored
        # and a supplied "puct" raised KeyError.
        if "punct" in kwargs:
            punct = kwargs["punct"]
        else:
            punct = string.punctuation
        table = str.maketrans("", "", punct)
        return [[token.translate(table) for token in doc] for doc in text]

    def unempty(self, text, **kwargs):
        """Drop empty-string tokens left over by earlier stages."""
        return [[token for token in doc if token != ""] for doc in text]

    def lemma(self, text, **kwargs):
        """Lemmatize every token (kwargs["lemmatizer"] or nltk WordNet)."""
        if "lemmatizer" in kwargs:
            lemmatizer = kwargs["lemmatizer"]
        else:
            lemmatizer = WordNetLemmatizer()
        return [[lemmatizer.lemmatize(w) for w in doc] for doc in text]

    def phrase_gen(self, text, **kwargs):
        """Merge frequent collocations into phrase tokens (gensim).

        NOTE(review): gensim 4.x renamed `common_terms` to `connector_words`
        — confirm the installed gensim still accepts this keyword.
        """
        if "common_terms" in kwargs:
            common_terms = kwargs["common_terms"]
        else:
            common_terms = stopwords.words("english")
        phrases = Phrases(text, common_terms=common_terms)
        phraser = Phraser(phrases)
        return [phraser[doc] for doc in text]

    def stem(self, text, **kwargs):
        """Stem every token (kwargs["stemmer"] or nltk PorterStemmer)."""
        if "stemmer" in kwargs:
            stemmer = kwargs["stemmer"]
        else:
            stemmer = PorterStemmer()
        return [[stemmer.stem(word) for word in doc] for doc in text]

    def posfilter(self, text, **kwargs):
        """Keep only tokens whose POS tag is in kwargs["pos_only"].

        Defaults: nltk pos_tag tagger, allowed tags ["NN", "VB"].  Empty
        documents pass through unchanged.
        """
        if "pos_tagger" not in kwargs:
            pos_tagger = pos_tag
        else:
            pos_tagger = kwargs["pos_tagger"]
        if "pos_only" not in kwargs:
            pos_only = ["NN", "VB"]
        else:
            pos_only = kwargs["pos_only"]
        # Fix: removed a stray debug print(text) left over from development.
        return [[word for word, tag in pos_tagger(doc) if tag in pos_only]
                if doc else doc
                for doc in text]

    def out_corp_dic(self, text, **kwargs):
        """Terminal stage: gensim Dictionary plus bag-of-words corpus."""
        dictionary = Dictionary(text)
        corpus = [dictionary.doc2bow(doc) for doc in text]
        return {"dictionary": dictionary, "corpus": corpus}

    def out_tag_doc(self, text, **kwargs):
        """Terminal stage: wrap each document in a gensim TaggedDocument.

        Tags come from kwargs["tags"], else the raw input strings kept in
        self.original_text, else the joined tokens of each document.
        """
        tags = kwargs.get("tags", [])
        if not tags:
            if self.original_text:
                tags = self.original_text
            else:
                tags = [" ".join(doc) for doc in text]
        tagged = []
        for doc, tag in zip(text, tags):
            try:
                tagged.append(TaggedDocument(doc, [tag]))
            except Exception:
                # Bug fix: the original printed the undefined name `x` here
                # (NameError); report the offending document instead.
                print(f"disambig {doc}")
        return tagged
if __name__ == "__main__":
    # Smoke test / usage demo for the saniti pipeline.
    original_text = ["I like to moves it, move its", "I likeing to move it!", "the of"]
    # One-shot mode: construct with a pipeline and read the result from .text.
    text = saniti(original_text, ["token", "destop", "depunct", "unempty", "phrase"])
    print(text.text)
    # Reusable mode: build an empty saniti and call process() explicitly.
    sani1 = saniti()
    text = sani1.process(original_text, ["token", "destop", "depunct", "unempty", "lemma", "out_tag_doc"])
    print(text)
| true | true |
f72b6ec0218d668349c89e6dabbf4bb56ed17158 | 3,294 | py | Python | pytest/testFragmentedBackup.py | RomanValov/ArmoryDB | 625eff9712161676ad83deb03616e6edb48283ca | [
"MIT"
] | 505 | 2016-02-04T15:54:46.000Z | 2022-03-27T18:43:01.000Z | pytest/testFragmentedBackup.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 528 | 2016-02-06T19:50:12.000Z | 2022-01-15T10:21:16.000Z | pytest/testFragmentedBackup.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 208 | 2015-01-02T10:31:40.000Z | 2021-12-14T07:37:36.000Z | ################################################################################
# #
# Copyright (C) 2011-2014, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
import sys
sys.path.append('..')
from pytest.Tiab import TiabTest
from armoryengine.ArmoryUtils import SplitSecret, binary_to_hex, ReconstructSecret,\
FiniteFieldError
import itertools
import unittest
# 8-byte test secret used throughout the m-of-n split/reconstruct tests.
SECRET = '\x00\x01\x02\x03\x04\x05\x06\x07'
# All-0xff value: the tests below expect SplitSecret to reject it with
# FiniteFieldError ("secret too big" case).
BAD_SECRET = '\xff\xff\xff\xff\xff\xff\xff\xff'
# Fragment combination to String abreviated name for debugging purposes
def c2s(combinationMap):
    """Render a fragment-combination map as a readable multi-line string.

    One line per entry: "<index> <hex of v[0]> <hex of v[1]>" — presumably
    the fragment's (x, y) coordinate pair; verify against SplitSecret's
    fragment format.  Debugging helper only.  NOTE: Python 2 (`iteritems`).
    """
    return '\n'.join([' '.join([str(k), binary_to_hex(v[0]), binary_to_hex(v[1])]) \
        for k,v in combinationMap.iteritems()])
def splitSecretToFragmentMap(splitSecret):
    """Number the fragments, returning a {index: fragment} map."""
    return dict(enumerate(splitSecret))
class Test(TiabTest):
    """Exhaustive tests for m-of-n secret sharing (fragmented wallet backups).

    NOTE: Python 2 code (`iterkeys`/`itervalues`); runs inside Armory's
    TiabTest harness.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def getNextCombination(self, fragmentMap, m):
        # Generator: yield every m-sized sub-map of fragmentMap.
        combinationIterator = itertools.combinations(fragmentMap.iterkeys(), m)
        for keyList in combinationIterator:
            combinationMap = {}
            for key in keyList:
                combinationMap[key] = fragmentMap[key]
            yield combinationMap

    def subtestAllFragmentedBackups(self, secret, m, n):
        # Split the secret m-of-n, then verify that EVERY m-subset of the
        # fragments reconstructs the original secret exactly.
        fragmentMap = splitSecretToFragmentMap(SplitSecret(secret, m, n))
        for combinationMap in self.getNextCombination(fragmentMap, m):
            fragmentList = [value for value in combinationMap.itervalues()]
            reconSecret = ReconstructSecret(fragmentList, m, len(secret))
            self.assertEqual(reconSecret, secret)

    def testFragmentedBackup(self):
        # (2, 3) is exercised twice — looks like a copy/paste leftover;
        # harmless but redundant.
        self.subtestAllFragmentedBackups(SECRET, 2, 3)
        self.subtestAllFragmentedBackups(SECRET, 2, 3)
        self.subtestAllFragmentedBackups(SECRET, 3, 4)
        self.subtestAllFragmentedBackups(SECRET, 5, 7)
        self.subtestAllFragmentedBackups(SECRET, 8, 8)
        self.subtestAllFragmentedBackups(SECRET, 2, 12)
        # Secret Too big test
        self.assertRaises(FiniteFieldError, SplitSecret, BAD_SECRET, 2,3)
        # More needed than pieces
        self.assertRaises(FiniteFieldError, SplitSecret, SECRET, 4,3)
        # Secret Too many needed needed
        self.assertRaises(FiniteFieldError, SplitSecret, SECRET, 9, 12)
        # Too few pieces needed
        self.assertRaises(FiniteFieldError, SplitSecret, SECRET, 1, 12)
        # Test Reconstuction failures: too few fragments must NOT yield
        # the real secret.
        fragmentList = SplitSecret(SECRET, 3, 5)
        reconSecret = ReconstructSecret(fragmentList[:2], 2, len(SECRET))
        self.assertNotEqual(reconSecret, SECRET)
# Running tests with "python <module name>" will NOT work for any Armory tests
# You must run tests with "python -m unittest <module name>" or run all tests with "python -m unittest discover"
# if __name__ == "__main__":
# unittest.main() | 37.862069 | 112 | 0.61779 | true | true | |
f72b6ff661d13354a6eeda99808229daf19f9eb3 | 988 | py | Python | tools/leetcode.223.Rectangle Area/leetcode.223.Rectangle Area.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | 4 | 2015-10-10T00:30:55.000Z | 2020-07-27T19:45:54.000Z | tools/leetcode.223.Rectangle Area/leetcode.223.Rectangle Area.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | tools/leetcode.223.Rectangle Area/leetcode.223.Rectangle Area.submission3.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | class Solution:
# @param {integer} A
# @param {integer} B
# @param {integer} C
# @param {integer} D
# @param {integer} E
# @param {integer} F
# @param {integer} G
# @param {integer} H
# @return {integer}
def computeArea(self, A, B, C, D, E, F, G, H):
# calculate the separate areas
w1 = C - A
h1 = D - B
w2 = G - E
h2 = H - F
A1 = w1*h1
A2 = w2*h2
if A1 == 0:
return A2
if A2 == 0:
return A1
# calculate the intersected area
wi = 0
if A <= E:
if E-A < w1:
wi = min(A+w1 - E,w2)
else:
if A - E < w2:
wi = min(E+w2 - A,w1)
hi = 0
if F <= B:
if B - F< h2:
hi = min(F + h2 - B,h1)
else:
if F - B < h1:
hi = min(B + h1 - F,h2)
A3 = wi * hi
return A1+A2-A3
| 988 | 988 | 0.374494 | class Solution:
| true | true |
f72b707bda389dc12a15cc8354b94ca79cf61a68 | 9,096 | py | Python | mfem/_ser/symmat.py | GabrielJie/PyMFEM | fa654447ac6819c5aa0341397b91a299f4ce5492 | [
"BSD-3-Clause"
] | 1 | 2022-01-19T07:16:59.000Z | 2022-01-19T07:16:59.000Z | mfem/_ser/symmat.py | GabrielJie/PyMFEM | fa654447ac6819c5aa0341397b91a299f4ce5492 | [
"BSD-3-Clause"
] | null | null | null | mfem/_ser/symmat.py | GabrielJie/PyMFEM | fa654447ac6819c5aa0341397b91a299f4ce5492 | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Require Python >= 2.7 before touching the generated bindings.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")

# Import the low-level C/C++ module
# (package-relative when loaded as part of a package, absolute otherwise).
if __package__ or "." in __name__:
    from . import _symmat
else:
    import _symmat

# Python 2/3 compatibility alias for the builtins module.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__

# Wrap raw C callables from the extension module as Python methods.
_swig_new_instance_method = _symmat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _symmat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    """Build a __setattr__ that forbids creating NEW instance attributes.

    `set` is the parent class's __setattr__.  Only `thisown`, `this`,
    already-existing attributes, and properties may be assigned.
    """
    def set_instance_attr(self, name, value):
        if name == "thisown":
            # Ownership flag is delegated to the low-level SWIG object.
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            # Route assignment through the property setter.
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    """Build a metaclass __setattr__ that forbids creating NEW class attributes.

    Existing (non-property) class attributes may still be reassigned.
    """
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # Rebuild the class under the given metaclass, preserving its
        # name, bases, and namespace.
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Reuse the guard built above, wrapping plain type.__setattr__.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.globals
import mfem._ser.matrix
import mfem._ser.vector
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.operators
class DenseSymmetricMatrix(mfem._ser.matrix.Matrix):
    r"""Proxy of C++ mfem::DenseSymmetricMatrix class."""

    # SWIG boilerplate: each method is first defined (to carry its signature
    # docstring) and then immediately rebound to the raw C entry point via
    # _swig_new_instance_method, so calls skip the Python wrapper body.
    # `thisown` flags whether Python owns (and should delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(DenseSymmetricMatrix self) -> DenseSymmetricMatrix
        __init__(DenseSymmetricMatrix self, int s) -> DenseSymmetricMatrix
        __init__(DenseSymmetricMatrix self, double * d, int s) -> DenseSymmetricMatrix
        """
        _symmat.DenseSymmetricMatrix_swiginit(self, _symmat.new_DenseSymmetricMatrix(*args))

    def UseExternalData(self, d, s):
        r"""UseExternalData(DenseSymmetricMatrix self, double * d, int s)"""
        return _symmat.DenseSymmetricMatrix_UseExternalData(self, d, s)
    UseExternalData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_UseExternalData)

    def Reset(self, d, s):
        r"""Reset(DenseSymmetricMatrix self, double * d, int s)"""
        return _symmat.DenseSymmetricMatrix_Reset(self, d, s)
    Reset = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Reset)

    def ClearExternalData(self):
        r"""ClearExternalData(DenseSymmetricMatrix self)"""
        return _symmat.DenseSymmetricMatrix_ClearExternalData(self)
    ClearExternalData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_ClearExternalData)

    def Clear(self):
        r"""Clear(DenseSymmetricMatrix self)"""
        return _symmat.DenseSymmetricMatrix_Clear(self)
    Clear = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Clear)

    def SetSize(self, s):
        r"""SetSize(DenseSymmetricMatrix self, int s)"""
        return _symmat.DenseSymmetricMatrix_SetSize(self, s)
    SetSize = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_SetSize)

    def Data(self):
        r"""Data(DenseSymmetricMatrix self) -> double *"""
        return _symmat.DenseSymmetricMatrix_Data(self)
    Data = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Data)

    def GetData(self):
        r"""GetData(DenseSymmetricMatrix self) -> double *"""
        return _symmat.DenseSymmetricMatrix_GetData(self)
    GetData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_GetData)

    def GetMemory(self, *args):
        r"""
        GetMemory(DenseSymmetricMatrix self) -> mfem::Memory< double >
        GetMemory(DenseSymmetricMatrix self) -> mfem::Memory< double > const &
        """
        return _symmat.DenseSymmetricMatrix_GetMemory(self, *args)
    GetMemory = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_GetMemory)

    def OwnsData(self):
        r"""OwnsData(DenseSymmetricMatrix self) -> bool"""
        return _symmat.DenseSymmetricMatrix_OwnsData(self)
    OwnsData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_OwnsData)

    def __call__(self, *args):
        r"""
        __call__(DenseSymmetricMatrix self, int i, int j) -> double
        __call__(DenseSymmetricMatrix self, int i, int j) -> double const &
        """
        return _symmat.DenseSymmetricMatrix___call__(self, *args)
    __call__ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix___call__)

    def Elem(self, *args):
        r"""
        Elem(DenseSymmetricMatrix self, int i, int j) -> double
        Elem(DenseSymmetricMatrix self, int i, int j) -> double const &
        """
        return _symmat.DenseSymmetricMatrix_Elem(self, *args)
    Elem = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Elem)

    def __imul__(self, c):
        r"""__imul__(DenseSymmetricMatrix self, double c) -> DenseSymmetricMatrix"""
        return _symmat.DenseSymmetricMatrix___imul__(self, c)
    __imul__ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix___imul__)

    def MemoryUsage(self):
        r"""MemoryUsage(DenseSymmetricMatrix self) -> long"""
        return _symmat.DenseSymmetricMatrix_MemoryUsage(self)
    MemoryUsage = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_MemoryUsage)

    # Read/Write/ReadWrite expose the data pointer for device or host access;
    # the Host* variants force host-side access.
    def Read(self, on_dev=True):
        r"""Read(DenseSymmetricMatrix self, bool on_dev=True) -> double const *"""
        return _symmat.DenseSymmetricMatrix_Read(self, on_dev)
    Read = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Read)

    def HostRead(self):
        r"""HostRead(DenseSymmetricMatrix self) -> double const *"""
        return _symmat.DenseSymmetricMatrix_HostRead(self)
    HostRead = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostRead)

    def Write(self, on_dev=True):
        r"""Write(DenseSymmetricMatrix self, bool on_dev=True) -> double *"""
        return _symmat.DenseSymmetricMatrix_Write(self, on_dev)
    Write = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Write)

    def HostWrite(self):
        r"""HostWrite(DenseSymmetricMatrix self) -> double *"""
        return _symmat.DenseSymmetricMatrix_HostWrite(self)
    HostWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostWrite)

    def ReadWrite(self, on_dev=True):
        r"""ReadWrite(DenseSymmetricMatrix self, bool on_dev=True) -> double *"""
        return _symmat.DenseSymmetricMatrix_ReadWrite(self, on_dev)
    ReadWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_ReadWrite)

    def HostReadWrite(self):
        r"""HostReadWrite(DenseSymmetricMatrix self) -> double *"""
        return _symmat.DenseSymmetricMatrix_HostReadWrite(self)
    HostReadWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostReadWrite)

    def Mult(self, x, y):
        r"""Mult(DenseSymmetricMatrix self, Vector x, Vector y)"""
        return _symmat.DenseSymmetricMatrix_Mult(self, x, y)
    Mult = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Mult)

    def Inverse(self):
        r"""Inverse(DenseSymmetricMatrix self) -> MatrixInverse"""
        return _symmat.DenseSymmetricMatrix_Inverse(self)
    Inverse = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Inverse)

    # Destructor hook invoked by SWIG when Python owns the C++ object.
    __swig_destroy__ = _symmat.delete_DenseSymmetricMatrix

    def Print(self, *args):
        r"""
        Print(DenseSymmetricMatrix self, std::ostream & out=out, int width_=4)
        Print(DenseSymmetricMatrix self, char const * file, int precision=16)
        """
        return _symmat.DenseSymmetricMatrix_Print(self, *args)
    Print = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Print)

    def PrintGZ(self, file, precision=16):
        r"""PrintGZ(DenseSymmetricMatrix self, char const * file, int precision=16)"""
        return _symmat.DenseSymmetricMatrix_PrintGZ(self, file, precision)
    PrintGZ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_PrintGZ)
# Register DenseSymmetricMatrix in _symmat:
_symmat.DenseSymmetricMatrix_swigregister(DenseSymmetricMatrix)
| 40.972973 | 118 | 0.726143 |
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
if __package__ or "." in __name__:
from . import _symmat
else:
import _symmat
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _symmat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _symmat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.globals
import mfem._ser.matrix
import mfem._ser.vector
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.operators
class DenseSymmetricMatrix(mfem._ser.matrix.Matrix):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
_symmat.DenseSymmetricMatrix_swiginit(self, _symmat.new_DenseSymmetricMatrix(*args))
def UseExternalData(self, d, s):
return _symmat.DenseSymmetricMatrix_UseExternalData(self, d, s)
UseExternalData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_UseExternalData)
def Reset(self, d, s):
return _symmat.DenseSymmetricMatrix_Reset(self, d, s)
Reset = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Reset)
def ClearExternalData(self):
return _symmat.DenseSymmetricMatrix_ClearExternalData(self)
ClearExternalData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_ClearExternalData)
def Clear(self):
return _symmat.DenseSymmetricMatrix_Clear(self)
Clear = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Clear)
def SetSize(self, s):
return _symmat.DenseSymmetricMatrix_SetSize(self, s)
SetSize = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_SetSize)
def Data(self):
return _symmat.DenseSymmetricMatrix_Data(self)
Data = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Data)
def GetData(self):
return _symmat.DenseSymmetricMatrix_GetData(self)
GetData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_GetData)
def GetMemory(self, *args):
return _symmat.DenseSymmetricMatrix_GetMemory(self, *args)
GetMemory = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_GetMemory)
def OwnsData(self):
return _symmat.DenseSymmetricMatrix_OwnsData(self)
OwnsData = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_OwnsData)
def __call__(self, *args):
return _symmat.DenseSymmetricMatrix___call__(self, *args)
__call__ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix___call__)
def Elem(self, *args):
return _symmat.DenseSymmetricMatrix_Elem(self, *args)
Elem = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Elem)
def __imul__(self, c):
return _symmat.DenseSymmetricMatrix___imul__(self, c)
__imul__ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix___imul__)
def MemoryUsage(self):
return _symmat.DenseSymmetricMatrix_MemoryUsage(self)
MemoryUsage = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_MemoryUsage)
def Read(self, on_dev=True):
return _symmat.DenseSymmetricMatrix_Read(self, on_dev)
Read = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Read)
def HostRead(self):
return _symmat.DenseSymmetricMatrix_HostRead(self)
HostRead = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostRead)
def Write(self, on_dev=True):
return _symmat.DenseSymmetricMatrix_Write(self, on_dev)
Write = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Write)
def HostWrite(self):
return _symmat.DenseSymmetricMatrix_HostWrite(self)
HostWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostWrite)
def ReadWrite(self, on_dev=True):
return _symmat.DenseSymmetricMatrix_ReadWrite(self, on_dev)
ReadWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_ReadWrite)
def HostReadWrite(self):
return _symmat.DenseSymmetricMatrix_HostReadWrite(self)
HostReadWrite = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_HostReadWrite)
def Mult(self, x, y):
return _symmat.DenseSymmetricMatrix_Mult(self, x, y)
Mult = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Mult)
def Inverse(self):
return _symmat.DenseSymmetricMatrix_Inverse(self)
Inverse = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Inverse)
__swig_destroy__ = _symmat.delete_DenseSymmetricMatrix
def Print(self, *args):
return _symmat.DenseSymmetricMatrix_Print(self, *args)
Print = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_Print)
def PrintGZ(self, file, precision=16):
return _symmat.DenseSymmetricMatrix_PrintGZ(self, file, precision)
PrintGZ = _swig_new_instance_method(_symmat.DenseSymmetricMatrix_PrintGZ)
_symmat.DenseSymmetricMatrix_swigregister(DenseSymmetricMatrix)
| true | true |
f72b70b8b138e4a1d4a8b2d5b7e35ce0046217f0 | 4,948 | py | Python | src/pretalx/schedule/utils.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | null | null | null | src/pretalx/schedule/utils.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | null | null | null | src/pretalx/schedule/utils.py | Hydro2shine/sprout | 7dfa5e9fa0a7ef9157517ad0752e393599053873 | [
"Apache-2.0"
] | null | null | null | from contextlib import suppress
from datetime import timedelta
from dateutil.parser import parse
from django.db import transaction
from django_scopes import scope
from pretalx.person.models import SpeakerProfile, User
from pretalx.schedule.models import Room, TalkSlot
from pretalx.submission.models import (
Submission, SubmissionStates, SubmissionType, Track,
)
def guess_schedule_version(event):
    """Guess the next schedule version string for *event*.

    Returns '0.1' when no schedule has been released yet.  If the current
    version ends in a number (optionally after a ',', '.', '-' or '_'
    separator), that trailing number is incremented, keeping any prefix and
    separator.  Returns '' when no next version can be guessed.
    """
    if not event.current_schedule:
        return '0.1'

    version = event.current_schedule.version
    prefix = ''
    separator = ''
    # Split off the last numeric component at the first matching separator.
    for candidate in (',', '.', '-', '_'):
        if candidate in version:
            prefix, version = version.rsplit(candidate, maxsplit=1)
            separator = candidate
            break
    if version.isdigit():
        # Bug fix: the original reused the loop variable after the loop, so a
        # purely numeric version with no separator (e.g. "3") came back as
        # "_4" instead of "4".
        return prefix + separator + str(int(version) + 1)
    return ''
@transaction.atomic()
def process_frab(root, event):
    """
    Takes an xml document root and an event, and releases a schedule with the data from the xml document.
    Called from the `import_schedule` manage command, at least.
    """
    # All ORM access is scoped to this event (django_scopes).
    with scope(event=event):
        for day in root.findall('day'):
            for rm in day.findall('room'):
                # Rooms are matched (or created) by name.
                room, _ = Room.objects.get_or_create(event=event, name=rm.attrib['name'])
                for talk in rm.findall('event'):
                    _create_talk(talk=talk, room=room, event=event)

        schedule_version = root.find('version').text
        try:
            # Freeze the WIP schedule under the imported version name.
            event.wip_schedule.freeze(schedule_version, notify_speakers=False)
            schedule = event.schedules.get(version=schedule_version)
        except Exception:
            raise Exception(
                f'Could not import "{event.name}" schedule version "{schedule_version}": failed creating schedule release.'
            )

        schedule.talks.update(is_visible=True)
        # Derive the event's date range from the first/last scheduled talk.
        start = schedule.talks.order_by('start').first().start
        end = schedule.talks.order_by('-end').first().end
        event.date_from = start.date()
        event.date_to = end.date()
        event.save()
    return (
        f'Successfully imported "{event.name}" schedule version "{schedule_version}".'
    )
def _create_talk(*, talk, room, event):
    """Create/update one Submission plus its TalkSlot from a frab <event> node.

    Assumes the frab XML layout: <date>, <start>, <duration> (HH:MM),
    optional <end>, <type>, <track>, <title>, <description>, <subtitle>,
    <abstract>, <language>, <persons>, optional <recording><optout>.
    """
    date = talk.find('date').text
    start = parse(date + ' ' + talk.find('start').text)
    hours, minutes = talk.find('duration').text.split(':')
    duration = timedelta(hours=int(hours), minutes=int(minutes))
    duration_in_minutes = duration.total_seconds() / 60
    try:
        end = parse(date + ' ' + talk.find('end').text)
    except AttributeError:
        # No <end> element: derive the end from start + duration.
        end = start + duration

    # Match an existing submission type by name AND duration, else create one.
    sub_type = SubmissionType.objects.filter(
        event=event, name=talk.find('type').text, default_duration=duration_in_minutes
    ).first()
    if not sub_type:
        sub_type = SubmissionType.objects.create(
            name=talk.find('type').text or 'default',
            event=event,
            default_duration=duration_in_minutes,
        )
    track = Track.objects.filter(event=event, name=talk.find('track').text).first()
    if not track:
        track = Track.objects.create(
            name=talk.find('track').text or 'default', event=event
        )
    optout = False
    with suppress(AttributeError):
        # <recording><optout> is optional; missing nodes raise AttributeError.
        optout = talk.find('recording').find('optout').text == 'true'

    # Choose a submission code: prefer the frab id, then the guid prefix, but
    # only if that code is either already this event's or globally unused.
    code = None
    if (
        Submission.objects.filter(code__iexact=talk.attrib['id'], event=event).exists()
        or not Submission.objects.filter(code__iexact=talk.attrib['id']).exists()
    ):
        code = talk.attrib['id']
    elif (
        Submission.objects.filter(
            code__iexact=talk.attrib['guid'][:16], event=event
        ).exists()
        or not Submission.objects.filter(code__iexact=talk.attrib['guid'][:16]).exists()
    ):
        code = talk.attrib['guid'][:16]
    sub, _ = Submission.objects.get_or_create(
        event=event, code=code, defaults={'submission_type': sub_type}
    )
    sub.submission_type = sub_type
    sub.track = track
    sub.title = talk.find('title').text
    sub.description = talk.find('description').text
    if talk.find('subtitle').text:
        # Prepend the subtitle to the description.
        sub.description = talk.find('subtitle').text + '\n' + (sub.description or '')
    sub.abstract = talk.find('abstract').text
    sub.content_locale = talk.find('language').text or 'en'
    sub.do_not_record = optout
    sub.state = SubmissionStates.CONFIRMED
    sub.save()

    # Attach speakers, creating local users (with placeholder emails) and
    # speaker profiles for names not seen before.
    for person in talk.find('persons').findall('person'):
        user = User.objects.filter(name=person.text[:60]).first()
        if not user:
            user = User(name=person.text, email=f'{person.text}@localhost')
            user.save()
            SpeakerProfile.objects.create(user=user, event=event)
        sub.speakers.add(user)

    # Place the talk in the WIP schedule.
    slot, _ = TalkSlot.objects.get_or_create(
        submission=sub, schedule=event.wip_schedule, is_visible=True
    )
    slot.room = room
    slot.is_visible = True
    slot.start = start
    slot.end = end
    slot.save()
| 34.84507 | 123 | 0.637833 | from contextlib import suppress
from datetime import timedelta
from dateutil.parser import parse
from django.db import transaction
from django_scopes import scope
from pretalx.person.models import SpeakerProfile, User
from pretalx.schedule.models import Room, TalkSlot
from pretalx.submission.models import (
Submission, SubmissionStates, SubmissionType, Track,
)
def guess_schedule_version(event):
if not event.current_schedule:
return '0.1'
version = event.current_schedule.version
prefix = ''
for separator in [',', '.', '-', '_']:
if separator in version:
prefix, version = version.rsplit(separator, maxsplit=1)
break
if version.isdigit():
version = str(int(version) + 1)
return prefix + separator + version
return ''
@transaction.atomic()
def process_frab(root, event):
with scope(event=event):
for day in root.findall('day'):
for rm in day.findall('room'):
room, _ = Room.objects.get_or_create(event=event, name=rm.attrib['name'])
for talk in rm.findall('event'):
_create_talk(talk=talk, room=room, event=event)
schedule_version = root.find('version').text
try:
event.wip_schedule.freeze(schedule_version, notify_speakers=False)
schedule = event.schedules.get(version=schedule_version)
except Exception:
raise Exception(
f'Could not import "{event.name}" schedule version "{schedule_version}": failed creating schedule release.'
)
schedule.talks.update(is_visible=True)
start = schedule.talks.order_by('start').first().start
end = schedule.talks.order_by('-end').first().end
event.date_from = start.date()
event.date_to = end.date()
event.save()
return (
f'Successfully imported "{event.name}" schedule version "{schedule_version}".'
)
def _create_talk(*, talk, room, event):
date = talk.find('date').text
start = parse(date + ' ' + talk.find('start').text)
hours, minutes = talk.find('duration').text.split(':')
duration = timedelta(hours=int(hours), minutes=int(minutes))
duration_in_minutes = duration.total_seconds() / 60
try:
end = parse(date + ' ' + talk.find('end').text)
except AttributeError:
end = start + duration
sub_type = SubmissionType.objects.filter(
event=event, name=talk.find('type').text, default_duration=duration_in_minutes
).first()
if not sub_type:
sub_type = SubmissionType.objects.create(
name=talk.find('type').text or 'default',
event=event,
default_duration=duration_in_minutes,
)
track = Track.objects.filter(event=event, name=talk.find('track').text).first()
if not track:
track = Track.objects.create(
name=talk.find('track').text or 'default', event=event
)
optout = False
with suppress(AttributeError):
optout = talk.find('recording').find('optout').text == 'true'
code = None
if (
Submission.objects.filter(code__iexact=talk.attrib['id'], event=event).exists()
or not Submission.objects.filter(code__iexact=talk.attrib['id']).exists()
):
code = talk.attrib['id']
elif (
Submission.objects.filter(
code__iexact=talk.attrib['guid'][:16], event=event
).exists()
or not Submission.objects.filter(code__iexact=talk.attrib['guid'][:16]).exists()
):
code = talk.attrib['guid'][:16]
sub, _ = Submission.objects.get_or_create(
event=event, code=code, defaults={'submission_type': sub_type}
)
sub.submission_type = sub_type
sub.track = track
sub.title = talk.find('title').text
sub.description = talk.find('description').text
if talk.find('subtitle').text:
sub.description = talk.find('subtitle').text + '\n' + (sub.description or '')
sub.abstract = talk.find('abstract').text
sub.content_locale = talk.find('language').text or 'en'
sub.do_not_record = optout
sub.state = SubmissionStates.CONFIRMED
sub.save()
for person in talk.find('persons').findall('person'):
user = User.objects.filter(name=person.text[:60]).first()
if not user:
user = User(name=person.text, email=f'{person.text}@localhost')
user.save()
SpeakerProfile.objects.create(user=user, event=event)
sub.speakers.add(user)
slot, _ = TalkSlot.objects.get_or_create(
submission=sub, schedule=event.wip_schedule, is_visible=True
)
slot.room = room
slot.is_visible = True
slot.start = start
slot.end = end
slot.save()
| true | true |
f72b726b4dcae39366215f12ae7011d2b2bf9606 | 1,072 | py | Python | Codes-B/matplotlib-py-files/matplot-image.py | sanils2002/PYTHON-CODES | 607fadc2cba4b185a5529bd101faefa08f4c3469 | [
"MIT"
] | null | null | null | Codes-B/matplotlib-py-files/matplot-image.py | sanils2002/PYTHON-CODES | 607fadc2cba4b185a5529bd101faefa08f4c3469 | [
"MIT"
] | null | null | null | Codes-B/matplotlib-py-files/matplot-image.py | sanils2002/PYTHON-CODES | 607fadc2cba4b185a5529bd101faefa08f4c3469 | [
"MIT"
] | null | null | null | # importing required libraries
import matplotlib.pyplot as plt
import matplotlib.image as img

# Example 1: read an image and display it (and its raw array form).
# NOTE(review): there is no plt.show() anywhere — presumably this is meant
# for a notebook/inline backend; confirm before running as a plain script.
testImage = img.imread('g4g.png')
plt.imshow(testImage)
# imread returns the pixel data as an array; print it directly.
print(testImage)

###############################################
# Example 2: select a single colour channel.
# (The comment "modifying the shape" in the original was misleading: this
# slices out channel 0, producing a 2-D single-channel array.)
testImage = img.imread('g4g.png')
# print the array's dimensions before slicing
print(testImage.shape)
# keep all rows and columns, take only channel 0
modifiedImage = testImage[:, :, 0]
plt.imshow(modifiedImage)

# Example 3: crop rows 50:200 and columns 100:200 and take channel 1,
# i.e. a 150-pixel-tall, 100-pixel-wide single-channel crop.
testImage = img.imread('g4g.png')
print(testImage.shape)
# crop spatially and select channel 1
modifiedImage = testImage[50:200, 100:200, 1]
plt.imshow(modifiedImage)
| 23.304348 | 80 | 0.689366 |
# Minimal matplotlib image demo: read a PNG, display it, print its array.
import matplotlib.pyplot as plt
import matplotlib.image as img
# imread returns the image as an array (height, width, channels for color
# PNGs).  NOTE(review): assumes 'g4g.png' is in the working directory.
testImage = img.imread('g4g.png')
# Queue the image for display (plt.show() needed in non-interactive use).
plt.imshow(testImage)
# Dump the raw pixel values to stdout.
print(testImage)
| true | true |
f72b72d7b104572721fe60e33a1bbc59a4784e63 | 827 | py | Python | setup.py | STEMinds/Eduponics-Pi-MQTT | 9a8359aaec6b0f571897cc454341d4df336a0b20 | [
"MIT"
] | 10 | 2020-08-12T08:03:00.000Z | 2021-07-07T05:42:36.000Z | setup.py | STEMinds/Eduponics-Pi-MQTT | 9a8359aaec6b0f571897cc454341d4df336a0b20 | [
"MIT"
] | null | null | null | setup.py | STEMinds/Eduponics-Pi-MQTT | 9a8359aaec6b0f571897cc454341d4df336a0b20 | [
"MIT"
] | 2 | 2020-08-15T06:50:28.000Z | 2020-08-19T06:33:23.000Z | import setuptools
# Packaging metadata for the STEMinds Eduponics MQTT bridge.
# The long description shown on PyPI is taken straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="eduponics-mqtt-STEMinds", # PyPI distribution name
    version="0.0.1",
    author="Roni Gorodetsky",
    author_email="contact@steminds.com",
    description="Python MQTT package for STEMinds Eduponics react-native mobile app",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/STEMinds/Eduponics-Pi-MQTT",
    packages=setuptools.find_packages(),
    # Runtime dependencies: MQTT client plus QR-code/PNG libraries.
    install_requires=[
        'pyqrcode',
        'paho-mqtt',
        'pypng'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 29.535714 | 85 | 0.657799 | import setuptools
# Packaging metadata for the STEMinds Eduponics MQTT bridge; the PyPI long
# description is read from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="eduponics-mqtt-STEMinds",
    version="0.0.1",
    author="Roni Gorodetsky",
    author_email="contact@steminds.com",
    description="Python MQTT package for STEMinds Eduponics react-native mobile app",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/STEMinds/Eduponics-Pi-MQTT",
    packages=setuptools.find_packages(),
    # Runtime dependencies: MQTT client plus QR-code/PNG libraries.
    install_requires=[
        'pyqrcode',
        'paho-mqtt',
        'pypng'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| true | true |
f72b7400fbb8c82da785145fefce3095b952b913 | 3,480 | py | Python | src/m2_extra.py | josephklaw/99-CapstoneProject-201920 | 0e8b8a652a8694e453c57ff42c412043e02b9800 | [
"MIT"
] | null | null | null | src/m2_extra.py | josephklaw/99-CapstoneProject-201920 | 0e8b8a652a8694e453c57ff42c412043e02b9800 | [
"MIT"
] | null | null | null | src/m2_extra.py | josephklaw/99-CapstoneProject-201920 | 0e8b8a652a8694e453c57ff42c412043e02b9800 | [
"MIT"
] | null | null | null | import ev3dev.ev3 as ev3
import rosebot as robot
import time
def increasing_tone(initial_tone, tone_rate_increase, speed, robot):
    """
    Drive straight ahead while beeping, raising the beep's frequency each
    time the IR proximity reading gets closer, and stop (raising the arm)
    once the robot is within 1 inch of the object.

    :param initial_tone: starting tone frequency passed to play_tone.
    :param tone_rate_increase: frequency added whenever the robot gets closer.
    :param speed: wheel speed used for both motors.
    :type  robot: rosebot.RoseBot
    """
    robot.drive_system.go(speed, speed)
    starting_distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
    while True:
        # Read the sensor once per pass and reuse the value.  The original
        # read it a second time for the exit test, so one iteration could
        # act on two different distances.
        new_distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
        if new_distance < starting_distance:
            initial_tone = initial_tone + tone_rate_increase
            starting_distance = new_distance
        if new_distance < 1:
            break
        robot.sound_system.tone_maker.play_tone(initial_tone, 150)
    robot.drive_system.stop()
    robot.arm_and_claw.raise_arm()
def point_to_object(direction, speed, initial_tone, tone_rate_increase, robot):
    """
    Spin in the requested direction ("CCW" or "CW") until the Pixy camera
    reports an object, then hand off to ``increasing_tone`` to approach it.

    :type robot: rosebot.RoseBot
    """
    pixy = ev3.Sensor(driver_name="pixy-lego")
    pixy.mode = "SIG1"
    if direction == "CCW":
        area = pixy.value(3) * pixy.value(4)
        robot.drive_system.spin_counterclockwise_until_sees_object(int(speed), area)
    if direction == "CW":
        area = pixy.value(3) * pixy.value(4)
        robot.drive_system.spin_clockwise_until_sees_object(int(speed), area)
    increasing_tone(initial_tone, tone_rate_increase, speed, robot)
#Sprint 3 Functions
def color_finder(color, robot):
    """
    Drive forward until the color sensor reports the requested color, then
    stop, announce the find, and print the matched color code.

    :param color: target color code (anything ``int()`` accepts, e.g. "3").
    :type  robot: rosebot.RoseBot
    """
    target = int(color)  # convert once instead of on every sensor poll
    robot.drive_system.go(75, 75)
    while True:
        seen = robot.sensor_system.color_sensor.get_color()
        if seen == target:
            robot.drive_system.stop()
            robot.sound_system.speech_maker.speak("I found the color")
            # Print the value that actually matched; the original re-read
            # the sensor here and could report a different color.
            print(seen)
            break
def find_object(speed, robot):
    """
    Drive forward briefly, spin counterclockwise until the Pixy camera sees
    an object, then approach until within 0.75 inch and raise the arm.

    :type robot: rosebot.RoseBot
    """
    pixy = ev3.Sensor(driver_name="pixy-lego")
    pixy.mode = "SIG1"
    drive = robot.drive_system
    drive.go_straight_for_seconds(3, speed)
    drive.spin_counterclockwise_until_sees_object(int(speed), pixy.value(3) * pixy.value(4))
    drive.go(speed, speed)
    # Busy-wait until the IR proximity sensor says the object is reached.
    while robot.sensor_system.ir_proximity_sensor.get_distance_in_inches() >= 0.75:
        pass
    drive.stop()
    robot.arm_and_claw.raise_arm()
def line_following(robot):
    """
    Follow a taped line using the color sensor until white is seen.

    Color codes (per the legend that follows this function): 1=black drives
    straight, 4=yellow pivots with the left wheel forward, 5=red pivots with
    the right wheel forward, 6=white stops, lowers the arm, and returns.

    :type robot: rosebot.RoseBot
    """
    robot.drive_system.go(50, 50)
    while True:
        # One sensor read per pass.  The original re-read the sensor for
        # each branch, so a changing reading could fire several branches
        # (with conflicting motor commands) within a single iteration.
        color = robot.sensor_system.color_sensor.get_color()
        if color == 1:  # black: keep going straight
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.go(50, 50)
        elif color == 4:  # yellow: pivot, left wheel forward
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.right_motor.turn_on(-20)
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.left_motor.turn_on(50)
        elif color == 5:  # red: pivot, right wheel forward
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.right_motor.turn_on(50)
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.left_motor.turn_on(-20)
        elif color == 6:  # white: end of the line
            robot.drive_system.stop()
            robot.arm_and_claw.move_arm_to_position(0)
            break
        time.sleep(0.01)
# Color codes returned by the color sensor:
# - 1: Black
# - 2: Blue
# - 3: Green
# - 4: Yellow
# - 5: Red
# - 6: White
# - 7: Brown | 36.631579 | 103 | 0.66408 | import ev3dev.ev3 as ev3
import rosebot as robot
import time
def increasing_tone(initial_tone, tone_rate_increase, speed, robot):
    """
    Drive straight ahead while beeping, raising the beep's frequency each
    time the IR proximity reading gets closer, and stop (raising the arm)
    once the robot is within 1 inch of the object.

    :param initial_tone: starting tone frequency passed to play_tone.
    :param tone_rate_increase: frequency added whenever the robot gets closer.
    :param speed: wheel speed used for both motors.
    :type  robot: rosebot.RoseBot
    """
    robot.drive_system.go(speed, speed)
    starting_distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
    while True:
        # Read the sensor once per pass and reuse the value.  The original
        # read it a second time for the exit test, so one iteration could
        # act on two different distances.
        new_distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
        if new_distance < starting_distance:
            initial_tone = initial_tone + tone_rate_increase
            starting_distance = new_distance
        if new_distance < 1:
            break
        robot.sound_system.tone_maker.play_tone(initial_tone, 150)
    robot.drive_system.stop()
    robot.arm_and_claw.raise_arm()
def point_to_object(direction, speed, initial_tone, tone_rate_increase, robot):
    """
    Spin in the requested direction ("CCW" or "CW") until the Pixy camera
    reports an object, then hand off to ``increasing_tone`` to approach it.

    :type robot: rosebot.RoseBot
    """
    pixy = ev3.Sensor(driver_name="pixy-lego")
    pixy.mode = "SIG1"
    if direction == "CCW":
        area = pixy.value(3) * pixy.value(4)
        robot.drive_system.spin_counterclockwise_until_sees_object(int(speed), area)
    if direction == "CW":
        area = pixy.value(3) * pixy.value(4)
        robot.drive_system.spin_clockwise_until_sees_object(int(speed), area)
    increasing_tone(initial_tone, tone_rate_increase, speed, robot)
def color_finder(color, robot):
    """
    Drive forward until the color sensor reports the requested color, then
    stop, announce the find, and print the matched color code.

    :param color: target color code (anything ``int()`` accepts, e.g. "3").
    :type  robot: rosebot.RoseBot
    """
    target = int(color)  # convert once instead of on every sensor poll
    robot.drive_system.go(75, 75)
    while True:
        seen = robot.sensor_system.color_sensor.get_color()
        if seen == target:
            robot.drive_system.stop()
            robot.sound_system.speech_maker.speak("I found the color")
            # Print the value that actually matched; the original re-read
            # the sensor here and could report a different color.
            print(seen)
            break
def find_object(speed, robot):
    """
    Drive forward briefly, spin counterclockwise until the Pixy camera sees
    an object, then approach until within 0.75 inch and raise the arm.

    :type robot: rosebot.RoseBot
    """
    pixy = ev3.Sensor(driver_name="pixy-lego")
    pixy.mode = "SIG1"
    drive = robot.drive_system
    drive.go_straight_for_seconds(3, speed)
    drive.spin_counterclockwise_until_sees_object(int(speed), pixy.value(3) * pixy.value(4))
    drive.go(speed, speed)
    # Busy-wait until the IR proximity sensor says the object is reached.
    while robot.sensor_system.ir_proximity_sensor.get_distance_in_inches() >= 0.75:
        pass
    drive.stop()
    robot.arm_and_claw.raise_arm()
def line_following(robot):
    """
    Follow a taped line using the color sensor until white is seen.

    Color codes (per the sibling legend in this module): 1=black drives
    straight, 4=yellow pivots with the left wheel forward, 5=red pivots with
    the right wheel forward, 6=white stops, lowers the arm, and returns.

    :type robot: rosebot.RoseBot
    """
    robot.drive_system.go(50, 50)
    while True:
        # One sensor read per pass.  The original re-read the sensor for
        # each branch, so a changing reading could fire several branches
        # (with conflicting motor commands) within a single iteration.
        color = robot.sensor_system.color_sensor.get_color()
        if color == 1:  # black: keep going straight
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.go(50, 50)
        elif color == 4:  # yellow: pivot, left wheel forward
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.right_motor.turn_on(-20)
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.left_motor.turn_on(50)
        elif color == 5:  # red: pivot, right wheel forward
            robot.drive_system.right_motor.turn_off()
            robot.drive_system.right_motor.turn_on(50)
            robot.drive_system.left_motor.turn_off()
            robot.drive_system.left_motor.turn_on(-20)
        elif color == 6:  # white: end of the line
            robot.drive_system.stop()
            robot.arm_and_claw.move_arm_to_position(0)
            break
        time.sleep(0.01)
| true | true |
f72b748ba56730f9620b2b9c4d086d31b3c8eea7 | 101,044 | py | Python | numpy/random/tests/test_generator_mt19937.py | czgdp1807/numpy | fb314a390851d4c21f3f6a2a87cffd329219c524 | [
"BSD-3-Clause"
] | 1 | 2021-12-27T06:52:12.000Z | 2021-12-27T06:52:12.000Z | numpy/random/tests/test_generator_mt19937.py | zooba/numpy | e4894aef5c93c845081388818a2eb4264c5e1d72 | [
"BSD-3-Clause"
] | 32 | 2019-05-20T02:43:57.000Z | 2022-01-28T21:06:29.000Z | numpy/random/tests/test_generator_mt19937.py | zooba/numpy | e4894aef5c93c845081388818a2eb4264c5e1d72 | [
"BSD-3-Clause"
] | null | null | null | import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
# Shared Generator for tests that do not need reproducibility; tests that
# pin exact values construct their own seeded Generator instead.
random = Generator(MT19937())
# Reference data for MT19937 state jumps: each entry records a seed, the
# number of draws taken ("steps"), and, for the state before and after the
# jump, an md5 digest of the key ("key_md5") plus the position counter
# ("pos").  NOTE(review): consumed by jump tests outside this chunk --
# confirm against the rest of the file.
JUMP_TEST_DATA = [
    {
        "seed": 0,
        "steps": 10,
        "initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
        "jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
    },
    {
        "seed":384908324,
        "steps":312,
        "initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
        "jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
    },
    {
        "seed": [839438204, 980239840, 859048019, 821],
        "steps": 511,
        "initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
        "jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
    },
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    """Parametrized fixture: tests taking ``endpoint`` run twice, once per
    value, covering both interval conventions of ``Generator.integers``."""
    return request.param
class TestSeed:
    """Seeding MT19937 from scalars, sequences, and SeedSequence."""

    def test_scalar(self):
        # first draw is fully determined by the scalar seed
        for seed, expected in ((0, 479), (4294967295, 324)):
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_array(self):
        # sequence seeds: equivalent sequences give equivalent streams
        cases = [
            (range(10), 465),
            (np.arange(10), 465),
            ([0], 479),
            ([4294967295], 324),
        ]
        for seed, expected in cases:
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_seedsequence(self):
        bit_gen = MT19937(SeedSequence(0))
        assert_equal(bit_gen.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        # seed must be a non-negative integer
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        # every entry must be an unsigned 32-bit-representable integer
        assert_raises(TypeError, MT19937, [-0.5])
        assert_raises(ValueError, MT19937, [-1])
        assert_raises(ValueError, MT19937, [1, -2, 4294967296])

    def test_noninstantized_bitgen(self):
        # Generator needs a bit generator *instance*, not the class itself
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multidimensional_pvals(self):
assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
    """Validation, edge cases, and stream repeatability of
    ``Generator.multivariate_hypergeometric`` for both sampling methods
    ('count' and 'marginals')."""
    def setup(self):
        # fixed seed used by the deterministic repeatability tests below
        self.seed = 8675309
    def test_argument_validation(self):
        """Invalid colors/nsample combinations must raise ValueError."""
        # Error cases...
        # `colors` must be a 1-d sequence
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      10, 4)
        # Negative nsample
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], -1)
        # Negative color
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [-1, 2, 3], 2)
        # nsample exceeds sum(colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], 10)
        # nsample exceeds sum(colors) (edge case of empty colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [], 1)
        # Validation errors associated with very large values in colors.
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [999999999, 101], 5, 1, 'marginals')
        int64_info = np.iinfo(np.int64)
        max_int64 = int64_info.max
        max_int64_index = max_int64 // int64_info.dtype.itemsize
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [max_int64_index - 100, 101], 5, 1, 'count')
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    def test_edge_cases(self, method):
        """Degenerate inputs (empty colors, zero nsample, exhaustive draw)
        have fully determined results for both methods."""
        # Set the seed, but in fact, all the results in this test are
        # deterministic, so we don't really need this.
        random = Generator(MT19937(self.seed))
        x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
        assert_array_equal(x, [0, 0, 0])
        x = random.multivariate_hypergeometric([], 0, method=method)
        assert_array_equal(x, [])
        x = random.multivariate_hypergeometric([], 0, size=1, method=method)
        assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
        x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
        assert_array_equal(x, [0, 0, 0])
        x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
        assert_array_equal(x, [3, 0, 0])
        colors = [1, 1, 0, 1, 1]
        x = random.multivariate_hypergeometric(colors, sum(colors),
                                               method=method)
        assert_array_equal(x, colors)
        x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
                                               method=method)
        assert_array_equal(x, [[3, 4, 5]]*3)
    # Cases for nsample:
    #     nsample < 10
    #     10 <= nsample < colors.sum()/2
    #     colors.sum()/2 < nsample < colors.sum() - 10
    #     colors.sum() - 10 < nsample < colors.sum()
    @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    @pytest.mark.parametrize('size', [5, (2, 3), 150000])
    def test_typical_cases(self, nsample, method, size):
        """Samples have the right shape, stay within bounds, sum to nsample,
        and (for the large size) match the expected mean."""
        random = Generator(MT19937(self.seed))
        colors = np.array([10, 5, 20, 25])
        sample = random.multivariate_hypergeometric(colors, nsample, size,
                                                    method=method)
        if isinstance(size, int):
            expected_shape = (size,) + colors.shape
        else:
            expected_shape = size + colors.shape
        assert_equal(sample.shape, expected_shape)
        assert_((sample >= 0).all())
        assert_((sample <= colors).all())
        assert_array_equal(sample.sum(axis=-1),
                           np.full(size, fill_value=nsample, dtype=int))
        if isinstance(size, int) and size >= 100000:
            # This sample is large enough to compare its mean to
            # the expected values.
            assert_allclose(sample.mean(axis=0),
                            nsample * colors / colors.sum(),
                            rtol=1e-3, atol=0.005)
    def test_repeatability1(self):
        """Values pinned against the 'count' method stream for self.seed."""
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
                                                    method='count')
        expected = np.array([[2, 1, 2],
                             [2, 1, 2],
                             [1, 1, 3],
                             [2, 0, 3],
                             [2, 1, 2]])
        assert_array_equal(sample, expected)
    def test_repeatability2(self):
        """Values pinned against the 'marginals' method stream (nsample=50)."""
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)
    def test_repeatability3(self):
        """Values pinned against the 'marginals' method stream (nsample=12)."""
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Round-tripping the MT19937 state through ``bit_generator.state``."""

    def setup(self):
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        self.state = self.bit_generator.state
        self.legacy_state = (self.state['bit_generator'],
                             self.state['state']['key'],
                             self.state['state']['pos'])

    def test_gaussian_reset(self):
        # Restoring the state must also reset the cached second Gaussian.
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_gaussian_reset_in_media_res(self):
        # A state captured while a Gaussian is cached must restore that
        # cached value as well.
        self.rg.standard_normal()
        snapshot = self.bit_generator.state
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = snapshot
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_negative_binomial(self):
        # Floating-point arguments are accepted without truncation.
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
    """Bounds checking, dtype handling, and stream repeatability for
    ``Generator.integers``.  Tests taking ``endpoint`` run for both the
    half-open (False) and closed (True) interval conventions via the
    module-level ``endpoint`` fixture."""
    rfunc = random.integers
    # valid integer/boolean types
    itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]
    def test_unsupported_type(self, endpoint):
        """A float dtype is rejected with TypeError."""
        assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
    def test_bounds_checking(self, endpoint):
        """Scalar or length-1 bounds outside the dtype's range, or inverted
        bounds, raise ValueError."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
                          dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, [0],
                          endpoint=endpoint, dtype=dt)
    def test_bounds_checking_array(self, endpoint):
        """Same bounds checks with broadcast (array) low/high arguments."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
            assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd] * 2,
                          [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [1] * 2, 0,
                          endpoint=endpoint, dtype=dt)
    def test_rng_zero_and_extremes(self, endpoint):
        """A width-1 interval at the bottom, middle, and top of the dtype's
        range always returns exactly the single admissible value."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            is_open = not endpoint
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], [tgt + is_open],
                                    size=1000, endpoint=endpoint, dtype=dt),
                         tgt)
    def test_rng_zero_and_extremes_array(self, endpoint):
        """Width-1 intervals expressed through array bounds (broadcast and
        full-length) also return only the single admissible value."""
        size = 1000
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            tgt = ubnd - 1
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
    def test_full_range(self, endpoint):
        """Sampling the full dtype range must not raise (ticket #1690)."""
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            try:
                self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))
    def test_full_range_array(self, endpoint):
        """Full-range sampling with array bounds must not raise either."""
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            try:
                self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))
    def test_in_bounds_fuzz(self, endpoint):
        """Large draws from small intervals always stay inside the bounds."""
        # Don't use fixed seed
        random = Generator(MT19937())
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
                                  endpoint=endpoint, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
                          dtype=bool)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)
    def test_scalar_array_equiv(self, endpoint):
        """Scalar bounds, length-1 array bounds, and full-size array bounds
        must all draw the same stream from the same seed."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            size = 1000
            random = Generator(MT19937(1234))
            scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
                                     dtype=dt)
            random = Generator(MT19937(1234))
            scalar_array = random.integers([lbnd], [ubnd], size=size,
                                           endpoint=endpoint, dtype=dt)
            random = Generator(MT19937(1234))
            array = random.integers([lbnd] * size, [ubnd] *
                                    size, size=size, endpoint=endpoint, dtype=dt)
            assert_array_equal(scalar, scalar_array)
            assert_array_equal(scalar, array)
    def test_repeatability(self, endpoint):
        """Streams for seed 1234 are pinned by md5 digests per dtype."""
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
               'int16': '39624ead49ad67e37545744024d2648b',
               'int32': '5c4810373f979336c6c0c999996e47a1',
               'int64': 'ab126c15edff26f55c50d2b7e37391ac',
               'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
               'uint16': '39624ead49ad67e37545744024d2648b',
               'uint32': '5c4810373f979336c6c0c999996e47a1',
               'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
               'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
        for dt in self.itype[1:]:
            random = Generator(MT19937(1234))
            # view as little endian for hash
            if sys.byteorder == 'little':
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt)
            else:
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt).byteswap()
            res = hashlib.md5(val).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
        random = Generator(MT19937(1234))
        val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
                              dtype=bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)
    def test_repeatability_broadcasting(self, endpoint):
        """Broadcast bounds draw the same stream as scalar bounds."""
        for dt in self.itype:
            lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
            ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            # view as little endian for hash
            random = Generator(MT19937(1234))
            val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
                                  dtype=dt)
            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
                                     dtype=dt)
            assert_array_equal(val, val_bc)
            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
                                     endpoint=endpoint, dtype=dt)
            assert_array_equal(val, val_bc)
    @pytest.mark.parametrize(
        'bound, expected',
        [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
                               3769704066, 1170797179, 4108474671])),
         (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
                           3769704067, 1170797180, 4108474672])),
         (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
                               1831631863, 1215661561, 3869512430]))]
    )
    def test_repeatability_32bit_boundary(self, bound, expected):
        """Pinned values around the 2**32 upper-bound boundary."""
        for size in [None, len(expected)]:
            random = Generator(MT19937(1234))
            x = random.integers(bound, size=size)
            assert_equal(x, expected if size is not None else expected[0])
    def test_repeatability_32bit_boundary_broadcasting(self):
        """Pinned values around 2**32 with broadcast low/high arrays."""
        desired = np.array([[[1622936284, 3620788691, 1659384060],
                             [1417365545, 760222891, 1909653332],
                             [3788118662, 660249498, 4092002593]],
                            [[3625610153, 2979601262, 3844162757],
                             [ 685800658, 120261497, 2694012896],
                             [1207779440, 1586594375, 3854335050]],
                            [[3004074748, 2310761796, 3012642217],
                             [2067714190, 2786677879, 1363865881],
                             [ 791663441, 1867303284, 2169727960]],
                            [[1939603804, 1250951100, 298950036],
                             [1040128489, 3791912209, 3317053765],
                             [3155528714, 61360675, 2305155588]],
                            [[ 817688762, 1335621943, 3288952434],
                             [1770890872, 1102951817, 1957607470],
                             [3099996017, 798043451, 48334215]]])
        for size in [None, (5, 3, 3)]:
            random = Generator(MT19937(12345))
            x = random.integers([[-1], [0], [1]],
                                [2**32 - 1, 2**32, 2**32 + 1],
                                size=size)
            assert_array_equal(x, desired if size is not None else desired[0])
    def test_int64_uint64_broadcast_exceptions(self, endpoint):
        """Out-of-range or inverted 64-bit bounds raise ValueError for all
        scalar/array/object-array argument combinations."""
        configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
                   np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
                              (-2**63-1, -2**63-1))}
        for dtype in configs:
            for config in configs[dtype]:
                low, high = config
                high = high - endpoint
                low_a = np.array([[low]*10])
                high_a = np.array([high] * 10)
                assert_raises(ValueError, random.integers, low, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_a,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high_a,
                              endpoint=endpoint, dtype=dtype)
                low_o = np.array([[low]*10], dtype=object)
                high_o = np.array([high] * 10, dtype=object)
                assert_raises(ValueError, random.integers, low_o, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_o,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_o, high_o,
                              endpoint=endpoint, dtype=dtype)
    def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
        # None of these function calls should
        # generate a ValueError now.
        actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
        assert_equal(actual, tgt)
    def test_respect_dtype_singleton(self, endpoint):
        """Requested dtype is honored; Python types yield Python scalars."""
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert not hasattr(sample, 'dtype')
            assert_equal(type(sample), dt)
    def test_respect_dtype_array(self, endpoint):
        """Requested dtype is honored when bounds are arrays too."""
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt
            sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
            sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
                                dtype=dt)
            assert_equal(sample.dtype, dt)
    def test_zero_size(self, endpoint):
        """Zero-length size specifications produce empty arrays of the
        requested shape and dtype without validating the bounds."""
        # See gh-7203
        for dt in self.itype:
            sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
            assert sample.shape == (3, 0, 4)
            assert sample.dtype == dt
            assert self.rfunc(0, -10, 0, endpoint=endpoint,
                              dtype=dt).shape == (0,)
            assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
                         (3, 0, 4))
            assert_equal(random.integers(0, -10, size=0).shape, (0,))
            assert_equal(random.integers(10, 10, size=0).shape, (0,))
    def test_error_byteorder(self):
        """A non-native byte-order dtype string raises ValueError."""
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.raises(ValueError):
            random.integers(0, 200, size=10, dtype=other_byteord_dt)
    # chi2max is the maximum acceptable chi-squared value.
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),  # p-value ~4.6e-25
                              (5000000, 7, np.uint8, 150.0),  # p-value ~7.7e-30
                              (10000000, 2500, np.int16, 3300.0),  # p-value ~3.0e-25
                              (50000000, 5000, np.uint16, 6500.0),  # p-value ~3.5e-25
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        """Chi-squared uniformity check for small dtypes."""
        # Regression test for gh-14774.
        samples = random.integers(high, size=sample_size, dtype=dtype)
        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
    def setup(self):
        # Fixed seed so every test in this class sees a reproducible stream.
        # NOTE(review): nose-style `setup` hooks are deprecated in recent
        # pytest releases; consider renaming to `setup_method` — confirm the
        # project's minimum pytest version before changing.
        self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
# Tests whether integers with closed=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
alpha = eps * np.array([1., 1.0e-3])
random = Generator(MT19937(self.seed))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array([
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]]
])
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
# Use alpha.max() < 0.1 to trigger stick breaking code path
alpha = np.array([0.02, 0.04, 0.03])
exact_mean = alpha / alpha.sum()
random = Generator(MT19937(self.seed))
sample = random.dirichlet(alpha, size=20000000)
sample_mean = sample.mean(axis=0)
assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non symmetric covariance input raises exception when
# check_valid='raises' if using default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise', method='eigh')
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ('svd', 'eigh'):
samples = random.multivariate_normal(mean, cov, size=(3, 2),
method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1],
decimal=6)
else:
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
random = Generator(MT19937(self.seed))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_expoential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gammma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gammma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast:
    """For every distribution, pin the broadcast result against the same
    expected values whichever argument is the length-3 one, and verify
    that invalid parameter values raise ValueError in broadcast form too.
    All methods reseed a fresh Generator so each draw stream is fixed.
    """
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    def setup(self):
        # Seed shared by every method; expected arrays below assume it.
        self.seed = 123456789
    def test_uniform(self):
        random = Generator(MT19937(self.seed))
        low = [0]
        high = [1]
        uniform = random.uniform
        desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
        random = Generator(MT19937(self.seed))
        actual = random.uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)
        random = Generator(MT19937(self.seed))
        actual = random.uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        random = Generator(MT19937(self.seed))
        desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
        random = Generator(MT19937(self.seed))
        actual = random.normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.normal, loc * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        normal = random.normal
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)
    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
        random = Generator(MT19937(self.seed))
        beta = random.beta
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)
        random = Generator(MT19937(self.seed))
        actual = random.beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
        random = Generator(MT19937(self.seed))
        actual = random.exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.exponential, bad_scale * 3)
    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
        random = Generator(MT19937(self.seed))
        std_gamma = random.standard_gamma
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)
    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
        random = Generator(MT19937(self.seed))
        gamma = random.gamma
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        gamma = random.gamma
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)
    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
        random = Generator(MT19937(self.seed))
        f = random.f
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
        random = Generator(MT19937(self.seed))
        f = random.f
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        # NaN non-centrality must propagate, not raise.
        assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_f = random.noncentral_f
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
    def test_noncentral_f_small_df(self):
        random = Generator(MT19937(self.seed))
        desired = np.array([0.04714867120827, 0.1239390327694])
        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
        random = Generator(MT19937(self.seed))
        actual = random.chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.chisquare, bad_df * 3)
    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
        random = Generator(MT19937(self.seed))
        nonc_chi = random.noncentral_chisquare
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
        random = Generator(MT19937(self.seed))
        nonc_chi = random.noncentral_chisquare
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
        random = Generator(MT19937(self.seed))
        actual = random.standard_t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.standard_t, bad_df * 3)
    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
        random = Generator(MT19937(self.seed))
        actual = random.vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
        random = Generator(MT19937(self.seed))
        actual = random.vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
        random = Generator(MT19937(self.seed))
        actual = random.pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.pareto, bad_a * 3)
    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
        random = Generator(MT19937(self.seed))
        actual = random.weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.weibull, bad_a * 3)
    def test_power(self):
        a = [1]
        bad_a = [-1]
        desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
        random = Generator(MT19937(self.seed))
        actual = random.power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.power, bad_a * 3)
    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
        random = Generator(MT19937(self.seed))
        laplace = random.laplace
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        laplace = random.laplace
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)
    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
        random = Generator(MT19937(self.seed))
        gumbel = random.gumbel
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        gumbel = random.gumbel
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)
    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
        # scale == 0 degenerates to the location parameter.
        assert_equal(random.logistic(1.0, 0.0), 1.0)
    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
        random = Generator(MT19937(self.seed))
        lognormal = random.lognormal
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
        random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean, sigma * 3)
        # NOTE(review): `actual` is never compared to `desired` in this
        # branch, unlike every sibling test -- looks like a missing
        # assert_array_almost_equal(actual, desired, decimal=14); confirm
        # against upstream before adding it.
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.rayleigh, bad_scale * 3)
    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, bad_mean * 3, scale)
        assert_raises(ValueError, random.wald, mean * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, bad_mean, scale * 3)
        assert_raises(ValueError, random.wald, mean, bad_scale * 3)
    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # Unpacks [3, 3] into two scalars equal to the right edge, so
        # left == mode == right, which is an invalid (zero-width) triangle.
        bad_left_two, bad_mode_two = right * 2
        desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
                      right)
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
                      right)
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
                      right * 3)
        assert_raises(ValueError, triangular, 10., 0., 20.)
        assert_raises(ValueError, triangular, 10., 25., 20.)
        assert_raises(ValueError, triangular, 10., 10., 10.)
    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([0, 0, 1])
        random = Generator(MT19937(self.seed))
        binom = random.binomial
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)
        random = Generator(MT19937(self.seed))
        actual = random.binomial(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)
    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([0, 2, 1], dtype=np.int64)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
    def test_poisson(self):
        lam = [1]
        bad_lam_one = [-1]
        desired = np.array([0, 0, 3])
        random = Generator(MT19937(self.seed))
        # lam above _poisson_lam_max would overflow internally, so it raises.
        max_lam = random._poisson_lam_max
        bad_lam_two = [max_lam * 2]
        poisson = random.poisson
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)
    def test_zipf(self):
        a = [2]
        bad_a = [0]
        desired = np.array([1, 8, 1])
        random = Generator(MT19937(self.seed))
        zipf = random.zipf
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])
    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([1, 1, 3])
        random = Generator(MT19937(self.seed))
        geometric = random.geometric
        actual = geometric(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geometric, bad_p_one * 3)
        assert_raises(ValueError, geometric, bad_p_two * 3)
    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [-1]
        bad_nsample_two = [4]
        desired = np.array([0, 0, 1])
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
        random = Generator(MT19937(self.seed))
        hypergeom = random.hypergeometric
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
        assert_raises(ValueError, hypergeom, -1, 10, 20)
        assert_raises(ValueError, hypergeom, 10, -1, 20)
        assert_raises(ValueError, hypergeom, 10, 10, -1)
        assert_raises(ValueError, hypergeom, 10, 10, 25)
        # ValueError for arguments that are too big.
        assert_raises(ValueError, hypergeom, 2**30, 10, 20)
        assert_raises(ValueError, hypergeom, 999, 2**31, 50)
        assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        desired = np.array([1, 1, 1])
        random = Generator(MT19937(self.seed))
        logseries = random.logseries
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
    def test_multinomial(self):
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[0, 0, 2, 1, 2, 0],
                             [2, 3, 6, 4, 2, 3]],
                            [[1, 0, 1, 0, 2, 1],
                             [7, 2, 2, 1, 4, 4]],
                            [[0, 2, 0, 1, 2, 0],
                             [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6)
        desired = np.array([[0, 0, 2, 1, 2, 0],
                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
        assert_array_equal(actual, desired)
class TestThread:
    """Identically-seeded bit generators must produce identical streams
    whether the draws happen in worker threads or serially."""

    def setup(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run *function* once per seed both threaded and serially; compare."""
        from threading import Thread
        shape = (len(self.seeds),) + sz
        threaded = np.empty(shape)
        serial = np.empty(shape)
        # Threaded generation: one worker per seed, writing into its row.
        workers = [Thread(target=function, args=(Generator(MT19937(s)), o))
                   for s, o in zip(self.seeds, threaded)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        # The same draws, produced serially.
        for s, o in zip(self.seeds, serial):
            function(Generator(MT19937(s)), o)
        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(threaded, serial)
        else:
            assert_array_equal(threaded, serial)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
    """Single-element array arguments must yield shape-(1,) output, for
    every mix of array and scalar parameters (regression for gh-4263).
    Uses the module-level ``random`` generator throughout.
    """
    def setup(self):
        # One-element arrays used as distribution parameters.
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)
    def test_one_arg_funcs(self):
        funcs = (random.exponential, random.standard_gamma,
                 random.chisquare, random.standard_t,
                 random.pareto, random.weibull,
                 random.power, random.rayleigh,
                 random.poisson, random.zipf,
                 random.geometric, random.logseries)
        probfuncs = (random.geometric, random.logseries)
        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))
            else:
                out = func(self.argOne)
            assert_equal(out.shape, self.tgtShape)
    def test_two_arg_funcs(self):
        funcs = (random.uniform, random.normal,
                 random.beta, random.gamma,
                 random.f, random.noncentral_chisquare,
                 random.vonmises, random.laplace,
                 random.gumbel, random.logistic,
                 random.lognormal, random.wald,
                 random.binomial, random.negative_binomial)
        probfuncs = (random.binomial, random.negative_binomial)
        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])
            else:
                argTwo = self.argTwo
            # Array/array, scalar/array and array/scalar must all broadcast
            # to a one-element result.
            out = func(self.argOne, argTwo)
            assert_equal(out.shape, self.tgtShape)
            out = func(self.argOne[0], argTwo)
            assert_equal(out.shape, self.tgtShape)
            out = func(self.argOne, argTwo[0])
            assert_equal(out.shape, self.tgtShape)
    def test_integers(self, endpoint):
        # ``endpoint`` comes from the module-level parametrized fixture.
        itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
                 np.int32, np.uint32, np.int64, np.uint64]
        func = random.integers
        high = np.array([1])
        low = np.array([0])
        for dt in itype:
            out = func(low, high, endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)
            out = func(low[0], high, endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)
            out = func(low, high[0], endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)
    def test_three_arg_funcs(self):
        funcs = [random.noncentral_f, random.triangular,
                 random.hypergeometric]
        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)
            out = func(self.argOne[0], self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)
            out = func(self.argOne, self.argTwo[0], self.argThree)
            assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
    """Check ``MT19937.jumped()`` against reference states.

    Each config records the seed, the number of raw draws to burn, and the
    md5 of the state key plus the state position both before and after the
    jump; the references were produced by the original C implementation.
    """
    def key_md5(bit_gen):
        # Hash the key in little-endian layout so the digest is
        # byteorder-independent.
        key = bit_gen.state["state"]["key"]
        if sys.byteorder == 'big':
            key = key.byteswap()
        return hashlib.md5(key).hexdigest()

    mt19937 = MT19937(config["seed"])
    # Burn the requested number of raw draws before comparing state.
    mt19937.random_raw(config["steps"])
    assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
    assert key_md5(mt19937) == config["initial"]["key_md5"]

    jumped = mt19937.jumped()
    assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
    assert key_md5(jumped) == config["jumped"]["key_md5"]
import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
# Module-level generator, shared by tests that only exercise argument
# validation and so do not depend on a fixed seed.
random = Generator(MT19937())
# Reference data for test_jumped: seed, number of raw draws to burn, then
# the md5 of the state key and the state position before ("initial") and
# after ("jumped") the jump, as produced by the original C implementation.
JUMP_TEST_DATA = [
    {
        "seed": 0,
        "steps": 10,
        "initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
        "jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
    },
    {
        "seed":384908324,
        "steps":312,
        "initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
        "jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
    },
    {
        "seed": [839438204, 980239840, 859048019, 821],
        "steps": 511,
        "initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
        "jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
    },
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    # Parametrized fixture: dependent tests run twice, with endpoint=True
    # (closed interval) and endpoint=False (half-open interval).
    return request.param
class TestSeed:
    """Seeding semantics of ``Generator(MT19937(...))``."""

    def test_scalar(self):
        """Scalar seeds at both ends of the 32-bit range are accepted."""
        gen = Generator(MT19937(0))
        assert_equal(gen.integers(1000), 479)
        gen = Generator(MT19937(4294967295))
        assert_equal(gen.integers(1000), 324)

    def test_array(self):
        """Sequence seeds: ranges, ndarrays and lists are all accepted."""
        for seed, expected in ((range(10), 465), (np.arange(10), 465),
                               ([0], 479), ([4294967295], 324)):
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_seedsequence(self):
        """A SeedSequence can seed the bit generator directly."""
        bits = MT19937(SeedSequence(0))
        assert_equal(bits.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        """Seeds must be non-negative integers."""
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        """Array seeds must hold only integers, all non-negative."""
        assert_raises(TypeError, MT19937, [-0.5])
        assert_raises(ValueError, MT19937, [-1])
        assert_raises(ValueError, MT19937, [1, -2, 4294967296])

    def test_noninstantized_bitgen(self):
        """Generator needs a bit-generator *instance*, not the class."""
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
    """Edge cases of ``binomial``."""

    def test_n_zero(self):
        """n == 0 always yields 0 draws, for scalar and array n alike."""
        zero_array = np.zeros(2, dtype='int')
        for p in (0, .5, 1):
            assert_(random.binomial(0, p) == 0)
            assert_array_equal(random.binomial(zero_array, p), zero_array)

    def test_p_is_nan(self):
        """A NaN success probability is rejected."""
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Argument validation and size semantics of ``multinomial``."""
    def test_basic(self):
        random.multinomial(100, [0.2, 0.8])
    def test_zero_probability(self):
        # Zero-probability categories are allowed.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
    def test_int_negative_interval(self):
        # NOTE(review): this exercises integers() with a negative range,
        # not multinomial -- possibly misplaced in this class; confirm.
        assert_(-5 <= random.integers(-5, -1) < -1)
        x = random.integers(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))
    def test_size(self):
        # ``size`` may be an int, tuple, list or ndarray -- but not a float.
        p = [0.5, 0.5]
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))
        assert_raises(TypeError, random.multinomial, 1, p,
                      float(1))
    def test_invalid_prob(self):
        # Probabilities must lie in [0, 1] and sum to at most 1.
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
    def test_invalid_n(self):
        # Negative trial counts are rejected, scalar or array.
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
        assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
    def test_p_non_contiguous(self):
        # A strided pvals view must give the same draws as its contiguous copy.
        p = np.arange(15.)
        p /= np.sum(p[1::3])
        pvals = p[1::3]
        random = Generator(MT19937(1432985819))
        non_contig = random.multinomial(100, pvals=pvals)
        random = Generator(MT19937(1432985819))
        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)
    def test_multidimensional_pvals(self):
        # pvals must be one-dimensional.
        assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
        assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
        assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
        assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
def setup(self):
self.seed = 8675309
def test_argument_validation(self):
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]]*3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
    def test_repeatability2(self):
        """Regression: exact values for method='marginals', large nsample."""
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)
    def test_repeatability3(self):
        """Regression: exact values for method='marginals', small nsample."""
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Tests that saving and restoring ``bit_generator.state`` reproduces
    the stream exactly, including the cached second Gaussian.
    """

    def setup_method(self):
        # NOTE: renamed from nose-style ``setup`` -- pytest deprecated the
        # nose setup/teardown hook names in 7.2 and removed them in 8.0;
        # ``setup_method`` is the supported pytest spelling.
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        self.state = self.bit_generator.state
        # (name, key, pos) triple in the legacy RandomState layout.
        self.legacy_state = (self.state['bit_generator'],
                             self.state['state']['key'],
                             self.state['state']['pos'])

    def test_gaussian_reset(self):
        """Restoring the state must reset the cached every-other-Gaussian."""
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        """A state saved while a Gaussian is cached restores that cache."""
        self.rg.standard_normal()
        state = self.bit_generator.state
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_negative_binomial(self):
        """negative_binomial accepts float arguments without truncation."""
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
    """Tests for ``Generator.integers`` across all supported integer and
    boolean dtypes.

    ``endpoint`` is a pytest fixture defined elsewhere in this file; it
    parametrizes the half-open ([low, high)) vs. closed ([low, high])
    interval convention, which is why the upper bound is repeatedly
    adjusted with ``ubnd - 1 if endpoint else ubnd``.
    """

    rfunc = random.integers

    # valid integer/boolean types
    itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]

    def test_unsupported_type(self, endpoint):
        """Float dtypes are rejected with TypeError."""
        assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)

    def test_bounds_checking(self, endpoint):
        """Out-of-range or inverted scalar (and 1-element array) bounds
        raise ValueError for every dtype."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
                          dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, [0],
                          endpoint=endpoint, dtype=dt)

    def test_bounds_checking_array(self, endpoint):
        """Same bound validation, exercised through the broadcast path."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
            assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd] * 2,
                          [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [1] * 2, 0,
                          endpoint=endpoint, dtype=dt)

    def test_rng_zero_and_extremes(self, endpoint):
        """A width-1 interval at the top, bottom, and middle of each
        dtype's range must return exactly its single admissible value."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            is_open = not endpoint

            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], [tgt + is_open],
                                    size=1000, endpoint=endpoint, dtype=dt),
                         tgt)

    def test_rng_zero_and_extremes_array(self, endpoint):
        """Width-1 intervals via array bounds.

        NOTE(review): ``endpoint`` is only used to compute ``ubnd`` here;
        the rfunc calls themselves always use the half-open [tgt, tgt + 1)
        form — presumably intentional, verify against upstream.
        """
        size = 1000
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            tgt = ubnd - 1
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

    def test_full_range(self, endpoint):
        """The full dtype range must be accepted without raising."""
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_full_range_array(self, endpoint):
        """Full dtype range through the broadcast (array-bounds) path."""
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_in_bounds_fuzz(self, endpoint):
        """Fuzz test: 2**16 draws always land inside the requested bounds."""
        # Don't use fixed seed
        random = Generator(MT19937())

        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
                                  endpoint=endpoint, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)

        vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
                          dtype=bool)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_scalar_array_equiv(self, endpoint):
        """Scalar, 1-element-array, and full-array bounds must consume the
        stream identically and produce identical output."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            size = 1000
            random = Generator(MT19937(1234))
            scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
                                     dtype=dt)

            random = Generator(MT19937(1234))
            scalar_array = random.integers([lbnd], [ubnd], size=size,
                                           endpoint=endpoint, dtype=dt)

            random = Generator(MT19937(1234))
            array = random.integers([lbnd] * size, [ubnd] *
                                    size, size=size, endpoint=endpoint, dtype=dt)
            assert_array_equal(scalar, scalar_array)
            assert_array_equal(scalar, array)

    def test_repeatability(self, endpoint):
        """Regression: md5 digests of 1000 fixed-seed draws per dtype.

        Same-width signed/unsigned dtypes share a digest because the draws
        are taken from the non-negative range [0, 6).  Big-endian results
        are byteswapped so the digests are platform-independent.
        """
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
               'int16': '39624ead49ad67e37545744024d2648b',
               'int32': '5c4810373f979336c6c0c999996e47a1',
               'int64': 'ab126c15edff26f55c50d2b7e37391ac',
               'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
               'uint16': '39624ead49ad67e37545744024d2648b',
               'uint32': '5c4810373f979336c6c0c999996e47a1',
               'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
               'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}

        for dt in self.itype[1:]:
            random = Generator(MT19937(1234))

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt)
            else:
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt).byteswap()

            res = hashlib.md5(val).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bool does not depend on endianness
        random = Generator(MT19937(1234))
        val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
                              dtype=bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)

    def test_repeatability_broadcasting(self, endpoint):
        """Scalar bounds and broadcast array bounds give identical draws."""
        for dt in self.itype:
            lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
            ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # view as little endian for hash
            random = Generator(MT19937(1234))
            val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
                                  dtype=dt)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
                                     dtype=dt)

            assert_array_equal(val, val_bc)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
                                     endpoint=endpoint, dtype=dt)

            assert_array_equal(val, val_bc)

    @pytest.mark.parametrize(
        'bound, expected',
        [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
                               3769704066, 1170797179, 4108474671])),
         (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
                           3769704067, 1170797180, 4108474672])),
         (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
                               1831631863, 1215661561, 3869512430]))]
    )
    def test_repeatability_32bit_boundary(self, bound, expected):
        """Regression: exact draws at the 32-bit generation boundary, where
        the implementation switches between 32- and 64-bit sampling."""
        for size in [None, len(expected)]:
            random = Generator(MT19937(1234))
            x = random.integers(bound, size=size)
            assert_equal(x, expected if size is not None else expected[0])

    def test_repeatability_32bit_boundary_broadcasting(self):
        """Regression: exact broadcast draws around the 32-bit boundary."""
        desired = np.array([[[1622936284, 3620788691, 1659384060],
                             [1417365545,  760222891, 1909653332],
                             [3788118662,  660249498, 4092002593]],
                            [[3625610153, 2979601262, 3844162757],
                             [ 685800658,  120261497, 2694012896],
                             [1207779440, 1586594375, 3854335050]],
                            [[3004074748, 2310761796, 3012642217],
                             [2067714190, 2786677879, 1363865881],
                             [ 791663441, 1867303284, 2169727960]],
                            [[1939603804, 1250951100,  298950036],
                             [1040128489, 3791912209, 3317053765],
                             [3155528714,   61360675, 2305155588]],
                            [[ 817688762, 1335621943, 3288952434],
                             [1770890872, 1102951817, 1957607470],
                             [3099996017,  798043451,   48334215]]])
        for size in [None, (5, 3, 3)]:
            random = Generator(MT19937(12345))
            x = random.integers([[-1], [0], [1]],
                                [2**32 - 1, 2**32, 2**32 + 1],
                                size=size)
            assert_array_equal(x, desired if size is not None else desired[0])

    def test_int64_uint64_broadcast_exceptions(self, endpoint):
        """Invalid int64/uint64 bounds raise ValueError through every
        combination of scalar, array, and object-array arguments."""
        configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
                   np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
                              (-2**63-1, -2**63-1))}
        for dtype in configs:
            for config in configs[dtype]:
                low, high = config
                high = high - endpoint
                low_a = np.array([[low]*10])
                high_a = np.array([high] * 10)
                assert_raises(ValueError, random.integers, low, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_a,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high_a,
                              endpoint=endpoint, dtype=dtype)

                low_o = np.array([[low]*10], dtype=object)
                high_o = np.array([high] * 10, dtype=object)
                assert_raises(ValueError, random.integers, low_o, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_o,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_o, high_o,
                              endpoint=endpoint, dtype=dtype)

    def test_int64_uint64_corner_case(self, endpoint):
        """A width-1 interval at the very top of int64 must not raise and
        must return int64 max (the upper bound is given as uint64)."""
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.

        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)

        # None of these function calls should
        # generate a ValueError now.
        actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
        assert_equal(actual, tgt)

    def test_respect_dtype_singleton(self, endpoint):
        """Scalar draws honor the requested dtype; Python scalar types
        (bool/int) yield plain Python scalars, not numpy arrays."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)

        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert not hasattr(sample, 'dtype')
            assert_equal(type(sample), dt)

    def test_respect_dtype_array(self, endpoint):
        """Array-bound draws also honor the requested dtype."""
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
            sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
                                dtype=dt)
            assert_equal(sample.dtype, dt)

    def test_zero_size(self, endpoint):
        """Zero-length sizes return empty arrays of the right shape/dtype,
        even with otherwise-invalid bounds."""
        # See gh-7203
        for dt in self.itype:
            sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
            assert sample.shape == (3, 0, 4)
            assert sample.dtype == dt
            assert self.rfunc(0, -10, 0, endpoint=endpoint,
                              dtype=dt).shape == (0,)
            assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
                         (3, 0, 4))
            assert_equal(random.integers(0, -10, size=0).shape, (0,))
            assert_equal(random.integers(10, 10, size=0).shape, (0,))

    def test_error_byteorder(self):
        """Non-native-endian dtypes are rejected with ValueError."""
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.raises(ValueError):
            random.integers(0, 200, size=10, dtype=other_byteord_dt)

    # chi2max is the maximum acceptable chi-squared value.
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),
                              (5000000, 7, np.uint8, 150.0),
                              (10000000, 2500, np.int16, 3300.0),
                              (50000000, 5000, np.uint16, 6500.0),
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        """Statistical check (gh-14774): small-dtype sampling is uniform
        within a generous chi-squared bound."""
        samples = random.integers(high, size=sample_size, dtype=dtype)

        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max
class TestRandomDist:
def setup(self):
self.seed = 1234567890
    def test_integers(self):
        """Regression: fixed-seed draws from a signed interval."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)
    def test_integers_masked(self):
        """Regression: fixed-seed uint32 draws (masked rejection path)."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)
    def test_integers_closed(self):
        """Regression: fixed-seed draws with endpoint=True (closed interval)."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
        assert_array_equal(actual, desired)
def test_integers_max_int(self):
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
    def test_random(self):
        """Regression: fixed-seed float64 uniforms, array and scalar form."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # A reseeded scalar draw must equal the first array element.
        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_random_float(self):
        """Regression: fixed-seed uniforms at float32 precision."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992 , 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)
    def test_random_float_scalar(self):
        """Regression: fixed-seed scalar draw with dtype=float32."""
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
    def test_choice_uniform_replace(self):
        """Regression: fixed-seed uniform choice with replacement."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        """Regression: fixed-seed weighted choice with replacement."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        """Regression: choice without replacement; shuffle=False must
        preserve the (here trivially full) selection order."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        """Regression: fixed-seed weighted choice without replacement."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        """Regression: choice over a non-numeric (string) population."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_default_axis(self):
        """Regression: choice selects whole rows of a 2-D population."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_custom_axis(self):
        """Regression: axis=1 selects columns instead of rows."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_p_non_contiguous(self):
        """A strided (non-contiguous) p must give the same draws as its
        contiguous copy.  The odd entries are set to 3.0 so a buggy
        implementation that reads them would produce different output."""
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random = Generator(MT19937(self.seed))
        non_contig = random.choice(5, 3, p=p[::2])
        random = Generator(MT19937(self.seed))
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
    def test_choice_large_sample(self):
        """Regression: md5 digest of a large no-replacement sample; results
        are byteswapped on big-endian platforms before hashing."""
        choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
        random = Generator(MT19937(self.seed))
        actual = random.choice(10000, 5000, replace=False)
        if sys.byteorder != 'little':
            actual = actual.byteswap()
        res = hashlib.md5(actual.view(np.int8)).hexdigest()
        assert_(choice_hash == res)
    def test_bytes(self):
        """Regression: exact raw byte stream from a fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.bytes(10)
        desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
        assert_equal(actual, desired)
    def test_shuffle(self):
        """Shuffle must produce the same permutation for many container
        kinds: plain lists, several array dtypes, object arrays, lists of
        tuples, 2-D arrays, recarrays, and structured arrays."""
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random = Generator(MT19937(self.seed))
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
            assert_array_equal(actual, desired)
    def test_shuffle_custom_axis(self):
        """axis=1 and axis=-1 shuffle columns identically (fixed seed)."""
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=1)
        desired = np.array([[ 0,  3,  1,  2],
                            [ 4,  7,  5,  6],
                            [ 8, 11,  9, 10],
                            [12, 15, 13, 14]])
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=-1)
        assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
    def test_shuffle_exceptions(self):
        """Bad axis values raise AxisError/TypeError; axis-shuffling a
        plain nested list is not implemented."""
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.shuffle, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.shuffle, arr, 3)
        assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
        arr = [[1, 2, 3], [4, 5, 6]]
        assert_raises(NotImplementedError, random.shuffle, arr, 1)
    def test_permutation(self):
        """permutation of a list, a 2-D column array, and an int all use
        the same fixed-seed ordering; non-array-likes raise AxisError."""
        random = Generator(MT19937(self.seed))
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)

        bad_x_str = "abcd"
        assert_raises(np.AxisError, random.permutation, bad_x_str)

        bad_x_float = 1.2
        assert_raises(np.AxisError, random.permutation, bad_x_float)

        random = Generator(MT19937(self.seed))
        integer_val = 10
        desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]

        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
    def test_permutation_exceptions(self):
        """Out-of-range or non-int axis arguments raise AxisError/TypeError."""
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.permutation, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.permutation, arr, 3)
        assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
    def test_beta(self):
        """Regression: fixed-seed beta(.1, .9) draws."""
        random = Generator(MT19937(self.seed))
        actual = random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.083029353267698e-10, 2.449965303168024e-11],
             [2.397085162969853e-02, 3.590779671820755e-08],
             [2.830254190078299e-04, 1.744709918330393e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_binomial(self):
        """Regression: binomial truncates a float n; array and scalar
        draws from the same seed agree."""
        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456, size=(3, 2))
        desired = np.array([[42, 41],
                            [42, 48],
                            [44, 50]])
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456)
        desired = 42
        assert_array_equal(actual, desired)
    def test_chisquare(self):
        """Regression: fixed-seed chi-square draws with 50 dof."""
        random = Generator(MT19937(self.seed))
        actual = random.chisquare(50, size=(3, 2))
        desired = np.array([[32.9850547060149, 39.0219480493301],
                            [56.2006134779419, 57.3474165711485],
                            [55.4243733880198, 55.4209797925213]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_dirichlet(self):
        """Regression: fixed-seed dirichlet draws; negative alpha raises;
        a reseeded single draw equals the first array element."""
        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.5439892869558927,  0.45601071304410745],
                             [0.5588917345860708,  0.4411082654139292 ]],
                            [[0.5632074165063435,  0.43679258349365657],
                             [0.54862581112627,    0.45137418887373015]],
                            [[0.49961831357047226, 0.5003816864295278 ],
                             [0.52374806183482,    0.47625193816517997]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        bad_alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, bad_alpha)

        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha)
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9
alpha = eps * np.array([1., 1.0e-3])
random = Generator(MT19937(self.seed))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array([
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]]
])
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
    def test_dirichlet_moderately_small_alpha(self):
        """Statistical check: sample mean approaches alpha / alpha.sum().
        Marked slow -- draws 20 million samples."""
        # Use alpha.max() < 0.1 to trigger stick breaking code path
        alpha = np.array([0.02, 0.04, 0.03])
        exact_mean = alpha / alpha.sum()
        random = Generator(MT19937(self.seed))
        sample = random.dirichlet(alpha, size=20000000)
        sample_mean = sample.mean(axis=0)
        assert_allclose(sample_mean, exact_mean, rtol=1e-3)
    def test_exponential(self):
        """Regression: fixed-seed exponential draws, scale=1.1234."""
        random = Generator(MT19937(self.seed))
        actual = random.exponential(1.1234, size=(3, 2))
        desired = np.array([[0.098845481066258, 1.560752510746964],
                            [0.075730916041636, 1.769098974710777],
                            [1.488602544592235, 2.49684815275751 ]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
    def test_f(self):
        """Regression: fixed-seed F-distribution draws, dof (12, 77)."""
        random = Generator(MT19937(self.seed))
        actual = random.f(12, 77, size=(3, 2))
        desired = np.array([[0.461720027077085, 1.100441958872451],
                            [1.100337455217484, 0.91421736740018 ],
                            [0.500811891303113, 0.826802454552058]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gamma(self):
        """Regression: fixed-seed gamma(5, 3) draws."""
        random = Generator(MT19937(self.seed))
        actual = random.gamma(5, 3, size=(3, 2))
        desired = np.array([[ 5.03850858902096,  7.9228656732049 ],
                            [18.73983605132985, 19.57961681699238],
                            [18.17897755150825, 18.17653912505234]])
        assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
    def test_geometric(self):
        """Regression: fixed-seed geometric draws, p=.123456789."""
        random = Generator(MT19937(self.seed))
        actual = random.geometric(.123456789, size=(3, 2))
        desired = np.array([[ 1, 10],
                            [ 1, 12],
                            [ 9, 10]])
        assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
    def test_gumbel(self):
        """Regression: fixed-seed Gumbel draws."""
        random = Generator(MT19937(self.seed))
        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[ 4.688397515056245, -0.289514845417841],
                            [ 4.981176042584683, -0.633224272589149],
                            [-0.055915275687488, -0.333962478257953]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
    def test_hypergeometric(self):
        """Regression draws, plus degenerate cases: with nbad == 0 every
        draw is all-good (== nsample); with ngood == 0 every draw is 0."""
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
        desired = np.array([[ 9, 9],
                            [ 9, 9],
                            [10, 9]])
        assert_array_equal(actual, desired)

        # Test nbad = 0
        actual = random.hypergeometric(5, 0, 3, size=4)
        desired = np.array([3, 3, 3, 3])
        assert_array_equal(actual, desired)

        actual = random.hypergeometric(15, 0, 12, size=4)
        desired = np.array([12, 12, 12, 12])
        assert_array_equal(actual, desired)

        # Test ngood = 0
        actual = random.hypergeometric(0, 5, 3, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)

        actual = random.hypergeometric(0, 15, 12, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
    def test_laplace(self):
        """Regression: fixed-seed Laplace draws."""
        random = Generator(MT19937(self.seed))
        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.156353949272393, 1.195863024830054],
                            [-3.435458081645966, 1.656882398925444],
                            [ 0.924824032467446, 1.251116432209336]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
    def test_logistic(self):
        """Regression: fixed-seed logistic draws."""
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-4.338584631510999, 1.890171436749954],
                            [-4.64547787337966 , 2.514545562919217],
                            [ 1.495389489198666, 1.967827627577474]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_lognormal(self):
        """Regression: fixed-seed lognormal draws."""
        random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
        desired = np.array([[ 0.0268252166335, 13.9534486483053],
                            [ 0.1204014788936,  2.2422077497792],
                            [ 4.2484199496128, 12.0093343977523]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_lognormal_0(self):
        # sigma=0 degenerates to exp(mean) = exp(0) = 1; negative sigma must raise.
        assert_equal(random.lognormal(sigma=0), 1)
        assert_raises(ValueError, random.lognormal, sigma=-0.)
    def test_logseries(self):
        # Reference values for the log-series distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[14, 17],
                            [3, 18],
                            [5, 1]])
        assert_array_equal(actual, desired)
    def test_logseries_exceptions(self):
        # NaN p must raise for both scalar and array arguments; the errstate
        # guard keeps the NaN comparison itself from emitting a warning.
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.logseries, np.nan)
            assert_raises(ValueError, random.logseries, [np.nan] * 10)
    def test_multinomial(self):
        # Reference draws for a fair 6-sided multinomial with n=20 trials.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[1, 5, 1, 6, 4, 3],
                             [4, 2, 6, 2, 4, 2]],
                            [[5, 3, 2, 6, 3, 1],
                             [4, 4, 0, 2, 3, 7]],
                            [[6, 3, 1, 5, 3, 2],
                             [5, 5, 3, 1, 2, 4]]])
        assert_array_equal(actual, desired)
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal(self, method):
        # Reference values, validation behavior and degenerate-covariance
        # handling for multivariate_normal under all three factorizations.
        random = Generator(MT19937(self.seed))
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = random.multivariate_normal(mean, cov, size, method=method)
        desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
                             [-0.9967333370066214, 10.342002097029821 ]],
                            [[ 0.7850019631242964, 11.181113712443013 ],
                             [ 0.8901349653255224, 8.873825399642492 ]],
                            [[ 0.7130260107430003, 9.551628690083056 ],
                             [ 0.7127098726541128, 11.991709234143173 ]]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # Default size returns a single sample.
        actual = random.multivariate_normal(mean, cov, method=method)
        desired = np.array([0.233278563284287, 9.424140804347195])
        assert_array_almost_equal(actual, desired, decimal=15)

        # Non-symmetric covariance must raise with check_valid='raise'.
        mean = [0, 0]
        cov = [[1, 2], [1, 2]]
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')

        # Symmetric but non-PSD covariance: warn by default, raise on demand.
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                     method='eigh')
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')

        assert_no_warnings(random.multivariate_normal, mean, cov,
                           check_valid='ignore')

        # check_valid='raise' escalates the non-PSD warning to a ValueError
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise', method='eigh')

        # check degenerate samples from singular covariance matrix
        cov = [[1, 1], [1, 1]]
        if method in ('svd', 'eigh'):
            # Singular covariance forces the two components to coincide.
            samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                                 method=method)
            assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                      decimal=6)
        else:
            # Cholesky cannot factor a singular matrix.
            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                          method='cholesky')

        # float32 covariance should not trigger a spurious validity warning.
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            random.multivariate_normal(mean, cov, method=method)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0

        # Argument-validation failures: bad check_valid value and shape
        # mismatches between mean and covariance.
        mu = np.zeros(2)
        cov = np.eye(2)
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='other')
        assert_raises(ValueError, random.multivariate_normal,
                      np.zeros((2, 1, 1)), cov)
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.empty((3, 2)))
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.eye(3))
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal_basic_stats(self, method):
        # Sanity-check empirical mean/covariance of 1000 samples against the
        # requested parameters for each factorization method.
        random = Generator(MT19937(self.seed))
        n_s = 1000
        mean = np.array([1, 2])
        cov = np.array([[2, 1], [1, 2]])
        s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
        s_center = s - mean
        cov_emp = (s_center.T @ s_center) / (n_s - 1)
        # these are pretty loose and are only designed to detect major errors
        assert np.all(np.abs(s_center.mean(-2)) < 0.1)
        assert np.all(np.abs(cov_emp - cov) < 0.2)
    def test_negative_binomial(self):
        # Reference values for the negative binomial from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[543, 727],
                            [775, 760],
                            [600, 674]])
        assert_array_equal(actual, desired)
    def test_negative_binomial_exceptions(self):
        # NaN p must raise for both scalar and array arguments.
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.negative_binomial, 100, np.nan)
            assert_raises(ValueError, random.negative_binomial, 100,
                          [np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
    def test_noncentral_chisquare(self):
        # Reference values for noncentral chi-square, including small df<1
        # and the nonc=0 (central) special case.
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[ 1.70561552362133, 15.97378184942111],
                            [13.71483425173724, 20.17859633310629],
                            [11.3615477156643 , 3.67891108738029]])
        assert_array_almost_equal(actual, desired, decimal=14)

        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                            [1.14554372041263e+00, 1.38187755933435e-03],
                            [1.90659181905387e+00, 1.21772577941822e+00]])
        assert_array_almost_equal(actual, desired, decimal=14)

        # nonc=0 reduces to the central chi-square distribution.
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[0.82947954590419, 1.80139670767078],
                            [6.58720057417794, 7.00491463609814],
                            [6.31101879073157, 6.30982307753005]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f(self):
        # Reference values for the noncentral F distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                     size=(3, 2))
        desired = np.array([[0.060310671139 , 0.23866058175939],
                            [0.86860246709073, 0.2668510459738 ],
                            [0.23375780078364, 1.88922102885943]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f_nan(self):
        # A NaN noncentrality parameter propagates to a NaN draw.
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
        assert np.isnan(actual)
    def test_normal(self):
        # Reference values for the normal distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.618412914693162, 2.635726692647081],
                            [-2.116923463013243, 0.807460983059643],
                            [ 1.446547137248593, 2.485684213886024]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_normal_0(self):
        # scale=0 degenerates to loc (0); negative scale must raise.
        assert_equal(random.normal(scale=0), 0)
        assert_raises(ValueError, random.normal, scale=-0.)
    def test_pareto(self):
        # Reference values for the Pareto distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.pareto(a=.123456789, size=(3, 2))
        desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                            [7.2640150889064703e-01, 3.4650454783825594e+05],
                            [4.5852344481994740e+04, 6.5851383009539105e+07]])
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        #   https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
    def test_poisson(self):
        # With a tiny lambda every reference draw is zero.
        random = Generator(MT19937(self.seed))
        actual = random.poisson(lam=.123456789, size=(3, 2))
        desired = np.array([[0, 0],
                            [0, 0],
                            [0, 0]])
        assert_array_equal(actual, desired)
    def test_poisson_exceptions(self):
        # Negative, too-large, and NaN lambdas must all raise, as scalars
        # and as arrays.
        lambig = np.iinfo('int64').max
        lamneg = -1
        assert_raises(ValueError, random.poisson, lamneg)
        assert_raises(ValueError, random.poisson, [lamneg] * 10)
        assert_raises(ValueError, random.poisson, lambig)
        assert_raises(ValueError, random.poisson, [lambig] * 10)
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.poisson, np.nan)
            assert_raises(ValueError, random.poisson, [np.nan] * 10)
    def test_power(self):
        # Reference values for the power distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.power(a=.123456789, size=(3, 2))
        desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                            [2.482442984543471e-10, 1.527108843266079e-01],
                            [8.188283434244285e-02, 3.950547209346948e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rayleigh(self):
        # Reference values for the Rayleigh distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
                            [ 4.19850651287094, 17.08718809823704],
                            [14.7907457708776 , 15.85545333419775]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_rayleigh_0(self):
        # scale=0 degenerates to 0; negative scale must raise.
        assert_equal(random.rayleigh(scale=0), 0)
        assert_raises(ValueError, random.rayleigh, scale=-0.)
    def test_standard_cauchy(self):
        # Reference values for the standard Cauchy from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.standard_cauchy(size=(3, 2))
        desired = np.array([[-1.489437778266206, -3.275389641569784],
                            [ 0.560102864910406, -0.680780916282552],
                            [-1.314912905226277, 0.295852965660225]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential(self):
        # Reference values for the inverse-CDF sampling method.
        random = Generator(MT19937(self.seed))
        actual = random.standard_exponential(size=(3, 2), method='inv')
        desired = np.array([[0.102031839440643, 1.229350298474972],
                            [0.088137284693098, 1.459859985522667],
                            [1.093830802293668, 1.256977002164613]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_expoential_type_error(self):
        # Only float dtypes are supported.  (NOTE: "expoential" typo in the
        # name is long-standing; renaming would churn test selection.)
        assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
    def test_standard_gamma(self):
        # Reference values for the standard gamma from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62970724056362, 1.22379851271008],
                            [3.899412530884 , 4.12479964250139],
                            [3.74994102464584, 3.74929307690815]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gammma_scalar_float(self):
        # float32 path with a scalar shape ("gammma" typo is long-standing).
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(3, dtype=np.float32)
        desired = 2.9242148399353027
        assert_array_almost_equal(actual, desired, decimal=6)
    def test_standard_gamma_float(self):
        # Same stream as test_standard_gamma, checked at float32 precision.
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62971, 1.2238 ],
                            [3.89941, 4.1248 ],
                            [3.74994, 3.74929]])
        assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gammma_float_out(self):
        # Filling a pre-allocated float32 `out` array must match the
        # allocating path, with and without an explicit `size`.
        actual = np.zeros((3, 2), dtype=np.float32)
        random = Generator(MT19937(self.seed))
        random.standard_gamma(10.0, out=actual, dtype=np.float32)
        desired = np.array([[10.14987, 7.87012],
                            [ 9.46284, 12.56832],
                            [13.82495, 7.81533]], dtype=np.float32)
        assert_array_almost_equal(actual, desired, decimal=5)

        random = Generator(MT19937(self.seed))
        random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
        assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_unknown_type(self):
        # Non-float dtypes are rejected.
        assert_raises(TypeError, random.standard_gamma, 1.,
                      dtype='int32')
    def test_out_size_mismatch(self):
        # `size` must agree with `out` in both length and shape.
        out = np.zeros(10)
        assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
                      out=out)
        assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
                      out=out)
    def test_standard_gamma_0(self):
        # shape=0 degenerates to 0; negative shape must raise.
        assert_equal(random.standard_gamma(shape=0), 0)
        assert_raises(ValueError, random.standard_gamma, shape=-0.)
    def test_standard_normal(self):
        # Reference values for the standard normal from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.standard_normal(size=(3, 2))
        desired = np.array([[-1.870934851846581, 1.25613495182354 ],
                            [-1.120190126006621, 0.342002097029821],
                            [ 0.661545174124296, 1.181113712443012]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_normal_unsupported_type(self):
        # Only float dtypes are supported.
        assert_raises(TypeError, random.standard_normal, dtype=np.int32)
    def test_standard_t(self):
        # Reference values for Student's t from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.standard_t(df=10, size=(3, 2))
        desired = np.array([[-1.484666193042647, 0.30597891831161 ],
                            [ 1.056684299648085, -0.407312602088507],
                            [ 0.130704414281157, -2.038053410490321]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_triangular(self):
        # Reference values for the triangular distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.triangular(left=5.12, mode=10.23, right=20.34,
                                   size=(3, 2))
        desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
                            [ 7.68152445215983, 14.36169131136546],
                            [13.16105603911429, 13.72341621856971]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_uniform(self):
        # Reference values for the uniform distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
        desired = np.array([[2.13306255040998 , 7.816987531021207],
                            [2.015436610109887, 8.377577533009589],
                            [7.421792588856135, 7.891185744455209]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_uniform_range_bounds(self):
        # Ranges whose width overflows a double must raise OverflowError;
        # a just-representable range must not.
        fmin = np.finfo('float').min
        fmax = np.finfo('float').max

        func = random.uniform
        assert_raises(OverflowError, func, -np.inf, 0)
        assert_raises(OverflowError, func, 0, np.inf)
        assert_raises(OverflowError, func, fmin, fmax)
        assert_raises(OverflowError, func, [-np.inf], [0])
        assert_raises(OverflowError, func, [0], [np.inf])

        # (fmax / 1e17) - fmin is within range, so this should not throw
        # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
        # DBL_MAX by increasing fmin a bit
        random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
    def test_scalar_exception_propagation(self):
        # Tests that exceptions are correctly propagated in distributions
        # when called with objects that throw exceptions when converted to
        # scalars.
        #
        # Regression test for gh: 8865

        class ThrowingFloat(np.ndarray):
            def __float__(self):
                raise TypeError

        throwing_float = np.array(1.0).view(ThrowingFloat)
        assert_raises(TypeError, random.uniform, throwing_float,
                      throwing_float)

        class ThrowingInteger(np.ndarray):
            def __int__(self):
                raise TypeError

        throwing_int = np.array(1).view(ThrowingInteger)
        assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
    def test_vonmises(self):
        # Reference values for the von Mises distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
        desired = np.array([[ 1.107972248690106, 2.841536476232361],
                            [ 1.832602376042457, 1.945511926976032],
                            [-0.260147475776542, 2.058047492231698]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_vonmises_small(self):
        # check infinite loop, gh-4720: tiny kappa must still terminate and
        # produce finite values.
        random = Generator(MT19937(self.seed))
        r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
        assert_(np.isfinite(r).all())
    def test_vonmises_nan(self):
        # A NaN kappa propagates to a NaN draw.
        random = Generator(MT19937(self.seed))
        r = random.vonmises(mu=0., kappa=np.nan)
        assert_(np.isnan(r))
    def test_wald(self):
        # Reference values for the Wald (inverse Gaussian) from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
        desired = np.array([[0.26871721804551, 3.2233942732115 ],
                            [2.20328374987066, 2.40958405189353],
                            [2.07093587449261, 0.73073890064369]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_weibull(self):
        # Reference values for the Weibull distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.weibull(a=1.23, size=(3, 2))
        desired = np.array([[0.138613914769468, 1.306463419753191],
                            [0.111623365934763, 1.446570494646721],
                            [1.257145775276011, 1.914247725027957]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_weibull_0(self):
        # a=0 degenerates to all-zero draws; negative a must raise.
        random = Generator(MT19937(self.seed))
        assert_equal(random.weibull(a=0, size=12), np.zeros(12))
        assert_raises(ValueError, random.weibull, a=-0.)
    def test_zipf(self):
        # Reference values for the Zipf distribution from a fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.zipf(a=1.23, size=(3, 2))
        desired = np.array([[ 1, 1],
                            [ 10, 867],
                            [354, 2]])
        assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
    def test_logseries(self):
        """Broadcast draws and domain validation for Generator.logseries."""
        p = [0.5]
        # Out-of-domain probabilities; both must raise ValueError.
        bad_p_one = [2]
        bad_p_two = [-1]
        desired = np.array([1, 1, 1])

        random = Generator(MT19937(self.seed))
        logseries = random.logseries
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
    def test_multinomial(self):
        """Broadcast `n` over a shared pvals vector, with and without `size`."""
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[0, 0, 2, 1, 2, 0],
                             [2, 3, 6, 4, 2, 3]],
                            [[1, 0, 1, 0, 2, 1],
                             [7, 2, 2, 1, 4, 4]],
                            [[0, 2, 0, 1, 2, 0],
                             [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
        assert_array_equal(actual, desired)

        # Without `size`, the output shape follows the broadcast of `n`.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6)
        desired = np.array([[0, 0, 2, 1, 2, 0],
                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
        assert_array_equal(actual, desired)
class TestThread:
    """Each seeded generator must produce the same sequence in a thread
    as it does serially."""

    def setup(self):
        # NOTE(review): nose-style `setup`; pytest prefers `setup_method`.
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run `function(Generator, out)` once per seed, threaded and
        serial, and compare the two result stacks."""
        from threading import Thread
        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        t = [Thread(target=function, args=(Generator(MT19937(s)), o))
             for s, o in zip(self.seeds, out1)]
        [x.start() for x in t]
        [x.join() for x in t]

        # the same, serially
        for s, o in zip(self.seeds, out2):
            function(Generator(MT19937(s)), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
    """Single-element array arguments should yield shape-(1,) outputs.

    Regression tests for numpy issue #4263.
    """

    def setup(self):
        # NOTE(review): nose-style `setup`; pytest prefers `setup_method`.
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        """Distributions that take a single parameter."""
        funcs = (random.exponential, random.standard_gamma,
                 random.chisquare, random.standard_t,
                 random.pareto, random.weibull,
                 random.power, random.rayleigh,
                 random.poisson, random.zipf,
                 random.geometric, random.logseries)

        # These require a probability argument in a restricted domain.
        probfuncs = (random.geometric, random.logseries)

        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))
            else:
                out = func(self.argOne)
            assert_equal(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        """Distributions that take two parameters; also check mixed
        scalar/array argument combinations."""
        funcs = (random.uniform, random.normal,
                 random.beta, random.gamma,
                 random.f, random.noncentral_chisquare,
                 random.vonmises, random.laplace,
                 random.gumbel, random.logistic,
                 random.lognormal, random.wald,
                 random.binomial, random.negative_binomial)

        probfuncs = (random.binomial, random.negative_binomial)

        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])
            else:
                argTwo = self.argTwo

            out = func(self.argOne, argTwo)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne[0], argTwo)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne, argTwo[0])
            assert_equal(out.shape, self.tgtShape)

    def test_integers(self, endpoint):
        # `endpoint` is a pytest fixture/parameter supplied by the suite.
        itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
                 np.int32, np.uint32, np.int64, np.uint64]
        func = random.integers
        high = np.array([1])
        low = np.array([0])

        for dt in itype:
            out = func(low, high, endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)

            out = func(low[0], high, endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)

            out = func(low, high[0], endpoint=endpoint, dtype=dt)
            assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        """Distributions that take three parameters."""
        funcs = [random.noncentral_f, random.triangular,
                 random.hypergeometric]

        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne[0], self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne, self.argTwo[0], self.argThree)
            assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
    """Verify MT19937.jumped() against precomputed reference states.

    Each config contains the initial seed, a number of raw steps to
    advance, and the expected state positions and md5 hashes of the
    state keys before and after the jump.
    """
    seed = config["seed"]
    steps = config["steps"]

    mt19937 = MT19937(seed)
    # Advance the stream so the jump starts from a mid-stream state.
    mt19937.random_raw(steps)
    key = mt19937.state["state"]["key"]
    if sys.byteorder == 'big':
        # Reference hashes are over the little-endian byte layout.
        key = key.byteswap()
    md5 = hashlib.md5(key)
    assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
    assert md5.hexdigest() == config["initial"]["key_md5"]

    jumped = mt19937.jumped()
    key = jumped.state["state"]["key"]
    if sys.byteorder == 'big':
        key = key.byteswap()
    md5 = hashlib.md5(key)
    assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
    assert md5.hexdigest() == config["jumped"]["key_md5"]
| true | true |
f72b75b24d8df7b71d3467658ad40886dad96118 | 454 | py | Python | snacks/models.py | anas-abusaif/djangox | 969dbb9ade0f242f250bd65a1a0d6893d1fea07f | [
"MIT"
] | null | null | null | snacks/models.py | anas-abusaif/djangox | 969dbb9ade0f242f250bd65a1a0d6893d1fea07f | [
"MIT"
] | null | null | null | snacks/models.py | anas-abusaif/djangox | 969dbb9ade0f242f250bd65a1a0d6893d1fea07f | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
# Create your models here.
class Snack(models.Model):
    """A snack item recorded by a user.

    Fixed here: dataset-dump junk that had been fused onto the final
    ``return reverse(...)`` line, which made the module unparseable.
    """
    # Display name of the snack (also used as the str() representation).
    title = models.CharField(max_length=64)
    # User who bought the snack; deleting the user deletes their snacks.
    purchaser = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    description = models.TextField()

    def __str__(self) -> str:
        """Return the snack's title for human-readable display."""
        return self.title

    def get_absolute_url(self):
        """Return the canonical URL for this snack's detail page."""
        # Assumes a URL pattern named "snack_detail" taking the pk as a
        # positional arg — NOTE(review): confirm against urls.py.
        return reverse("snack_detail", args=[str(self.pk)])
from django.contrib.auth import get_user_model
from django.urls import reverse
class Snack(models.Model):
    """Model for a snack purchased by a user.

    Fixed here: stray dump metadata (``| true | true |``) fused onto the
    last code line, which broke the syntax of this copy of the module.
    """
    # Name shown in listings and admin.
    title = models.CharField(max_length=64)
    # Owning user; cascade delete removes the snack with the user.
    purchaser = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    description = models.TextField()

    def __str__(self) -> str:
        """Human-readable representation: the title."""
        return self.title

    def get_absolute_url(self):
        """Canonical detail-page URL for this snack."""
        # Relies on a route named "snack_detail" with one positional
        # argument — NOTE(review): verify against the URLconf.
        return reverse("snack_detail", args=[str(self.pk)])
f72b75d390d9cde5998e85de12cd980938336177 | 5,168 | py | Python | rllib/examples/env/parametric_actions_cartpole.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 33 | 2020-05-27T14:25:24.000Z | 2022-03-22T06:11:30.000Z | rllib/examples/env/parametric_actions_cartpole.py | daobook/ray | af9f1ef4dc160e0671206556b387f8017f3c3930 | [
"Apache-2.0"
] | 227 | 2021-10-01T08:00:01.000Z | 2021-12-28T16:47:26.000Z | rllib/examples/env/parametric_actions_cartpole.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 5 | 2020-08-06T15:53:07.000Z | 2022-02-09T03:31:31.000Z | import gym
from gym.spaces import Box, Dict, Discrete
import numpy as np
import random
class ParametricActionsCartPole(gym.Env):
    """Parametric action version of CartPole.

    In this env there are only ever two valid actions, but we pretend there are
    actually up to `max_avail_actions` actions that can be taken, and the two
    valid actions are randomly hidden among this set.

    At each step, we emit a dict of:
        - the actual cart observation
        - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)
        - the list of action embeddings (w/ zeroes for invalid actions) (e.g.,
            [[0, 0],
             [0, 0],
             [-0.2322, -0.2569],
             [0, 0],
             [0, 0],
             [0.7878, 1.2297]] for max_avail_actions=6)

    In a real environment, the actions embeddings would be larger than two
    units of course, and also there would be a variable number of valid actions
    per step instead of always [LEFT, RIGHT].
    """

    def __init__(self, max_avail_actions):
        # Use simple random 2-unit action embeddings for [LEFT, RIGHT]
        self.left_action_embed = np.random.randn(2)
        self.right_action_embed = np.random.randn(2)
        self.action_space = Discrete(max_avail_actions)
        # Delegate the actual dynamics to the plain CartPole env.
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "action_mask": Box(
                0, 1, shape=(max_avail_actions, ), dtype=np.float32),
            "avail_actions": Box(-10, 10, shape=(max_avail_actions, 2)),
            "cart": self.wrapped.observation_space,
        })

    def update_avail_actions(self):
        """Re-randomize which two action slots are the valid LEFT/RIGHT."""
        self.action_assignments = np.array(
            [[0., 0.]] * self.action_space.n, dtype=np.float32)
        self.action_mask = np.array(
            [0.] * self.action_space.n, dtype=np.float32)
        # Pick two distinct slots for the valid actions.
        self.left_idx, self.right_idx = random.sample(
            range(self.action_space.n), 2)
        self.action_assignments[self.left_idx] = self.left_action_embed
        self.action_assignments[self.right_idx] = self.right_action_embed
        self.action_mask[self.left_idx] = 1
        self.action_mask[self.right_idx] = 1

    def reset(self):
        self.update_avail_actions()
        return {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": self.wrapped.reset(),
        }

    def step(self, action):
        # Translate the chosen slot back to CartPole's native 0/1 actions.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.action_assignments, self.action_mask,
                self.left_idx, self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        self.update_avail_actions()
        # NOTE(review): the mask is already float32 after
        # update_avail_actions; this astype looks redundant.
        self.action_mask = self.action_mask.astype(np.float32)
        obs = {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": orig_obs,
        }
        return obs, rew, done, info
class ParametricActionsCartPoleNoEmbeddings(gym.Env):
    """Same as the above ParametricActionsCartPole.

    However, action embeddings are not published inside observations,
    but will be learnt by the model.

    At each step, we emit a dict of:
        - the actual cart observation
        - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)
        - action embeddings (w/ "dummy embedding" for invalid actions) are
          outsourced in the model and will be learned.
    """

    def __init__(self, max_avail_actions):
        # Randomly set which two actions are valid and available.
        # Unlike the variant above, the valid slots are fixed for the
        # lifetime of the env (chosen once here, not per step).
        self.left_idx, self.right_idx = random.sample(
            range(max_avail_actions), 2)
        self.valid_avail_actions_mask = np.array(
            [0.] * max_avail_actions, dtype=np.float32)
        self.valid_avail_actions_mask[self.left_idx] = 1
        self.valid_avail_actions_mask[self.right_idx] = 1
        self.action_space = Discrete(max_avail_actions)
        # Delegate dynamics to the plain CartPole env.
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "valid_avail_actions_mask": Box(0, 1, shape=(max_avail_actions, )),
            "cart": self.wrapped.observation_space,
        })

    def reset(self):
        return {
            "valid_avail_actions_mask": self.valid_avail_actions_mask,
            "cart": self.wrapped.reset(),
        }

    def step(self, action):
        # Translate the chosen slot back to CartPole's native 0/1 actions.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.valid_avail_actions_mask, self.left_idx,
                self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        obs = {
            "valid_avail_actions_mask": self.valid_avail_actions_mask,
            "cart": orig_obs,
        }
        return obs, rew, done, info
| 38.567164 | 79 | 0.615906 | import gym
from gym.spaces import Box, Dict, Discrete
import numpy as np
import random
class ParametricActionsCartPole(gym.Env):
    """CartPole with up to `max_avail_actions` parametric actions.

    Only two slots (randomly re-chosen each step) are valid; observations
    carry the cart state, a validity mask, and per-slot action embeddings.
    """

    def __init__(self, max_avail_actions):
        # Random 2-unit embeddings for the two real actions (LEFT/RIGHT).
        self.left_action_embed = np.random.randn(2)
        self.right_action_embed = np.random.randn(2)
        self.action_space = Discrete(max_avail_actions)
        # Dynamics are delegated to the plain CartPole env.
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "action_mask": Box(
                0, 1, shape=(max_avail_actions, ), dtype=np.float32),
            "avail_actions": Box(-10, 10, shape=(max_avail_actions, 2)),
            "cart": self.wrapped.observation_space,
        })

    def update_avail_actions(self):
        """Re-randomize which two slots hold the valid actions."""
        self.action_assignments = np.array(
            [[0., 0.]] * self.action_space.n, dtype=np.float32)
        self.action_mask = np.array(
            [0.] * self.action_space.n, dtype=np.float32)
        self.left_idx, self.right_idx = random.sample(
            range(self.action_space.n), 2)
        self.action_assignments[self.left_idx] = self.left_action_embed
        self.action_assignments[self.right_idx] = self.right_action_embed
        self.action_mask[self.left_idx] = 1
        self.action_mask[self.right_idx] = 1

    def reset(self):
        self.update_avail_actions()
        return {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": self.wrapped.reset(),
        }

    def step(self, action):
        # Map the masked slot index to CartPole's native 0/1 action.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.action_assignments, self.action_mask,
                self.left_idx, self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        self.update_avail_actions()
        # NOTE(review): mask is already float32; astype looks redundant.
        self.action_mask = self.action_mask.astype(np.float32)
        obs = {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": orig_obs,
        }
        return obs, rew, done, info
class ParametricActionsCartPoleNoEmbeddings(gym.Env):
    """Parametric-action CartPole without embeddings in the observation.

    The two valid action slots are fixed at construction time; only the
    cart state and a validity mask are published (embeddings are left to
    the model to learn).
    """

    def __init__(self, max_avail_actions):
        # Choose the two valid slots once for the env's lifetime.
        self.left_idx, self.right_idx = random.sample(
            range(max_avail_actions), 2)
        self.valid_avail_actions_mask = np.array(
            [0.] * max_avail_actions, dtype=np.float32)
        self.valid_avail_actions_mask[self.left_idx] = 1
        self.valid_avail_actions_mask[self.right_idx] = 1
        self.action_space = Discrete(max_avail_actions)
        # Dynamics are delegated to the plain CartPole env.
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "valid_avail_actions_mask": Box(0, 1, shape=(max_avail_actions, )),
            "cart": self.wrapped.observation_space,
        })

    def reset(self):
        return {
            "valid_avail_actions_mask": self.valid_avail_actions_mask,
            "cart": self.wrapped.reset(),
        }

    def step(self, action):
        # Map the masked slot index to CartPole's native 0/1 action.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.valid_avail_actions_mask, self.left_idx,
                self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        obs = {
            "valid_avail_actions_mask": self.valid_avail_actions_mask,
            "cart": orig_obs,
        }
        return obs, rew, done, info
| true | true |
f72b75dd9e026a6ef00652afd5b76efddbde22d2 | 192 | py | Python | captain_comeback/restart/messages.py | waldo2590/captain-comeback | e02e3774eab62d7b8ba454331a785e2ae32c89fc | [
"MIT"
] | null | null | null | captain_comeback/restart/messages.py | waldo2590/captain-comeback | e02e3774eab62d7b8ba454331a785e2ae32c89fc | [
"MIT"
] | null | null | null | captain_comeback/restart/messages.py | waldo2590/captain-comeback | e02e3774eab62d7b8ba454331a785e2ae32c89fc | [
"MIT"
] | 1 | 2020-10-27T06:40:08.000Z | 2020-10-27T06:40:08.000Z | # coding:utf-8
class RestartRequestedMessage(object):
    """Queue message signalling that a restart of a cgroup was requested."""

    def __init__(self, cg):
        # The cgroup this restart request refers to.
        self.cg = cg
class RestartCompleteMessage(object):
    """Queue message signalling that a cgroup restart has finished."""

    def __init__(self, cg):
        # The cgroup whose restart just completed.
        self.cg = cg
| 19.2 | 38 | 0.661458 |
class RestartRequestedMessage(object):
    """Event object: someone asked for the given cgroup to be restarted."""

    def __init__(self, cg):
        # Carry the target cgroup along with the message.
        self.cg = cg
class RestartCompleteMessage(object):
    """Event object: the restart of the given cgroup is done."""

    def __init__(self, cg):
        # Carry the restarted cgroup along with the message.
        self.cg = cg
| true | true |
f72b75e5be3cd503344771ffbf3987aff531bc9b | 416 | py | Python | plotly/validators/scatterpolar/_thetasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scatterpolar/_thetasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/scatterpolar/_thetasrc.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class ThetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'thetasrc' property of 'scatterpolar' traces.

    NOTE(review): this class appears auto-generated from the Plotly
    schema; keep any edits in sync with the generator.
    """
    def __init__(
        self, plotly_name='thetasrc', parent_name='scatterpolar', **kwargs
    ):
        super(ThetasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # 'none' edit type / 'info' role — presumably no re-render is
            # needed when this changes; confirm against plotly docs.
            edit_type='none',
            role='info',
            **kwargs
        )
| 26 | 74 | 0.629808 | import _plotly_utils.basevalidators
class ThetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for 'scatterpolar.thetasrc' (a data-source reference).

    NOTE(review): likely machine-generated from the Plotly schema.
    """
    def __init__(
        self, plotly_name='thetasrc', parent_name='scatterpolar', **kwargs
    ):
        super(ThetasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='none',
            role='info',
            **kwargs
        )
| true | true |
f72b76631a7a3c4e17aca32054d6f0a3ca9e33a8 | 14,058 | py | Python | jax_omeroutils/importer.py | mellertd/jax-omeroutils | 0190522109f9476e25f55292693dfa56f1037606 | [
"MIT"
] | null | null | null | jax_omeroutils/importer.py | mellertd/jax-omeroutils | 0190522109f9476e25f55292693dfa56f1037606 | [
"MIT"
] | null | null | null | jax_omeroutils/importer.py | mellertd/jax-omeroutils | 0190522109f9476e25f55292693dfa56f1037606 | [
"MIT"
] | null | null | null | """
This module is for managing OMERO imports, making use of the OMERO CLI,
which can be called from a Python script. Note that this code requires
a properly structured import.json file, which is produced during data
intake (using the intake.py module).
"""
import logging
from ezomero import post_dataset, post_project
from ezomero import get_image_ids, link_images_to_dataset
from ezomero import post_screen, link_plates_to_screen
from importlib import import_module
from omero.cli import CLI
from omero.plugins.sessions import SessionsControl
from omero.rtypes import rstring
from omero.sys import Parameters
from omero.gateway import MapAnnotationWrapper
from pathlib import Path
ImportControl = import_module("omero.plugins.import").ImportControl
# Constants
CURRENT_MD_NS = 'jax.org/omeroutils/user_submitted/v0'
# Functions
def set_or_create_project(conn, project_name):
    """Return the id of the Project named *project_name*, creating it if absent.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object.
        OMERO connection.
    project_name : str
        Name of the Project to look up in the group specified by ``conn``.
        A new Project is created when no match exists.

    Returns
    -------
    project_id : int
        The id of the Project that was either found or created.
    """
    matches = list(conn.getObjects('Project',
                                   attributes={'name': project_name}))
    if matches:
        # Reuse the first existing Project with this name.
        return matches[0].getId()
    project_id = post_project(conn, project_name)
    print(f'Created new Project:{project_id}')
    return project_id
def set_or_create_dataset(conn, project_id, dataset_name):
    """Return the id of Dataset *dataset_name* under *project_id*, creating it if absent.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object.
        OMERO connection.
    project_id : int
        Id of the Project in which to find or create the Dataset.
    dataset_name : str
        Name of the Dataset to look up; a new Dataset is created in the
        given Project when no match exists.

    Returns
    -------
    dataset_id : int
        The id of the Dataset that was either found or created.
    """
    matches = list(conn.getObjects('Dataset',
                                   attributes={'name': dataset_name},
                                   opts={'project': project_id}))
    if matches:
        # Reuse the first existing Dataset with this name in the Project.
        return matches[0].getId()
    dataset_id = post_dataset(conn, dataset_name, project_id=project_id)
    print(f'Created new Dataset:{dataset_id}')
    return dataset_id
def set_or_create_screen(conn, screen_name):
    """Return the id of the Screen named *screen_name*, creating it if absent.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object.
        OMERO connection.
    screen_name : str
        Name of the Screen to look up in the group specified by ``conn``.
        A new Screen is created when no match exists.

    Returns
    -------
    screen_id : int
        The id of the Screen that was either found or created.
    """
    matches = list(conn.getObjects('Screen',
                                   attributes={'name': screen_name}))
    if matches:
        # Reuse the first existing Screen with this name.
        return matches[0].getId()
    screen_id = post_screen(conn, screen_name)
    print(f'Created new Screen:{screen_id}')
    return screen_id
def multi_post_map_annotation(conn, object_type, object_ids, kv_dict, ns):
    """Create a single new MapAnnotation and link it to multiple objects.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object
        OMERO connection.
    object_type : str
        OMERO object type, passed to ``BlitzGateway.getObjects``.
    object_ids : int or list of ints
        IDs of objects to which the new MapAnnotation will be linked.
    kv_dict : dict
        Key-value pairs that will be included in the MapAnnotation.
    ns : str
        Namespace for the MapAnnotation.

    Notes
    -----
    All keys and values are converted to strings before saving in OMERO.

    Returns
    -------
    map_ann_id : int
        ID of the newly created MapAnnotation.

    Examples
    --------
    >>> ns = 'jax.org/jax/example/namespace'
    >>> d = {'species': 'human',
    ...      'occupation': 'time traveler',
    ...      'first name': 'Kyle',
    ...      'surname': 'Reese'}
    >>> multi_post_map_annotation(conn, "Image", [23, 56, 78], d, ns)
    234
    """
    # isinstance (rather than exact type comparison) also accepts
    # int/list subclasses; behavior is unchanged for plain ints/lists.
    if not isinstance(object_ids, (list, int)):
        raise TypeError('object_ids must be list or integer')
    if not isinstance(object_ids, list):
        object_ids = [object_ids]
    if len(object_ids) == 0:
        raise ValueError('object_ids must contain one or more items')
    if not isinstance(kv_dict, dict):
        raise TypeError('kv_dict must be of type `dict`')

    # Map annotations store [key, value] pairs of strings.
    kv_pairs = [[str(k), str(v)] for k, v in kv_dict.items()]

    map_ann = MapAnnotationWrapper(conn)
    map_ann.setNs(str(ns))
    map_ann.setValue(kv_pairs)
    map_ann.save()
    # One shared annotation, linked to every requested object.
    for obj in conn.getObjects(object_type, object_ids):
        obj.linkAnnotation(map_ann)
    return map_ann.getId()
# Class definitions
class Importer:
    """Class for managing OMERO imports using OMERO CLI.

    Metadata from ``import.json`` (item in 'import_targets') is required for
    assigning to Project/Dataset (or Screen) and adding MapAnnotations.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object.
        OMERO connection.
    file_path : pathlike object
        Path to the file to be imported into OMERO.
    import_md : dict
        Contains metadata required for import and annotation. Generally, an
        item from ``import.json`` ('import_targets'). Must contain
        'filename'; may contain 'project', 'dataset' and 'screen'. All
        remaining keys become MapAnnotation key-value pairs.

    Attributes
    ----------
    conn : ``omero.gateway.BlitzGateway`` object.
        From parameter given at initialization.
    file_path : ``pathlib.Path`` object
        From parameter given at initialization.
    md : dict
        From ``import_md`` parameter, minus the popped keys.
    session_uuid : str
        UUID for the OMERO session represented by ``self.conn``.
    filename : str
        Filename of the file to be imported.
    project, dataset, screen : str or None
        Optional organizational targets popped from ``self.md``.
    imported : bool
        Flag indicating import status.
    image_ids, plate_ids : list of ints or None
        Ids of imported images/plates; populated by ``get_image_ids`` /
        ``get_plate_ids`` after a successful import.
    """

    def __init__(self, conn, file_path, import_md):
        self.conn = conn
        self.file_path = Path(file_path)
        self.md = import_md
        self.session_uuid = conn.getSession().getUuid().val
        # 'filename' is mandatory (KeyError if missing, as before);
        # the organizational targets are optional.
        self.filename = self.md.pop('filename')
        self.project = self.md.pop('project', None)
        self.dataset = self.md.pop('dataset', None)
        self.screen = self.md.pop('screen', None)
        self.imported = False
        self.image_ids = None
        self.plate_ids = None

    def get_image_ids(self):
        """Get the Ids of imported images.

        Note that this will not find images if they have not been imported.
        Also sets ``self.image_ids``.

        Returns
        -------
        image_ids : list of ints, or None
            Ids of images imported from the specified client path; None if
            the file has not been imported yet.
        """
        if self.imported is not True:
            logging.error(f'File {self.file_path} has not been imported')
            return None
        q = self.conn.getQueryService()
        params = Parameters()
        # strip('/') removes leading (and trailing) slashes to match the
        # stored clientPath — NOTE(review): trailing strip assumed
        # harmless for file paths.
        path_query = str(self.file_path).strip('/')
        params.map = {"cpath": rstring(path_query)}
        results = q.projection(
            "SELECT i.id FROM Image i"
            " JOIN i.fileset fs"
            " JOIN fs.usedFiles u"
            " WHERE u.clientPath=:cpath",
            params,
            self.conn.SERVICE_OPTS
        )
        self.image_ids = [r[0].val for r in results]
        return self.image_ids

    def get_plate_ids(self):
        """Get the Ids of imported plates.

        Note that this will not find plates if they have not been imported.
        Also sets ``self.plate_ids``.

        Returns
        -------
        plate_ids : list of ints, or None
            Ids of plates imported from the specified client path; None if
            the file has not been imported yet.
        """
        if self.imported is not True:
            logging.error(f'File {self.file_path} has not been imported')
            return None
        # Leftover debug prints removed; this now mirrors get_image_ids.
        q = self.conn.getQueryService()
        params = Parameters()
        path_query = str(self.file_path).strip('/')
        params.map = {"cpath": rstring(path_query)}
        results = q.projection(
            "SELECT DISTINCT p.id FROM Plate p"
            " JOIN p.plateAcquisitions pa"
            " JOIN pa.wellSample ws"
            " JOIN ws.image i"
            " JOIN i.fileset fs"
            " JOIN fs.usedFiles u"
            " WHERE u.clientPath=:cpath",
            params,
            self.conn.SERVICE_OPTS
        )
        self.plate_ids = [r[0].val for r in results]
        return self.plate_ids

    def annotate_images(self):
        """Post map annotation (``self.md``) to images ``self.image_ids``.

        Returns
        -------
        map_ann_id : int or None
            Id of the MapAnnotation that was created; None if there are no
            image ids (empty list, or not populated yet).
        """
        # `not self.image_ids` also covers the None case (before
        # get_image_ids has run), which previously raised TypeError.
        if not self.image_ids:
            logging.error('No image ids to annotate')
            return None
        return multi_post_map_annotation(self.conn, "Image",
                                         self.image_ids, self.md,
                                         CURRENT_MD_NS)

    def annotate_plates(self):
        """Post map annotation (``self.md``) to plates ``self.plate_ids``.

        Returns
        -------
        map_ann_id : int or None
            Id of the MapAnnotation that was created; None if there are no
            plate ids (empty list, or not populated yet).
        """
        # None-safe, matching annotate_images.
        if not self.plate_ids:
            logging.error('No plate ids to annotate')
            return None
        return multi_post_map_annotation(self.conn, "Plate",
                                         self.plate_ids, self.md,
                                         CURRENT_MD_NS)

    def organize_images(self):
        """Move images to ``self.project``/``self.dataset``.

        Returns
        -------
        images_moved : bool
            True if image ids were present (orphans get moved), else False.
        """
        if not self.image_ids:
            logging.error('No image ids to organize')
            return False
        orphans = get_image_ids(self.conn)
        for im_id in self.image_ids:
            if im_id not in orphans:
                logging.error(f'Image:{im_id} not an orphan')
            else:
                # Project/Dataset are resolved inside the loop so that
                # nothing is created when every image is a non-orphan.
                project_id = set_or_create_project(self.conn, self.project)
                dataset_id = set_or_create_dataset(self.conn,
                                                   project_id,
                                                   self.dataset)
                link_images_to_dataset(self.conn, [im_id], dataset_id)
                print(f'Moved Image:{im_id} to Dataset:{dataset_id}')
        return True

    def organize_plates(self):
        """Move plates to ``self.screen``.

        Returns
        -------
        plates_moved : bool
            True if plate ids were present and moved, else False.
        """
        # None-safe, matching organize_images (previously len(None) raised).
        if not self.plate_ids:
            logging.error('No plate ids to organize')
            return False
        for pl_id in self.plate_ids:
            screen_id = set_or_create_screen(self.conn, self.screen)
            link_plates_to_screen(self.conn, [pl_id], screen_id)
            print(f'Moved Plate:{pl_id} to Screen:{screen_id}')
        return True

    def import_ln_s(self, host, port):
        """Import the file using the ``--transfer=ln_s`` option.

        Parameters
        ----------
        host : str
            Hostname of the OMERO server to import into.
        port : int
            Port used to connect to OMERO.server.

        Returns
        -------
        import_status : bool
            True if the OMERO import returns a 0 exit status, else False.
        """
        cli = CLI()
        cli.register('import', ImportControl, '_')
        cli.register('sessions', SessionsControl, '_')
        cli.invoke(['import',
                    '-k', self.conn.getSession().getUuid().val,
                    '-s', host,
                    '-p', str(port),
                    '--transfer', 'ln_s',
                    str(self.file_path)])
        if cli.rv == 0:
            self.imported = True
            print(f'Imported {self.file_path}')
            return True
        logging.error(f'Import of {self.file_path} has failed!')
        return False
| 34.121359 | 79 | 0.583155 |
import logging
from ezomero import post_dataset, post_project
from ezomero import get_image_ids, link_images_to_dataset
from ezomero import post_screen, link_plates_to_screen
from importlib import import_module
from omero.cli import CLI
from omero.plugins.sessions import SessionsControl
from omero.rtypes import rstring
from omero.sys import Parameters
from omero.gateway import MapAnnotationWrapper
from pathlib import Path
ImportControl = import_module("omero.plugins.import").ImportControl
CURRENT_MD_NS = 'jax.org/omeroutils/user_submitted/v0'
def set_or_create_project(conn, project_name):
    """Return the id of the Project named *project_name*, creating it if absent."""
    ps = conn.getObjects('Project', attributes={'name': project_name})
    ps = list(ps)
    if len(ps) == 0:
        project_id = post_project(conn, project_name)
        print(f'Created new Project:{project_id}')
    else:
        # Reuse the first existing Project with this name.
        project_id = ps[0].getId()
    return project_id
def set_or_create_dataset(conn, project_id, dataset_name):
    """Return the id of Dataset *dataset_name* under *project_id*, creating it if absent."""
    ds = conn.getObjects('Dataset',
                         attributes={'name': dataset_name},
                         opts={'project': project_id})
    ds = list(ds)
    if len(ds) == 0:
        dataset_id = post_dataset(conn, dataset_name, project_id=project_id)
        print(f'Created new Dataset:{dataset_id}')
    else:
        # Reuse the first existing Dataset with this name in the Project.
        dataset_id = ds[0].getId()
    return dataset_id
def set_or_create_screen(conn, screen_name):
    """Return the id of the Screen named *screen_name*, creating it if absent."""
    ss = conn.getObjects('Screen', attributes={'name': screen_name})
    ss = list(ss)
    if len(ss) == 0:
        screen_id = post_screen(conn, screen_name)
        print(f'Created new Screen:{screen_id}')
    else:
        # Reuse the first existing Screen with this name.
        screen_id = ss[0].getId()
    return screen_id
def multi_post_map_annotation(conn, object_type, object_ids, kv_dict, ns):
    """Create a single MapAnnotation from *kv_dict* (all keys/values are
    stringified) in namespace *ns* and link it to every object in
    *object_ids*; return the new annotation's id."""
    # NOTE(review): exact type() checks reject int/list subclasses;
    # isinstance would be the usual idiom here.
    if type(object_ids) not in [list, int]:
        raise TypeError('object_ids must be list or integer')
    if type(object_ids) is not list:
        object_ids = [object_ids]
    if len(object_ids) == 0:
        raise ValueError('object_ids must contain one or more items')
    if type(kv_dict) is not dict:
        raise TypeError('kv_dict must be of type `dict`')
    # Map annotations store [key, value] pairs of strings.
    kv_pairs = []
    for k, v in kv_dict.items():
        k = str(k)
        v = str(v)
        kv_pairs.append([k, v])
    map_ann = MapAnnotationWrapper(conn)
    map_ann.setNs(str(ns))
    map_ann.setValue(kv_pairs)
    map_ann.save()
    # One shared annotation, linked to every requested object.
    for o in conn.getObjects(object_type, object_ids):
        o.linkAnnotation(map_ann)
    return map_ann.getId()
class Importer:
    """Manage one OMERO import via the OMERO CLI: import a file, look up
    the resulting image/plate ids, annotate them, and move them into a
    Project/Dataset or Screen."""

    def __init__(self, conn, file_path, import_md):
        self.conn = conn
        self.file_path = Path(file_path)
        self.md = import_md
        self.session_uuid = conn.getSession().getUuid().val
        # 'filename' is mandatory; project/dataset/screen are optional.
        self.filename = self.md.pop('filename')
        if 'project' in self.md.keys():
            self.project = self.md.pop('project')
        else:
            self.project = None
        if 'dataset' in self.md.keys():
            self.dataset = self.md.pop('dataset')
        else:
            self.dataset = None
        if 'screen' in self.md.keys():
            self.screen = self.md.pop('screen')
        else:
            self.screen = None
        self.imported = False
        self.image_ids = None
        self.plate_ids = None

    def get_image_ids(self):
        """Query OMERO for ids of images imported from this client path;
        sets and returns ``self.image_ids`` (None if not yet imported)."""
        if self.imported is not True:
            logging.error(f'File {self.file_path} has not been imported')
            return None
        else:
            q = self.conn.getQueryService()
            params = Parameters()
            # strip('/') removes leading (and trailing) slashes to match
            # the stored clientPath — NOTE(review): confirm trailing strip
            # is harmless.
            path_query = str(self.file_path).strip('/')
            params.map = {"cpath": rstring(path_query)}
            results = q.projection(
                "SELECT i.id FROM Image i"
                " JOIN i.fileset fs"
                " JOIN fs.usedFiles u"
                " WHERE u.clientPath=:cpath",
                params,
                self.conn.SERVICE_OPTS
            )
            self.image_ids = [r[0].val for r in results]
            return self.image_ids

    def get_plate_ids(self):
        """Query OMERO for ids of plates imported from this client path;
        sets and returns ``self.plate_ids`` (None if not yet imported)."""
        if self.imported is not True:
            logging.error(f'File {self.file_path} has not been imported')
            return None
        else:
            # NOTE(review): the prints below look like leftover debug
            # output and could be removed.
            print("time to get some IDs")
            q = self.conn.getQueryService()
            print(q)
            params = Parameters()
            path_query = str(self.file_path).strip('/')
            print(f"path query: f{path_query}")
            params.map = {"cpath": rstring(path_query)}
            print(params)
            results = q.projection(
                "SELECT DISTINCT p.id FROM Plate p"
                " JOIN p.plateAcquisitions pa"
                " JOIN pa.wellSample ws"
                " JOIN ws.image i"
                " JOIN i.fileset fs"
                " JOIN fs.usedFiles u"
                " WHERE u.clientPath=:cpath",
                params,
                self.conn.SERVICE_OPTS
            )
            print(results)
            self.plate_ids = [r[0].val for r in results]
            return self.plate_ids

    def annotate_images(self):
        """Attach the remaining metadata as a MapAnnotation to the images;
        returns the annotation id, or None when there are no image ids."""
        # NOTE(review): len(None) raises TypeError when get_image_ids has
        # not run yet.
        if len(self.image_ids) == 0:
            logging.error('No image ids to annotate')
            return None
        else:
            map_ann_id = multi_post_map_annotation(self.conn, "Image",
                                                   self.image_ids, self.md,
                                                   CURRENT_MD_NS)
            return map_ann_id

    def annotate_plates(self):
        """Attach the remaining metadata as a MapAnnotation to the plates;
        returns the annotation id, or None when there are no plate ids."""
        # NOTE(review): same len(None) hazard as annotate_images.
        if len(self.plate_ids) == 0:
            logging.error('No plate ids to annotate')
            return None
        else:
            map_ann_id = multi_post_map_annotation(self.conn, "Plate",
                                                   self.plate_ids, self.md,
                                                   CURRENT_MD_NS)
            return map_ann_id

    def organize_images(self):
        """Move orphaned imported images into the configured
        Project/Dataset; returns False when there are no image ids."""
        if not self.image_ids:
            logging.error('No image ids to organize')
            return False
        orphans = get_image_ids(self.conn)
        for im_id in self.image_ids:
            if im_id not in orphans:
                logging.error(f'Image:{im_id} not an orphan')
            else:
                # Resolved per image so nothing is created when every
                # image turns out to be a non-orphan.
                project_id = set_or_create_project(self.conn, self.project)
                dataset_id = set_or_create_dataset(self.conn,
                                                   project_id,
                                                   self.dataset)
                link_images_to_dataset(self.conn, [im_id], dataset_id)
                print(f'Moved Image:{im_id} to Dataset:{dataset_id}')
        return True

    def organize_plates(self):
        """Move imported plates into the configured Screen; returns False
        when there are no plate ids."""
        # NOTE(review): len(None) raises TypeError when get_plate_ids has
        # not run yet (organize_images uses the None-safe `if not ...`).
        if len(self.plate_ids) == 0:
            logging.error('No plate ids to organize')
            return False
        for pl_id in self.plate_ids:
            screen_id = set_or_create_screen(self.conn, self.screen)
            link_plates_to_screen(self.conn, [pl_id], screen_id)
            print(f'Moved Plate:{pl_id} to Screen:{screen_id}')
        return True

    def import_ln_s(self, host, port):
        """Run an OMERO CLI in-place import (``--transfer ln_s``) against
        *host*:*port*; sets ``self.imported`` and returns True on rv==0."""
        cli = CLI()
        cli.register('import', ImportControl, '_')
        cli.register('sessions', SessionsControl, '_')
        cli.invoke(['import',
                    '-k', self.conn.getSession().getUuid().val,
                    '-s', host,
                    '-p', str(port),
                    '--transfer', 'ln_s',
                    str(self.file_path)])
        if cli.rv == 0:
            self.imported = True
            print(f'Imported {self.file_path}')
            return True
        else:
            logging.error(f'Import of {self.file_path} has failed!')
            return False
| true | true |
f72b767f4b0695212462ecc1142bccc223f481ec | 755 | py | Python | meraki/api/content_filtering_categories.py | fsandberg/dashboard-api-python | c01ff038643a39bd12660d2719375eeb05c7ba24 | [
"MIT"
] | null | null | null | meraki/api/content_filtering_categories.py | fsandberg/dashboard-api-python | c01ff038643a39bd12660d2719375eeb05c7ba24 | [
"MIT"
] | null | null | null | meraki/api/content_filtering_categories.py | fsandberg/dashboard-api-python | c01ff038643a39bd12660d2719375eeb05c7ba24 | [
"MIT"
] | null | null | null | class ContentFilteringCategories(object):
def __init__(self, session):
super(ContentFilteringCategories, self).__init__()
self._session = session
def getNetworkContentFilteringCategories(self, networkId: str):
"""
**List all available content filtering categories for an MX network**
https://developer.cisco.com/docs/meraki-api-v0/#!get-network-content-filtering-categories
- networkId (string)
"""
metadata = {
'tags': ['Content filtering categories'],
'operation': 'getNetworkContentFilteringCategories',
}
resource = f'/networks/{networkId}/contentFiltering/categories'
return self._session.get(metadata, resource)
| 34.318182 | 97 | 0.651656 | class ContentFilteringCategories(object):
def __init__(self, session):
super(ContentFilteringCategories, self).__init__()
self._session = session
def getNetworkContentFilteringCategories(self, networkId: str):
metadata = {
'tags': ['Content filtering categories'],
'operation': 'getNetworkContentFilteringCategories',
}
resource = f'/networks/{networkId}/contentFiltering/categories'
return self._session.get(metadata, resource)
| true | true |
f72b76cf495d554aada39527fcd895707831533d | 540 | py | Python | model/group_data.py | AlexeyKozlov/python_training-master | 5e677e8be521bffac027843c5ba049695ca31492 | [
"Apache-2.0"
] | null | null | null | model/group_data.py | AlexeyKozlov/python_training-master | 5e677e8be521bffac027843c5ba049695ca31492 | [
"Apache-2.0"
] | null | null | null | model/group_data.py | AlexeyKozlov/python_training-master | 5e677e8be521bffac027843c5ba049695ca31492 | [
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s" % (self.id, self.name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 21.6 | 103 | 0.574074 |
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s" % (self.id, self.name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| true | true |
f72b773ac9b1776af73d49d00186f6e6bae720af | 576 | py | Python | step4.py | rezabojnordi/learning-image-processing | b0abb4005428af40dda3958561b7a78cfc6bafa6 | [
"MIT"
] | null | null | null | step4.py | rezabojnordi/learning-image-processing | b0abb4005428af40dda3958561b7a78cfc6bafa6 | [
"MIT"
] | null | null | null | step4.py | rezabojnordi/learning-image-processing | b0abb4005428af40dda3958561b7a78cfc6bafa6 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(1) # number 0 for one camera
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc,24.0,(640,480)) #params 3 = fram rate speed fram for save
# GRAY SCALE FOR ALL PIC
while(True): #video is pics
ret,fram = cap.read()
gray = cv2.cvtColor(fram,cv2.COLOR_BGR2GRAY) # cv2 =>blue - green -red
out.write(fram)
cv2.imshow('cameras',fram)
if cv2.waitKey(1) & 0XFF == ord('q'):
break
cap.release()
out.release()
cv2.destroyAllWindows() | 28.8 | 100 | 0.689236 | import cv2
import numpy as np
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc,24.0,(640,480))
while(True):
ret,fram = cap.read()
gray = cv2.cvtColor(fram,cv2.COLOR_BGR2GRAY)
out.write(fram)
cv2.imshow('cameras',fram)
if cv2.waitKey(1) & 0XFF == ord('q'):
break
cap.release()
out.release()
cv2.destroyAllWindows() | true | true |
f72b7753eec3b5170014e7cf49eb5e0152bc3dab | 1,282 | py | Python | custom/new/new.py | freepoet/mmdetection | 74894c36da600014c372646c34ff4838d6968942 | [
"Apache-2.0"
] | 1 | 2021-01-21T14:20:48.000Z | 2021-01-21T14:20:48.000Z | custom/new/new.py | freepoet/mmdetection | 74894c36da600014c372646c34ff4838d6968942 | [
"Apache-2.0"
] | null | null | null | custom/new/new.py | freepoet/mmdetection | 74894c36da600014c372646c34ff4838d6968942 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#读取mnist数据集字符图片
import torch
import torchvision
from PIL import Image
import cv2
import numpy as np
import os
import gzip
import matplotlib
import matplotlib.pyplot as pltsfas
# 定义加载数据的函数,data_folder为保存gz数据的文件夹,该文件夹下有4个文件
# 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
# 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
def load_data(data_folder):
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(os.path.join(data_folder,fname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
(train_images, train_labels), (test_images, test_labels) = load_data('../../data/MNIST/raw')
| 29.136364 | 92 | 0.690328 |
import torch
import torchvision
from PIL import Image
import cv2
import numpy as np
import os
import gzip
import matplotlib
import matplotlib.pyplot as pltsfas
def load_data(data_folder):
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(os.path.join(data_folder,fname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
(train_images, train_labels), (test_images, test_labels) = load_data('../../data/MNIST/raw')
| true | true |
f72b78257722b6c8736302bff4164efaba35af74 | 176 | py | Python | Day01-15/code/Day02/variable1.py | bdfd/Python_Zero2Hero_DS | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | [
"MIT"
] | 3 | 2022-01-15T19:06:19.000Z | 2022-01-18T16:47:27.000Z | Day01-15/code/Day02/variable1.py | bdfd/4.5_Data-Science-Python-Zero2Hero- | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | [
"MIT"
] | null | null | null | Day01-15/code/Day02/variable1.py | bdfd/4.5_Data-Science-Python-Zero2Hero- | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | [
"MIT"
] | 1 | 2022-01-09T00:18:49.000Z | 2022-01-09T00:18:49.000Z | """
使用变量保存数据并进行操作
Version: 0.1
Author: BDFD
Date: 2018-02-27
"""
a = 321
b = 123
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a // b)
print(a % b)
print(a ** b)
| 9.777778 | 16 | 0.573864 |
a = 321
b = 123
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a // b)
print(a % b)
print(a ** b)
| true | true |
f72b7a277c123afcf6d76434f86d3383e699a8fc | 3,379 | py | Python | setup.py | rajivshah3/checkov | c6a6eca21bedae50574814c92973b65d2d963581 | [
"Apache-2.0"
] | 1 | 2021-02-16T15:07:29.000Z | 2021-02-16T15:07:29.000Z | setup.py | rajivshah3/checkov | c6a6eca21bedae50574814c92973b65d2d963581 | [
"Apache-2.0"
] | null | null | null | setup.py | rajivshah3/checkov | c6a6eca21bedae50574814c92973b65d2d963581 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import logging
import os
from importlib import util
from os import path
import setuptools
from setuptools import setup
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
logger = logging.getLogger(__name__)
spec = util.spec_from_file_location(
"checkov.version", os.path.join("checkov", "version.py")
)
# noinspection PyUnresolvedReferences
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
version = mod.version # type: ignore
setup(
extras_require={
"dev": [
"alabaster==0.7.12",
"attrs==19.3.0",
"babel==2.7.0",
"certifi==2019.11.28",
"chardet==3.0.4",
"coverage==4.5.4",
"coverage-badge==1.0.1",
"docopt==0.6.2",
"docutils==0.15.2",
"idna==2.8",
"imagesize==1.1.0",
"importlib-metadata==1.1.0; python_version < '3.8'",
"jinja2==2.10.3",
"lark-parser==0.7.8",
"markupsafe==1.1.1",
"more-itertools==8.0.0",
"packaging==19.2",
"pluggy==0.13.1",
"py==1.8.0",
"pygments==2.5.2",
"pyparsing==2.4.5",
"pytest==5.3.1",
"bc-python-hcl2>=0.3.10",
"pytz==2019.3",
"pyyaml==5.3.1",
"requests==2.22.0",
"six==1.15.0",
"snowballstemmer==2.0.0",
"sphinx==2.2.1",
"sphinxcontrib-applehelp==1.0.1",
"sphinxcontrib-devhelp==1.0.1",
"sphinxcontrib-htmlhelp==1.0.2",
"sphinxcontrib-jsmath==1.0.1",
"sphinxcontrib-qthelp==1.0.2",
"sphinxcontrib-serializinghtml==1.1.3",
"urllib3==1.25.10",
"wcwidth==0.1.7",
"zipp==0.6.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
]
},
install_requires=[
"update-checker==0.18.0",
"tqdm==4.49.0",
"boto3==1.12.43",
"chardet==3.0.4",
"colorama==0.4.3",
"docopt==0.6.2",
"idna==2.8",
"jmespath==0.10.0",
"junit-xml==1.8",
"lark-parser==0.7.8",
"bc-python-hcl2>=0.3.11",
"pyyaml==5.3.1",
"requests==2.22.0",
"six==1.15.0",
"tabulate==0.8.6",
"termcolor==1.1.0",
"urllib3==1.25.10",
"dpath==1.5.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
],
license="Apache License 2.0",
name="checkov",
version=version,
description="Infrastructure as code static analysis",
author="bridgecrew",
author_email="meet@bridgecrew.io",
url="https://github.com/bridgecrewio/checkov",
packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
scripts=["bin/checkov","bin/checkov.cmd"],
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 3.7',
'Topic :: Security',
'Topic :: Software Development :: Build Tools'
]
)
| 30.441441 | 79 | 0.527671 |
import logging
import os
from importlib import util
from os import path
import setuptools
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
logger = logging.getLogger(__name__)
spec = util.spec_from_file_location(
"checkov.version", os.path.join("checkov", "version.py")
)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
version = mod.version
setup(
extras_require={
"dev": [
"alabaster==0.7.12",
"attrs==19.3.0",
"babel==2.7.0",
"certifi==2019.11.28",
"chardet==3.0.4",
"coverage==4.5.4",
"coverage-badge==1.0.1",
"docopt==0.6.2",
"docutils==0.15.2",
"idna==2.8",
"imagesize==1.1.0",
"importlib-metadata==1.1.0; python_version < '3.8'",
"jinja2==2.10.3",
"lark-parser==0.7.8",
"markupsafe==1.1.1",
"more-itertools==8.0.0",
"packaging==19.2",
"pluggy==0.13.1",
"py==1.8.0",
"pygments==2.5.2",
"pyparsing==2.4.5",
"pytest==5.3.1",
"bc-python-hcl2>=0.3.10",
"pytz==2019.3",
"pyyaml==5.3.1",
"requests==2.22.0",
"six==1.15.0",
"snowballstemmer==2.0.0",
"sphinx==2.2.1",
"sphinxcontrib-applehelp==1.0.1",
"sphinxcontrib-devhelp==1.0.1",
"sphinxcontrib-htmlhelp==1.0.2",
"sphinxcontrib-jsmath==1.0.1",
"sphinxcontrib-qthelp==1.0.2",
"sphinxcontrib-serializinghtml==1.1.3",
"urllib3==1.25.10",
"wcwidth==0.1.7",
"zipp==0.6.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
]
},
install_requires=[
"update-checker==0.18.0",
"tqdm==4.49.0",
"boto3==1.12.43",
"chardet==3.0.4",
"colorama==0.4.3",
"docopt==0.6.2",
"idna==2.8",
"jmespath==0.10.0",
"junit-xml==1.8",
"lark-parser==0.7.8",
"bc-python-hcl2>=0.3.11",
"pyyaml==5.3.1",
"requests==2.22.0",
"six==1.15.0",
"tabulate==0.8.6",
"termcolor==1.1.0",
"urllib3==1.25.10",
"dpath==1.5.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
],
license="Apache License 2.0",
name="checkov",
version=version,
description="Infrastructure as code static analysis",
author="bridgecrew",
author_email="meet@bridgecrew.io",
url="https://github.com/bridgecrewio/checkov",
packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
scripts=["bin/checkov","bin/checkov.cmd"],
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 3.7',
'Topic :: Security',
'Topic :: Software Development :: Build Tools'
]
)
| true | true |
f72b7a44c047b0c5eed99383fa2b7f7c2da824b8 | 3,974 | py | Python | geeksurvey/views.py | NAU-SuperGeeks/geeksurvey | ee2fd0d2869f3b8b5c058d8a39c64c2d0b0c2a26 | [
"MIT"
] | 3 | 2022-01-01T23:00:45.000Z | 2022-02-26T23:35:46.000Z | geeksurvey/views.py | NAU-SuperGeeks/geeksurvey | ee2fd0d2869f3b8b5c058d8a39c64c2d0b0c2a26 | [
"MIT"
] | 5 | 2022-02-12T17:52:52.000Z | 2022-03-02T16:08:08.000Z | geeksurvey/views.py | NAU-SuperGeeks/geeksurvey | ee2fd0d2869f3b8b5c058d8a39c64c2d0b0c2a26 | [
"MIT"
] | 4 | 2022-01-12T18:47:20.000Z | 2022-01-12T19:11:59.000Z | from django.core.mail import send_mail
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from decouple import config
from geeksurvey.settings import *
import json
from .models import Study, Profile
from .forms import *
def index(request):
if request.user.is_authenticated:
profile = Profile.objects.get(user=request.user)
context = {
'profile': profile,
}
else:
context = {}
return render(request, 'home.html', context)
def working(request):
return render(request, 'working.html')
def help(request):
return render(request, 'help.html')
@login_required
def participate(request):
all_studies = Study.objects.all()
enrolled_studies = []
completed_studies = []
for study in all_studies:
if request.user in study.completed.all():
completed_studies.append(study)
elif request.user in study.enrolled.all():
enrolled_studies.append(study)
profile = Profile.objects.get(user=request.user)
context = {
'enrolled_studies':enrolled_studies,
'completed_studies':completed_studies,
'profile': profile,
}
return render(request, 'participate/index.html', context)
@login_required
def part_discover(request):
user_profile = Profile.objects.get(user=request.user)
all_studies = Study.objects.all()
eligible_studies = []
for study in all_studies:
if user_profile.can_enroll(study):
eligible_studies.append(study)
context = {
'studies': eligible_studies,
'profile': user_profile,
}
return render(request, 'participate/discover.html', context)
@login_required
def profile(request):
profile = Profile.objects.get(user=request.user)
context={'profile':profile}
return render(request, 'profile/index.html', context)
# public profile view, accesible by url
def profile_view(request, username):
user = get_object_or_404(User, username=username)
profile = Profile.objects.get(user=user)
context = {
'user':user,
'profile':profile,
}
return render(request, 'profile/view.html', context)
@login_required
def profile_update(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
new_profile = p_form.save(commit=False)
new_profile.updated_once = True
new_profile.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile') # Redirect back to profile page
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
p_form['open_source_experience'].label = "Experienced With Open Source Development?"
p_form['email_opt_in'].label = "Opt In For Email Notifications?"
context = {
'profile': profile,
'u_form': u_form,
'p_form': p_form
}
return render(request, 'profile/update.html', context)
@login_required
def research(request):
profile = Profile.objects.get(user=request.user)
# show existing studies created by the user
studies = Study.objects.filter(owner=request.user)
context = {
'profile':profile,
'studies':studies
}
return render(request, 'research/index.html', context)
| 29.879699 | 92 | 0.67539 | from django.core.mail import send_mail
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from decouple import config
from geeksurvey.settings import *
import json
from .models import Study, Profile
from .forms import *
def index(request):
if request.user.is_authenticated:
profile = Profile.objects.get(user=request.user)
context = {
'profile': profile,
}
else:
context = {}
return render(request, 'home.html', context)
def working(request):
return render(request, 'working.html')
def help(request):
return render(request, 'help.html')
@login_required
def participate(request):
all_studies = Study.objects.all()
enrolled_studies = []
completed_studies = []
for study in all_studies:
if request.user in study.completed.all():
completed_studies.append(study)
elif request.user in study.enrolled.all():
enrolled_studies.append(study)
profile = Profile.objects.get(user=request.user)
context = {
'enrolled_studies':enrolled_studies,
'completed_studies':completed_studies,
'profile': profile,
}
return render(request, 'participate/index.html', context)
@login_required
def part_discover(request):
user_profile = Profile.objects.get(user=request.user)
all_studies = Study.objects.all()
eligible_studies = []
for study in all_studies:
if user_profile.can_enroll(study):
eligible_studies.append(study)
context = {
'studies': eligible_studies,
'profile': user_profile,
}
return render(request, 'participate/discover.html', context)
@login_required
def profile(request):
profile = Profile.objects.get(user=request.user)
context={'profile':profile}
return render(request, 'profile/index.html', context)
def profile_view(request, username):
user = get_object_or_404(User, username=username)
profile = Profile.objects.get(user=user)
context = {
'user':user,
'profile':profile,
}
return render(request, 'profile/view.html', context)
@login_required
def profile_update(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
new_profile = p_form.save(commit=False)
new_profile.updated_once = True
new_profile.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
p_form['open_source_experience'].label = "Experienced With Open Source Development?"
p_form['email_opt_in'].label = "Opt In For Email Notifications?"
context = {
'profile': profile,
'u_form': u_form,
'p_form': p_form
}
return render(request, 'profile/update.html', context)
@login_required
def research(request):
profile = Profile.objects.get(user=request.user)
studies = Study.objects.filter(owner=request.user)
context = {
'profile':profile,
'studies':studies
}
return render(request, 'research/index.html', context)
| true | true |
f72b7a4a8f6c0949e5829db8520e9af17ce1d0a0 | 463 | py | Python | docs/newplatform.py | PlayerG9/PyMessageBox | 21b113c5e1a322e8412a412df071cc392b40d7c5 | [
"MIT"
] | 2 | 2021-06-28T12:35:08.000Z | 2022-03-11T16:48:01.000Z | docs/newplatform.py | PlayerG9/PyMessageBox | 21b113c5e1a322e8412a412df071cc392b40d7c5 | [
"MIT"
] | null | null | null | docs/newplatform.py | PlayerG9/PyMessageBox | 21b113c5e1a322e8412a412df071cc392b40d7c5 | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
r"""
"""
def showinfo(title: str, message: str):
pass
def showwarning(title: str, message: str):
pass
def showerror(title: str, message: str):
pass
def askquestion(title: str, message: str):
pass
def askokcancel(title: str, message: str):
pass
def askyesno(title: str, message: str):
pass
def askyesnocancel(title: str, message: str):
pass
def askretrycancel(title: str, message: str):
pass
| 12.513514 | 45 | 0.641469 |
def showinfo(title: str, message: str):
pass
def showwarning(title: str, message: str):
pass
def showerror(title: str, message: str):
pass
def askquestion(title: str, message: str):
pass
def askokcancel(title: str, message: str):
pass
def askyesno(title: str, message: str):
pass
def askyesnocancel(title: str, message: str):
pass
def askretrycancel(title: str, message: str):
pass
| true | true |
f72b7a657af7cedbaad0f606f0d079a66c7b1ee8 | 314 | py | Python | home/models.py | nikhilchaudhary0126/neva | f86b6d2dfa047360f6e07a621b985faa2120d009 | [
"MIT"
] | null | null | null | home/models.py | nikhilchaudhary0126/neva | f86b6d2dfa047360f6e07a621b985faa2120d009 | [
"MIT"
] | null | null | null | home/models.py | nikhilchaudhary0126/neva | f86b6d2dfa047360f6e07a621b985faa2120d009 | [
"MIT"
] | null | null | null | from django.db import models
class Location(models.Model):
address = models.CharField(max_length=30)
addresstype = models.CharField(max_length=20)
city = models.CharField(max_length=30)
state = models.CharField(max_length=30)
latitude = models.FloatField()
longitude = models.FloatField() | 31.4 | 49 | 0.738854 | from django.db import models
class Location(models.Model):
address = models.CharField(max_length=30)
addresstype = models.CharField(max_length=20)
city = models.CharField(max_length=30)
state = models.CharField(max_length=30)
latitude = models.FloatField()
longitude = models.FloatField() | true | true |
f72b7ac60389bda7cc60bcc36a3a0881db868e79 | 170 | py | Python | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_LSTM.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_LSTM.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_LSTM.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['LSTM'] ); | 42.5 | 97 | 0.770588 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['LSTM'] ); | true | true |
f72b7c7528116153eec48525b0ed3411067b3ddc | 4,627 | py | Python | doc/pvtol-nested.py | stelselim/python-control | d73b635d2b130af5c2829eefd59c99b9bd53fb01 | [
"BSD-3-Clause"
] | 1,112 | 2015-01-14T08:01:33.000Z | 2022-03-31T11:54:00.000Z | doc/pvtol-nested.py | stelselim/python-control | d73b635d2b130af5c2829eefd59c99b9bd53fb01 | [
"BSD-3-Clause"
] | 646 | 2015-02-02T15:35:23.000Z | 2022-03-30T08:19:26.000Z | doc/pvtol-nested.py | stelselim/python-control | d73b635d2b130af5c2829eefd59c99b9bd53fb01 | [
"BSD-3-Clause"
] | 366 | 2015-01-28T17:58:06.000Z | 2022-03-29T11:04:10.000Z | # pvtol-nested.py - inner/outer design for vectored thrust aircraft
# RMM, 5 Sep 09
#
# This file works through a fairly complicated control design and
# analysis, corresponding to the planar vertical takeoff and landing
# (PVTOL) aircraft in Astrom and Murray, Chapter 11. It is intended
# to demonstrate the basic functionality of the python-control
# package.
#
from __future__ import print_function
import os
import matplotlib.pyplot as plt # MATLAB plotting functions
from control.matlab import * # MATLAB-like functions
import numpy as np
# System parameters
m = 4 # mass of aircraft
J = 0.0475 # inertia around pitch axis
r = 0.25 # distance to center of force
g = 9.8 # gravitational constant
c = 0.05 # damping factor (estimated)
# Transfer functions for dynamics
Pi = tf([r], [J, 0, 0]) # inner loop (roll)
Po = tf([1], [m, c, 0]) # outer loop (position)
#
# Inner loop control design
#
# This is the controller for the pitch dynamics. Goal is to have
# fast response for the pitch dynamics so that we can use this as a
# control for the lateral dynamics
#
# Design a simple lead controller for the system
k, a, b = 200, 2, 50
Ci = k*tf([1, a], [1, b]) # lead compensator
Li = Pi*Ci
# Bode plot for the open loop process
plt.figure(1)
bode(Pi)
# Bode plot for the loop transfer function, with margins
plt.figure(2)
bode(Li)
# Compute out the gain and phase margins
#! Not implemented
# gm, pm, wcg, wcp = margin(Li)
# Compute the sensitivity and complementary sensitivity functions
Si = feedback(1, Li)
Ti = Li*Si
# Check to make sure that the specification is met
plt.figure(3)
gangof4(Pi, Ci)
# Compute out the actual transfer function from u1 to v1 (see L8.2 notes)
# Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi)
Hi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1))
plt.figure(4)
plt.clf()
plt.subplot(221)
bode(Hi)
# Now design the lateral control system
a, b, K = 0.02, 5, 2
Co = -K*tf([1, 0.3], [1, 10]) # another lead compensator
Lo = -m*g*Po*Co
plt.figure(5)
bode(Lo) # margin(Lo)
# Finally compute the real outer-loop loop gain + responses
L = Co*Hi*Po
S = feedback(1, L)
T = feedback(L, 1)
# Compute stability margins
gm, pm, wgc, wpc = margin(L)
print("Gain margin: %g at %g" % (gm, wgc))
print("Phase margin: %g at %g" % (pm, wpc))
plt.figure(6)
plt.clf()
bode(L, np.logspace(-4, 3))
# Add crossover line to the magnitude plot
#
# Note: in matplotlib before v2.1, the following code worked:
#
# plt.subplot(211); hold(True);
# loglog([1e-4, 1e3], [1, 1], 'k-')
#
# In later versions of matplotlib the call to plt.subplot will clear the
# axes and so we have to extract the axes that we want to use by hand.
# In addition, hold() is deprecated so we no longer require it.
#
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-magnitude':
break
ax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-')
#
# Replot phase starting at -90 degrees
#
# Get the phase plot axes
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-phase':
break
# Recreate the frequency response and shift the phase
mag, phase, w = freqresp(L, np.logspace(-4, 3))
phase = phase - 360
# Replot the phase by hand
ax.semilogx([1e-4, 1e3], [-180, -180], 'k-')
ax.semilogx(w, np.squeeze(phase), 'b-')
ax.axis([1e-4, 1e3, -360, 0])
plt.xlabel('Frequency [deg]')
plt.ylabel('Phase [deg]')
# plt.set(gca, 'YTick', [-360, -270, -180, -90, 0])
# plt.set(gca, 'XTick', [10^-4, 10^-2, 1, 100])
#
# Nyquist plot for complete design
#
plt.figure(7)
plt.clf()
nyquist(L, (0.0001, 1000))
plt.axis([-700, 5300, -3000, 3000])
# Add a box in the region we are going to expand
plt.plot([-400, -400, 200, 200, -400], [-100, 100, 100, -100, -100], 'r-')
# Expanded region
plt.figure(8)
plt.clf()
plt.subplot(231)
nyquist(L)
plt.axis([-10, 5, -20, 20])
# set up the color
color = 'b'
# Add arrows to the plot
# H1 = L.evalfr(0.4); H2 = L.evalfr(0.41);
# arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
# H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36);
# arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
plt.figure(9)
Yvec, Tvec = step(T, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
Yvec, Tvec = step(Co*S, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
plt.figure(10)
plt.clf()
P, Z = pzmap(T, plot=True, grid=True)
print("Closed loop poles and zeros: ", P, Z)
# Gang of Four
plt.figure(11)
plt.clf()
gangof4(Hi*Po, Co)
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
| 25.849162 | 76 | 0.661336 |
from __future__ import print_function
import os
import matplotlib.pyplot as plt
from control.matlab import *
import numpy as np
m = 4
J = 0.0475
r = 0.25
g = 9.8
c = 0.05
Pi = tf([r], [J, 0, 0])
Po = tf([1], [m, c, 0])
k, a, b = 200, 2, 50
Ci = k*tf([1, a], [1, b])
Li = Pi*Ci
plt.figure(1)
bode(Pi)
plt.figure(2)
bode(Li)
Si = feedback(1, Li)
Ti = Li*Si
plt.figure(3)
gangof4(Pi, Ci)
Hi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1))
plt.figure(4)
plt.clf()
plt.subplot(221)
bode(Hi)
a, b, K = 0.02, 5, 2
Co = -K*tf([1, 0.3], [1, 10])
Lo = -m*g*Po*Co
plt.figure(5)
bode(Lo)
L = Co*Hi*Po
S = feedback(1, L)
T = feedback(L, 1)
gm, pm, wgc, wpc = margin(L)
print("Gain margin: %g at %g" % (gm, wgc))
print("Phase margin: %g at %g" % (pm, wpc))
plt.figure(6)
plt.clf()
bode(L, np.logspace(-4, 3))
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-magnitude':
break
ax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-')
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-phase':
break
mag, phase, w = freqresp(L, np.logspace(-4, 3))
phase = phase - 360
ax.semilogx([1e-4, 1e3], [-180, -180], 'k-')
ax.semilogx(w, np.squeeze(phase), 'b-')
ax.axis([1e-4, 1e3, -360, 0])
plt.xlabel('Frequency [deg]')
plt.ylabel('Phase [deg]')
plt.figure(7)
plt.clf()
nyquist(L, (0.0001, 1000))
plt.axis([-700, 5300, -3000, 3000])
plt.plot([-400, -400, 200, 200, -400], [-100, 100, 100, -100, -100], 'r-')
plt.figure(8)
plt.clf()
plt.subplot(231)
nyquist(L)
plt.axis([-10, 5, -20, 20])
color = 'b'
plt.figure(9)
Yvec, Tvec = step(T, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
Yvec, Tvec = step(Co*S, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
plt.figure(10)
plt.clf()
P, Z = pzmap(T, plot=True, grid=True)
print("Closed loop poles and zeros: ", P, Z)
plt.figure(11)
plt.clf()
gangof4(Hi*Po, Co)
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
| true | true |
f72b7e5c1c47e2d9063b237e29c8de4e5ecad2aa | 4,093 | py | Python | utils/vocab_reader.py | aaj-fullfact/slot_filling_and_intent_detection_of_SLU | 27adf381b9b087caa1e90bfafb88765e5e296192 | [
"Apache-2.0"
] | 361 | 2019-06-17T08:37:49.000Z | 2022-03-29T20:46:15.000Z | utils/vocab_reader.py | aaj-fullfact/slot_filling_and_intent_detection_of_SLU | 27adf381b9b087caa1e90bfafb88765e5e296192 | [
"Apache-2.0"
] | 9 | 2019-07-03T06:38:36.000Z | 2021-12-09T12:08:56.000Z | utils/vocab_reader.py | aaj-fullfact/slot_filling_and_intent_detection_of_SLU | 27adf381b9b087caa1e90bfafb88765e5e296192 | [
"Apache-2.0"
] | 100 | 2019-06-17T03:04:36.000Z | 2022-03-21T21:07:30.000Z | """Data utilities."""
#import torch
#import json
import operator
from collections import Counter
def read_vocab_file(vocab_path, bos_eos=False, no_pad=False, no_unk=False, separator=':'):
    """Load a vocabulary file into word->id / id->word mappings.

    Each line is either "word <separator> idx" (separator padded by single
    spaces) or a bare word, in which case ids are assigned sequentially after
    the reserved tokens.

    Args:
        vocab_path: path of the vocabulary file.
        bos_eos: if True, additionally reserve '<s>' and '</s>'.
        no_pad: if True, do not reserve '<pad>' (otherwise it gets the first id).
        no_unk: if True, do not reserve '<unk>'.
        separator: token separating word and index on each line.

    Returns:
        (word2id, id2word) dicts.
    """
    word2id, id2word = {}, {}
    if not no_pad:
        word2id['<pad>'] = len(word2id)
        id2word[len(id2word)] = '<pad>'
    if not no_unk:
        word2id['<unk>'] = len(word2id)
        id2word[len(id2word)] = '<unk>'
    if bos_eos == True:
        word2id['<s>'] = len(word2id)
        id2word[len(id2word)] = '<s>'
        word2id['</s>'] = len(word2id)
        id2word[len(id2word)] = '</s>'
    padded_sep = ' ' + separator + ' '
    with open(vocab_path, 'r') as f:
        for line in f:
            line = line.strip('\r\n')
            # Bug fix: test for the *padded* separator.  The old check
            # (`separator in line`) raised ValueError for words that merely
            # contain the separator character (e.g. "a:b" with no index).
            # rsplit(..., 1) additionally tolerates words that themselves
            # contain the padded separator.
            if padded_sep in line:
                word, idx = line.rsplit(padded_sep, 1)
                idx = int(idx)
            else:
                word = line.strip()
                idx = len(word2id)
            if word not in word2id:
                word2id[word] = idx
                id2word[idx] = word
    return word2id, id2word
def save_vocab(idx2word, vocab_path, separator=':'):
    """Write the vocabulary as one "word <separator> idx" line per id, in id order."""
    with open(vocab_path, 'w') as f:
        f.writelines(
            '{} {} {}\n'.format(idx2word[i], separator, i)
            for i in range(len(idx2word))
        )
def construct_vocab(input_seqs, vocab_config={'mini_word_freq':1, 'bos_eos':False}):
    '''Build word->id / id->word maps from raw sequences.

    @params:
        1. input_seqs: a list whose elements are either lists of words or
           single words (str).
        2. vocab_config:
            mini_word_freq: minimum word frequency to keep a word
            bos_eos: if True, additionally reserve <s> and </s>
    @return:
        1. word2idx
        2. idx2word
    '''
    # NOTE: the mutable default vocab_config is only read, never mutated, so
    # the shared-default pitfall does not apply here.
    counts = Counter()
    for seq in input_seqs:
        if isinstance(seq, list):
            counts.update(seq)
        else:
            assert type(seq) == str
            counts[seq] += 1
    # Reserved tokens never compete with real words for ids.
    for special in ('<s>', '<pad>', '</s>', '<unk>'):
        counts.pop(special, None)
    if vocab_config['bos_eos'] == True:
        word2id = {'<pad>': 0, '<unk>': 1, '<s>': 2, '</s>': 3}
    else:
        word2id = {'<pad>': 0, '<unk>': 1}
    id2word = {idx: word for word, idx in word2id.items()}
    # Counter.most_common() sorts by count descending with a stable sort, so
    # equal-count words keep first-seen order -- exactly like the original
    # sorted(..., key=itemgetter(1), reverse=True).
    min_freq = vocab_config['mini_word_freq']
    for word, freq in counts.most_common():
        if freq < min_freq:
            break  # counts only decrease from here on
        idx = len(word2id)
        word2id[word] = idx
        id2word[idx] = word
    return word2id, id2word
def read_vocab_from_data_file(data_path, vocab_config={'mini_word_freq':1, 'bos_eos':False, 'lowercase':False}, with_tag=True, separator=':'):
    '''
    Build the input-word vocabulary directly from a data file.

    @params:
        1. data_path: file path of data
        2. vocab_config: config of how to build vocab (see construct_vocab).
    @return:
        1. input vocab: (word2idx, idx2word)
    '''
    print('Reading source data ...')
    input_seqs = []
    with open(data_path, 'r') as f:
        for line in f:
            # Keep only the slot-tag side of "tokens <=> intent" lines.
            slot_tag_line = line.strip('\n\r').split(' <=> ')[0]
            if slot_tag_line == "":
                continue
            words = []
            for token in slot_tag_line.split(' '):
                if with_tag:
                    # Token looks like "word:tag"; the word part may itself
                    # contain the separator, so rejoin all but the last piece.
                    pieces = token.split(separator)
                    assert len(pieces) >= 2
                    word = separator.join(pieces[:-1])
                else:
                    word = token
                if vocab_config['lowercase']:
                    word = word.lower()
                words.append(word)
            input_seqs.append(words)
    print('Constructing input vocabulary from ', data_path, ' ...')
    return construct_vocab(input_seqs, vocab_config)
| 31.976563 | 142 | 0.50623 |
import operator
def read_vocab_file(vocab_path, bos_eos=False, no_pad=False, no_unk=False, separator=':'):
word2id, id2word = {}, {}
if not no_pad:
word2id['<pad>'] = len(word2id)
id2word[len(id2word)] = '<pad>'
if not no_unk:
word2id['<unk>'] = len(word2id)
id2word[len(id2word)] = '<unk>'
if bos_eos == True:
word2id['<s>'] = len(word2id)
id2word[len(id2word)] = '<s>'
word2id['</s>'] = len(word2id)
id2word[len(id2word)] = '</s>'
with open(vocab_path, 'r') as f:
for line in f:
if separator in line:
word, idx = line.strip('\r\n').split(' '+separator+' ')
idx = int(idx)
else:
word = line.strip()
idx = len(word2id)
if word not in word2id:
word2id[word] = idx
id2word[idx] = word
return word2id, id2word
def save_vocab(idx2word, vocab_path, separator=':'):
with open(vocab_path, 'w') as f:
for idx in range(len(idx2word)):
f.write(idx2word[idx]+' '+separator+' '+str(idx)+'\n')
def construct_vocab(input_seqs, vocab_config={'mini_word_freq':1, 'bos_eos':False}):
vocab = {}
for seq in input_seqs:
if type(seq) == type([]):
for word in seq:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
else:
assert type(seq) == str
if seq not in vocab:
vocab[seq] = 1
else:
vocab[seq] += 1
if '<s>' in vocab:
del vocab['<s>']
if '<pad>' in vocab:
del vocab['<pad>']
if '</s>' in vocab:
del vocab['</s>']
if '<unk>' in vocab:
del vocab['<unk>']
if vocab_config['bos_eos'] == True:
word2id = {'<pad>': 0, '<unk>': 1, '<s>': 2, '</s>': 3}
id2word = {0: '<pad>', 1: '<unk>', 2: '<s>', 3: '</s>'}
else:
word2id = {'<pad>': 0, '<unk>': 1,}
id2word = {0: '<pad>', 1: '<unk>',}
sorted_word2id = sorted(
vocab.items(),
key=operator.itemgetter(1),
reverse=True
)
sorted_words = [x[0] for x in sorted_word2id if x[1] >= vocab_config['mini_word_freq']]
for word in sorted_words:
idx = len(word2id)
word2id[word] = idx
id2word[idx] = word
return word2id, id2word
def read_vocab_from_data_file(data_path, vocab_config={'mini_word_freq':1, 'bos_eos':False, 'lowercase':False}, with_tag=True, separator=':'):
print('Reading source data ...')
input_seqs = []
with open(data_path, 'r') as f:
for ind, line in enumerate(f):
slot_tag_line = line.strip('\n\r').split(' <=> ')[0]
if slot_tag_line == "":
continue
in_seq = []
for item in slot_tag_line.split(' '):
if with_tag:
tmp = item.split(separator)
assert len(tmp) >= 2
word, tag = separator.join(tmp[:-1]), tmp[-1]
else:
word = item
if vocab_config['lowercase']:
word = word.lower()
in_seq.append(word)
input_seqs.append(in_seq)
print('Constructing input vocabulary from ', data_path, ' ...')
word2idx, idx2word = construct_vocab(input_seqs, vocab_config)
return (word2idx, idx2word)
| true | true |
f72b7f156678de318e745bd933773f338fc00cc7 | 15,626 | py | Python | tests/jet_test.py | machineko/jax | 5a9048a0058d027000afc5707413d24209aa6f9f | [
"Apache-2.0"
] | 7 | 2020-12-04T16:54:54.000Z | 2022-02-15T07:26:56.000Z | tests/jet_test.py | josephrocca/jax | ab544cb26dfea3147c336754d3e3eb457a405e38 | [
"Apache-2.0"
] | 20 | 2021-08-17T20:31:56.000Z | 2022-03-31T11:56:24.000Z | tests/jet_test.py | kbnarayanavit/jax | 1e3c4833c97302caf6046ff99656b8ff21430b8d | [
"Apache-2.0"
] | 1 | 2021-08-11T20:57:59.000Z | 2021-08-11T20:57:59.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce, partial
from absl.testing import absltest
import numpy as np
import unittest
import jax
from jax import test_util as jtu
import jax.numpy as jnp
import jax.scipy.special
from jax import random
from jax import jacfwd, jit
from jax.experimental import stax
from jax.experimental.jet import jet, fact, zero_series
from jax import lax
from jax.config import config
config.parse_flags_with_absl()
def jvp_taylor(fun, primals, series):
  """Reference Taylor expansion of `fun`: differentiate an eps-perturbed
  composition with nested `jacfwd` and return (primal_out, [t1, ..., t_order])."""
  # Computes the Taylor series the slow way, with nested jvp.
  # All per-argument series must have the same length (the expansion order).
  order, = set(map(len, series))
  primals = tuple(jnp.asarray(p) for p in primals)
  def composition(eps):
    # Polynomial perturbation whose k-th derivative at eps=0 recovers the
    # k-th supplied Taylor coefficient (hence the 1/k! factors).
    taylor_terms = [sum([eps ** (i+1) * terms[i] / fact(i + 1)
                    for i in range(len(terms))]) for terms in series]
    nudged_args = [(x + t).astype(x.dtype) for x, t in zip(primals, taylor_terms)]
    return fun(*nudged_args)
  primal_out = fun(*primals)
  # i-th output term is the (i+1)-fold forward derivative at eps = 0.
  terms_out = [repeated(jacfwd, i+1)(composition)(0.) for i in range(order)]
  return primal_out, terms_out
def repeated(f, n):
  """Return a function that applies `f` to its argument `n` times."""
  def apply_n_times(value):
    for _ in range(n):
      value = f(value)
    return value
  return apply_n_times
def transform(lims, x):
  """Affinely map `x` from [0, 1] onto the interval lims = (lo, hi)."""
  lo, hi = lims[0], lims[1]
  return lo + x * (hi - lo)
class JetTest(jtu.JaxTestCase):
  """Tests for jax.experimental.jet: each check compares jet's truncated
  Taylor propagation against `jvp_taylor`, the nested-jacfwd reference."""

  def check_jet(self, fun, primals, series, atol=1e-5, rtol=1e-5,
                check_dtypes=True):
    # Compare jet's primal output and higher-order terms to the reference.
    y, terms = jet(fun, primals, series)
    expected_y, expected_terms = jvp_taylor(fun, primals, series)
    self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
                        check_dtypes=check_dtypes)
    self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
                        check_dtypes=check_dtypes)
  def check_jet_finite(self, fun, primals, series, atol=1e-5, rtol=1e-5,
                       check_dtypes=True):
    # Like check_jet, but masks non-finite entries to NaN first so that
    # inf/-inf/nan values in non-finite regions compare alike.
    y, terms = jet(fun, primals, series)
    expected_y, expected_terms = jvp_taylor(fun, primals, series)
    def _convert(x):
      return jnp.where(jnp.isfinite(x), x, jnp.nan)
    y = _convert(y)
    expected_y = _convert(expected_y)
    terms = _convert(jnp.asarray(terms))
    expected_terms = _convert(jnp.asarray(expected_terms))
    self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
                        check_dtypes=check_dtypes)
    self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
                        check_dtypes=check_dtypes)
  @jtu.skip_on_devices("tpu")
  def test_dot(self):
    M, K, N = 2, 3, 4
    order = 3
    rng = np.random.RandomState(0)
    x1 = rng.randn(M, K)
    x2 = rng.randn(K, N)
    primals = (x1, x2)
    terms_in1 = [rng.randn(*x1.shape) for _ in range(order)]
    terms_in2 = [rng.randn(*x2.shape) for _ in range(order)]
    series_in = (terms_in1, terms_in2)
    self.check_jet(jnp.dot, primals, series_in)
  @jtu.skip_on_devices("tpu")
  def test_conv(self):
    order = 3
    input_shape = (1, 5, 5, 1)
    key = random.PRNGKey(0)
    # TODO(duvenaud): Check all types of padding
    init_fun, apply_fun = stax.Conv(3, (2, 2), padding='VALID')
    _, (W, b) = init_fun(key, input_shape)
    rng = np.random.RandomState(0)
    x = rng.randn(*input_shape)
    primals = (W, b, x)
    series_in1 = [rng.randn(*W.shape) for _ in range(order)]
    series_in2 = [rng.randn(*b.shape) for _ in range(order)]
    series_in3 = [rng.randn(*x.shape) for _ in range(order)]
    series_in = (series_in1, series_in2, series_in3)
    def f(W, b, x):
      return apply_fun((W, b), x)
    self.check_jet(f, primals, series_in, check_dtypes=False)
  def unary_check(self, fun, lims=[-2, 2], order=3, dtype=None, atol=1e-4,
                  rtol=1e-4):
    # Generic harness for elementwise unary primitives; primals are drawn
    # uniformly from `lims`, series terms from a standard normal.
    dims = 2, 3
    rng = np.random.RandomState(0)
    if dtype is None:
      primal_in = transform(lims, rng.rand(*dims))
      terms_in = [rng.randn(*dims) for _ in range(order)]
    else:
      rng = jtu.rand_uniform(rng, *lims)
      primal_in = rng(dims, dtype)
      terms_in = [rng(dims, dtype) for _ in range(order)]
    self.check_jet(fun, (primal_in,), (terms_in,), atol, rtol)
  def binary_check(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):
    # Generic harness for elementwise binary primitives; `lims` may be a
    # (x_lims, y_lims) tuple for asymmetric domains.
    dims = 2, 3
    rng = np.random.RandomState(0)
    if isinstance(lims, tuple):
      x_lims, y_lims = lims
    else:
      x_lims, y_lims = lims, lims
    if dtype is None:
      primal_in = (transform(x_lims, rng.rand(*dims)),
                   transform(y_lims, rng.rand(*dims)))
      series_in = ([rng.randn(*dims) for _ in range(order)],
                   [rng.randn(*dims) for _ in range(order)])
    else:
      rng = jtu.rand_uniform(rng, *lims)
      primal_in = (rng(dims, dtype),
                   rng(dims, dtype))
      series_in = ([rng(dims, dtype) for _ in range(order)],
                   [rng(dims, dtype) for _ in range(order)])
    if finite:
      self.check_jet(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
    else:
      self.check_jet_finite(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
  def unary_check_float0(self, fun, lims=[-2, 2], order=3, dtype=None):
    # like unary_check but for functions that output integers (so their tangent
    # type is float0 arrays)
    raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
  def binary_check_float0(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):
    # like binary_check but for functions that output integers (so their tangent
    # type is float0 arrays)
    raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
  def expit_check(self, lims=[-2, 2], order=3):
    # Exercises jet's internal expit taylor rule directly against the
    # reference expansion of jax.scipy.special.expit.
    dims = 2, 3
    rng = np.random.RandomState(0)
    primal_in = transform(lims, rng.rand(*dims))
    terms_in = [rng.randn(*dims) for _ in range(order)]
    primals = (primal_in, )
    series = (terms_in, )
    y, terms = jax.experimental.jet._expit_taylor(primals, series)
    expected_y, expected_terms = jvp_taylor(jax.scipy.special.expit, primals, series)
    atol = 1e-4
    rtol = 1e-4
    self.assertAllClose(y, expected_y, atol=atol, rtol=rtol)
    self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol)
  @jtu.skip_on_devices("tpu")
  def test_int_pow(self):
    for p in range(6):
      self.unary_check(lambda x: x ** p, lims=[-2, 2])
    self.unary_check(lambda x: x ** 10, lims=[0, 0])
  # Integer/boolean-output primitives: currently skipped via *_check_float0.
  @jtu.skip_on_devices("tpu")
  def test_is_finite(self): self.unary_check_float0(lax.is_finite)
  @jtu.skip_on_devices("tpu")
  def test_and(self): self.binary_check_float0(lax.bitwise_and, dtype=np.bool_)
  @jtu.skip_on_devices("tpu")
  def test_or(self): self.binary_check_float0(lax.bitwise_or, dtype=np.bool_)
  @jtu.skip_on_devices("tpu")
  def test_xor(self): self.binary_check_float0(jnp.bitwise_xor, dtype=np.bool_)
  @jtu.skip_on_devices("tpu")
  def test_shift_left(self): self.binary_check_float0(lax.shift_left, dtype=np.int32)
  @jtu.skip_on_devices("tpu")
  def test_shift_right_a(self): self.binary_check_float0(lax.shift_right_arithmetic, dtype=np.int32)
  @jtu.skip_on_devices("tpu")
  def test_shift_right_l(self): self.binary_check_float0(lax.shift_right_logical, dtype=np.int32)
  @jtu.skip_on_devices("tpu")
  def test_le(self): self.binary_check_float0(lambda x, y: x <= y)
  @jtu.skip_on_devices("tpu")
  def test_gt(self): self.binary_check_float0(lambda x, y: x > y)
  @jtu.skip_on_devices("tpu")
  def test_lt(self): self.binary_check_float0(lambda x, y: x < y)
  @jtu.skip_on_devices("tpu")
  def test_ge(self): self.binary_check_float0(lambda x, y: x >= y)
  @jtu.skip_on_devices("tpu")
  def test_eq(self): self.binary_check_float0(lambda x, y: x == y)
  @jtu.skip_on_devices("tpu")
  def test_ne(self): self.binary_check_float0(lambda x, y: x != y)
  @jtu.skip_on_devices("tpu")
  def test_not(self): self.unary_check_float0(lax.bitwise_not, dtype=np.bool_)
  # Elementwise float primitives, checked against the reference expansion.
  @jtu.skip_on_devices("tpu")
  def test_exp(self): self.unary_check(jnp.exp)
  @jtu.skip_on_devices("tpu")
  def test_neg(self): self.unary_check(jnp.negative)
  @jtu.skip_on_devices("tpu")
  def test_floor(self): self.unary_check(jnp.floor)
  @jtu.skip_on_devices("tpu")
  def test_ceil(self): self.unary_check(jnp.ceil)
  @jtu.skip_on_devices("tpu")
  def test_round(self): self.unary_check(lax.round)
  @jtu.skip_on_devices("tpu")
  def test_sign(self): self.unary_check(lax.sign)
  @jtu.skip_on_devices("tpu")
  def test_real(self): self.unary_check(lax.real, dtype=np.complex64)
  @jtu.skip_on_devices("tpu")
  def test_conj(self): self.unary_check(lax.conj, dtype=np.complex64)
  @jtu.skip_on_devices("tpu")
  def test_imag(self): self.unary_check(lax.imag, dtype=np.complex64)
  @jtu.skip_on_devices("tpu")
  def test_log(self): self.unary_check(jnp.log, lims=[0.8, 4.0])
  @jtu.skip_on_devices("tpu")
  def test_gather(self): self.unary_check(lambda x: x[1:])
  @jtu.skip_on_devices("tpu")
  def test_reduce_max(self): self.unary_check(lambda x: x.max(axis=1))
  @jtu.skip_on_devices("tpu")
  def test_reduce_min(self): self.unary_check(lambda x: x.min(axis=1))
  @jtu.skip_on_devices("tpu")
  def test_all_max(self): self.unary_check(jnp.max)
  @jtu.skip_on_devices("tpu")
  def test_all_min(self): self.unary_check(jnp.min)
  @jtu.skip_on_devices("tpu")
  def test_stopgrad(self): self.unary_check(lax.stop_gradient)
  @jtu.skip_on_devices("tpu")
  def test_abs(self): self.unary_check(jnp.abs)
  @jtu.skip_on_devices("tpu")
  def test_fft(self): self.unary_check(jnp.fft.fft)
  @jtu.skip_on_devices("tpu")
  def test_log1p(self): self.unary_check(jnp.log1p, lims=[0, 4.])
  @jtu.skip_on_devices("tpu")
  def test_expm1(self): self.unary_check(jnp.expm1)
  @jtu.skip_on_devices("tpu")
  def test_sin(self): self.unary_check(jnp.sin)
  @jtu.skip_on_devices("tpu")
  def test_cos(self): self.unary_check(jnp.cos)
  @jtu.skip_on_devices("tpu")
  def test_sinh(self): self.unary_check(jnp.sinh)
  @jtu.skip_on_devices("tpu")
  def test_cosh(self): self.unary_check(jnp.cosh)
  @jtu.skip_on_devices("tpu")
  def test_tanh(self): self.unary_check(jnp.tanh, lims=[-500, 500], order=5)
  @jtu.skip_on_devices("tpu")
  def test_expit(self): self.unary_check(jax.scipy.special.expit, lims=[-500, 500], order=5)
  @jtu.skip_on_devices("tpu")
  def test_expit2(self): self.expit_check(lims=[-500, 500], order=5)
  @jtu.skip_on_devices("tpu")
  def test_sqrt(self): self.unary_check(jnp.sqrt, lims=[0, 5.])
  @jtu.skip_on_devices("tpu")
  def test_rsqrt(self): self.unary_check(lax.rsqrt, lims=[0, 5000.])
  @jtu.skip_on_devices("tpu")
  def test_asinh(self): self.unary_check(lax.asinh, lims=[-100, 100])
  @jtu.skip_on_devices("tpu")
  def test_acosh(self): self.unary_check(lax.acosh, lims=[-100, 100])
  @jtu.skip_on_devices("tpu")
  def test_atanh(self): self.unary_check(lax.atanh, lims=[-1, 1])
  @jtu.skip_on_devices("tpu")
  def test_erf(self): self.unary_check(lax.erf)
  @jtu.skip_on_devices("tpu")
  def test_erfc(self): self.unary_check(lax.erfc)
  @jtu.skip_on_devices("tpu")
  def test_erf_inv(self): self.unary_check(lax.erf_inv, lims=[-1, 1])
  @jtu.skip_on_devices("tpu")
  def test_cumsum(self): self.unary_check(jnp.cumsum)
  @jtu.skip_on_devices("tpu")
  def test_cumprod(self): self.unary_check(jnp.cumprod)
  @jtu.skip_on_devices("tpu")
  def test_cummax(self): self.unary_check(partial(lax.cummax, axis=0))
  @jtu.skip_on_devices("tpu")
  def test_cummin(self): self.unary_check(partial(lax.cummin, axis=0))
  @jtu.skip_on_devices("tpu")
  def test_div(self): self.binary_check(lambda x, y: x / y, lims=[0.8, 4.0])
  @jtu.skip_on_devices("tpu")
  def test_rem(self): self.binary_check(lax.rem, lims=[0.8, 4.0])
  @jtu.skip_on_devices("tpu")
  def test_complex(self): self.binary_check(lax.complex)
  @jtu.skip_on_devices("tpu")
  def test_sub(self): self.binary_check(lambda x, y: x - y)
  @jtu.skip_on_devices("tpu")
  def test_add(self): self.binary_check(lambda x, y: x + y)
  @jtu.skip_on_devices("tpu")
  def test_mul(self): self.binary_check(lambda x, y: x * y)
  @jtu.skip_on_devices("tpu")
  def test_max(self): self.binary_check(lax.max)
  @jtu.skip_on_devices("tpu")
  def test_min(self): self.binary_check(lax.min)
  @jtu.skip_on_devices("tpu")
  @jtu.ignore_warning(message="overflow encountered in power")
  def test_pow(self): self.binary_check(lambda x, y: x ** y, lims=([0.2, 500], [-500, 500]), finite=False)
  @jtu.skip_on_devices("tpu")
  def test_atan2(self): self.binary_check(lax.atan2, lims=[-40, 40])
  @jtu.skip_on_devices("tpu")
  def test_clamp(self):
    lims = [-2, 2]
    order = 3
    dims = 2, 3
    rng = np.random.RandomState(0)
    primal_in = (transform(lims, rng.rand(*dims)),
                 transform(lims, rng.rand(*dims)),
                 transform(lims, rng.rand(*dims)))
    series_in = ([rng.randn(*dims) for _ in range(order)],
                 [rng.randn(*dims) for _ in range(order)],
                 [rng.randn(*dims) for _ in range(order)])
    self.check_jet(lax.clamp, primal_in, series_in, atol=1e-4, rtol=1e-4)
  def test_process_call(self):
    # jet must propagate through a jit call boundary.
    def f(x):
      return jit(lambda x: x * x)(x)
    self.unary_check(f, rtol=2e-4)
  def test_post_process_call(self):
    # jet must propagate through a jit call that closes over a traced value.
    def f(x):
      return jit(lambda y: x * y)(2.)
    self.unary_check(f, rtol=5e-4)
  def test_select(self):
    M, K = 2, 3
    order = 3
    rng = np.random.RandomState(0)
    b = rng.rand(M, K) < 0.5
    x = rng.randn(M, K)
    y = rng.randn(M, K)
    primals = (b, x, y)
    terms_b = [rng.randn(*b.shape) for _ in range(order)]
    terms_x = [rng.randn(*x.shape) for _ in range(order)]
    terms_y = [rng.randn(*y.shape) for _ in range(order)]
    series_in = (terms_b, terms_x, terms_y)
    self.check_jet(jnp.where, primals, series_in, rtol=5e-4)
  def test_inst_zero(self):
    # A constant function (symbolically zero series) must agree with one that
    # instantiates its zero dependence on x explicitly.
    def f(x):
      return 2.
    def g(x):
      return 2. + 0 * x
    x = jnp.ones(1)
    order = 3
    f_out_primals, f_out_series = jet(f, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
    assert f_out_series is not zero_series
    g_out_primals, g_out_series = jet(g, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
    assert g_out_primals == f_out_primals
    assert g_out_series == f_out_series
  def test_add_any(self):
    # https://github.com/google/jax/issues/5217
    f = lambda x, eps: x * eps + eps + x
    def g(eps):
      x = jnp.array(1.)
      return jax.grad(f)(x, eps)
    jet(g, (1.,), ([1.],))  # doesn't crash
  def test_scatter_add(self):
    # very basic test from https://github.com/google/jax/issues/5365
    def f(x):
      x0 = x[0]
      x1 = x[1]
      return (x0**5 + x1**5).sum()
    def h(eps):
      from jax import jacfwd, grad
      x = jnp.array([1., 1.])
      μ = eps * x
      def F(t):
        return f(x + t * μ)
      return grad(jacfwd(F))(0.)
    self.check_jet(h, (0.,), ([1., 2., 3.],), rtol=1e-3)
if __name__ == '__main__':
  # Use JAX's test loader so jtu's sharding and flags are honored.
  absltest.main(testLoader=jtu.JaxTestLoader())
| 37.653012 | 114 | 0.656534 |
from functools import reduce, partial
from absl.testing import absltest
import numpy as np
import unittest
import jax
from jax import test_util as jtu
import jax.numpy as jnp
import jax.scipy.special
from jax import random
from jax import jacfwd, jit
from jax.experimental import stax
from jax.experimental.jet import jet, fact, zero_series
from jax import lax
from jax.config import config
config.parse_flags_with_absl()
def jvp_taylor(fun, primals, series):
order, = set(map(len, series))
primals = tuple(jnp.asarray(p) for p in primals)
def composition(eps):
taylor_terms = [sum([eps ** (i+1) * terms[i] / fact(i + 1)
for i in range(len(terms))]) for terms in series]
nudged_args = [(x + t).astype(x.dtype) for x, t in zip(primals, taylor_terms)]
return fun(*nudged_args)
primal_out = fun(*primals)
terms_out = [repeated(jacfwd, i+1)(composition)(0.) for i in range(order)]
return primal_out, terms_out
def repeated(f, n):
def rfun(p):
return reduce(lambda x, _: f(x), range(n), p)
return rfun
def transform(lims, x):
return x * (lims[1] - lims[0]) + lims[0]
class JetTest(jtu.JaxTestCase):
def check_jet(self, fun, primals, series, atol=1e-5, rtol=1e-5,
check_dtypes=True):
y, terms = jet(fun, primals, series)
expected_y, expected_terms = jvp_taylor(fun, primals, series)
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
def check_jet_finite(self, fun, primals, series, atol=1e-5, rtol=1e-5,
check_dtypes=True):
y, terms = jet(fun, primals, series)
expected_y, expected_terms = jvp_taylor(fun, primals, series)
def _convert(x):
return jnp.where(jnp.isfinite(x), x, jnp.nan)
y = _convert(y)
expected_y = _convert(expected_y)
terms = _convert(jnp.asarray(terms))
expected_terms = _convert(jnp.asarray(expected_terms))
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
@jtu.skip_on_devices("tpu")
def test_dot(self):
M, K, N = 2, 3, 4
order = 3
rng = np.random.RandomState(0)
x1 = rng.randn(M, K)
x2 = rng.randn(K, N)
primals = (x1, x2)
terms_in1 = [rng.randn(*x1.shape) for _ in range(order)]
terms_in2 = [rng.randn(*x2.shape) for _ in range(order)]
series_in = (terms_in1, terms_in2)
self.check_jet(jnp.dot, primals, series_in)
@jtu.skip_on_devices("tpu")
def test_conv(self):
order = 3
input_shape = (1, 5, 5, 1)
key = random.PRNGKey(0)
init_fun, apply_fun = stax.Conv(3, (2, 2), padding='VALID')
_, (W, b) = init_fun(key, input_shape)
rng = np.random.RandomState(0)
x = rng.randn(*input_shape)
primals = (W, b, x)
series_in1 = [rng.randn(*W.shape) for _ in range(order)]
series_in2 = [rng.randn(*b.shape) for _ in range(order)]
series_in3 = [rng.randn(*x.shape) for _ in range(order)]
series_in = (series_in1, series_in2, series_in3)
def f(W, b, x):
return apply_fun((W, b), x)
self.check_jet(f, primals, series_in, check_dtypes=False)
def unary_check(self, fun, lims=[-2, 2], order=3, dtype=None, atol=1e-4,
rtol=1e-4):
dims = 2, 3
rng = np.random.RandomState(0)
if dtype is None:
primal_in = transform(lims, rng.rand(*dims))
terms_in = [rng.randn(*dims) for _ in range(order)]
else:
rng = jtu.rand_uniform(rng, *lims)
primal_in = rng(dims, dtype)
terms_in = [rng(dims, dtype) for _ in range(order)]
self.check_jet(fun, (primal_in,), (terms_in,), atol, rtol)
def binary_check(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):
dims = 2, 3
rng = np.random.RandomState(0)
if isinstance(lims, tuple):
x_lims, y_lims = lims
else:
x_lims, y_lims = lims, lims
if dtype is None:
primal_in = (transform(x_lims, rng.rand(*dims)),
transform(y_lims, rng.rand(*dims)))
series_in = ([rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)])
else:
rng = jtu.rand_uniform(rng, *lims)
primal_in = (rng(dims, dtype),
rng(dims, dtype))
series_in = ([rng(dims, dtype) for _ in range(order)],
[rng(dims, dtype) for _ in range(order)])
if finite:
self.check_jet(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
else:
self.check_jet_finite(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
def unary_check_float0(self, fun, lims=[-2, 2], order=3, dtype=None):
raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
def binary_check_float0(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):
raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
def expit_check(self, lims=[-2, 2], order=3):
dims = 2, 3
rng = np.random.RandomState(0)
primal_in = transform(lims, rng.rand(*dims))
terms_in = [rng.randn(*dims) for _ in range(order)]
primals = (primal_in, )
series = (terms_in, )
y, terms = jax.experimental.jet._expit_taylor(primals, series)
expected_y, expected_terms = jvp_taylor(jax.scipy.special.expit, primals, series)
atol = 1e-4
rtol = 1e-4
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol)
@jtu.skip_on_devices("tpu")
def test_int_pow(self):
for p in range(6):
self.unary_check(lambda x: x ** p, lims=[-2, 2])
self.unary_check(lambda x: x ** 10, lims=[0, 0])
@jtu.skip_on_devices("tpu")
def test_is_finite(self): self.unary_check_float0(lax.is_finite)
@jtu.skip_on_devices("tpu")
def test_and(self): self.binary_check_float0(lax.bitwise_and, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_or(self): self.binary_check_float0(lax.bitwise_or, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_xor(self): self.binary_check_float0(jnp.bitwise_xor, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_shift_left(self): self.binary_check_float0(lax.shift_left, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_shift_right_a(self): self.binary_check_float0(lax.shift_right_arithmetic, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_shift_right_l(self): self.binary_check_float0(lax.shift_right_logical, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_le(self): self.binary_check_float0(lambda x, y: x <= y)
@jtu.skip_on_devices("tpu")
def test_gt(self): self.binary_check_float0(lambda x, y: x > y)
@jtu.skip_on_devices("tpu")
def test_lt(self): self.binary_check_float0(lambda x, y: x < y)
@jtu.skip_on_devices("tpu")
def test_ge(self): self.binary_check_float0(lambda x, y: x >= y)
@jtu.skip_on_devices("tpu")
def test_eq(self): self.binary_check_float0(lambda x, y: x == y)
@jtu.skip_on_devices("tpu")
def test_ne(self): self.binary_check_float0(lambda x, y: x != y)
@jtu.skip_on_devices("tpu")
def test_not(self): self.unary_check_float0(lax.bitwise_not, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_exp(self): self.unary_check(jnp.exp)
@jtu.skip_on_devices("tpu")
def test_neg(self): self.unary_check(jnp.negative)
@jtu.skip_on_devices("tpu")
def test_floor(self): self.unary_check(jnp.floor)
@jtu.skip_on_devices("tpu")
def test_ceil(self): self.unary_check(jnp.ceil)
@jtu.skip_on_devices("tpu")
def test_round(self): self.unary_check(lax.round)
@jtu.skip_on_devices("tpu")
def test_sign(self): self.unary_check(lax.sign)
@jtu.skip_on_devices("tpu")
def test_real(self): self.unary_check(lax.real, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_conj(self): self.unary_check(lax.conj, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_imag(self): self.unary_check(lax.imag, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_log(self): self.unary_check(jnp.log, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_gather(self): self.unary_check(lambda x: x[1:])
@jtu.skip_on_devices("tpu")
def test_reduce_max(self): self.unary_check(lambda x: x.max(axis=1))
@jtu.skip_on_devices("tpu")
def test_reduce_min(self): self.unary_check(lambda x: x.min(axis=1))
@jtu.skip_on_devices("tpu")
def test_all_max(self): self.unary_check(jnp.max)
@jtu.skip_on_devices("tpu")
def test_all_min(self): self.unary_check(jnp.min)
@jtu.skip_on_devices("tpu")
def test_stopgrad(self): self.unary_check(lax.stop_gradient)
@jtu.skip_on_devices("tpu")
def test_abs(self): self.unary_check(jnp.abs)
@jtu.skip_on_devices("tpu")
def test_fft(self): self.unary_check(jnp.fft.fft)
@jtu.skip_on_devices("tpu")
def test_log1p(self): self.unary_check(jnp.log1p, lims=[0, 4.])
@jtu.skip_on_devices("tpu")
def test_expm1(self): self.unary_check(jnp.expm1)
@jtu.skip_on_devices("tpu")
def test_sin(self): self.unary_check(jnp.sin)
@jtu.skip_on_devices("tpu")
def test_cos(self): self.unary_check(jnp.cos)
@jtu.skip_on_devices("tpu")
def test_sinh(self): self.unary_check(jnp.sinh)
@jtu.skip_on_devices("tpu")
def test_cosh(self): self.unary_check(jnp.cosh)
@jtu.skip_on_devices("tpu")
def test_tanh(self): self.unary_check(jnp.tanh, lims=[-500, 500], order=5)
@jtu.skip_on_devices("tpu")
def test_expit(self): self.unary_check(jax.scipy.special.expit, lims=[-500, 500], order=5)
@jtu.skip_on_devices("tpu")
def test_expit2(self): self.expit_check(lims=[-500, 500], order=5)
@jtu.skip_on_devices("tpu")
def test_sqrt(self): self.unary_check(jnp.sqrt, lims=[0, 5.])
@jtu.skip_on_devices("tpu")
def test_rsqrt(self): self.unary_check(lax.rsqrt, lims=[0, 5000.])
@jtu.skip_on_devices("tpu")
def test_asinh(self): self.unary_check(lax.asinh, lims=[-100, 100])
@jtu.skip_on_devices("tpu")
def test_acosh(self): self.unary_check(lax.acosh, lims=[-100, 100])
@jtu.skip_on_devices("tpu")
def test_atanh(self): self.unary_check(lax.atanh, lims=[-1, 1])
@jtu.skip_on_devices("tpu")
def test_erf(self): self.unary_check(lax.erf)
@jtu.skip_on_devices("tpu")
def test_erfc(self): self.unary_check(lax.erfc)
@jtu.skip_on_devices("tpu")
def test_erf_inv(self): self.unary_check(lax.erf_inv, lims=[-1, 1])
@jtu.skip_on_devices("tpu")
def test_cumsum(self): self.unary_check(jnp.cumsum)
@jtu.skip_on_devices("tpu")
def test_cumprod(self): self.unary_check(jnp.cumprod)
@jtu.skip_on_devices("tpu")
def test_cummax(self): self.unary_check(partial(lax.cummax, axis=0))
@jtu.skip_on_devices("tpu")
def test_cummin(self): self.unary_check(partial(lax.cummin, axis=0))
@jtu.skip_on_devices("tpu")
def test_div(self): self.binary_check(lambda x, y: x / y, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_rem(self): self.binary_check(lax.rem, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_complex(self): self.binary_check(lax.complex)
@jtu.skip_on_devices("tpu")
def test_sub(self): self.binary_check(lambda x, y: x - y)
@jtu.skip_on_devices("tpu")
def test_add(self): self.binary_check(lambda x, y: x + y)
@jtu.skip_on_devices("tpu")
def test_mul(self): self.binary_check(lambda x, y: x * y)
@jtu.skip_on_devices("tpu")
def test_max(self): self.binary_check(lax.max)
@jtu.skip_on_devices("tpu")
def test_min(self): self.binary_check(lax.min)
@jtu.skip_on_devices("tpu")
@jtu.ignore_warning(message="overflow encountered in power")
def test_pow(self): self.binary_check(lambda x, y: x ** y, lims=([0.2, 500], [-500, 500]), finite=False)
@jtu.skip_on_devices("tpu")
def test_atan2(self): self.binary_check(lax.atan2, lims=[-40, 40])
@jtu.skip_on_devices("tpu")
def test_clamp(self):
lims = [-2, 2]
order = 3
dims = 2, 3
rng = np.random.RandomState(0)
primal_in = (transform(lims, rng.rand(*dims)),
transform(lims, rng.rand(*dims)),
transform(lims, rng.rand(*dims)))
series_in = ([rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)])
self.check_jet(lax.clamp, primal_in, series_in, atol=1e-4, rtol=1e-4)
def test_process_call(self):
def f(x):
return jit(lambda x: x * x)(x)
self.unary_check(f, rtol=2e-4)
def test_post_process_call(self):
def f(x):
return jit(lambda y: x * y)(2.)
self.unary_check(f, rtol=5e-4)
def test_select(self):
M, K = 2, 3
order = 3
rng = np.random.RandomState(0)
b = rng.rand(M, K) < 0.5
x = rng.randn(M, K)
y = rng.randn(M, K)
primals = (b, x, y)
terms_b = [rng.randn(*b.shape) for _ in range(order)]
terms_x = [rng.randn(*x.shape) for _ in range(order)]
terms_y = [rng.randn(*y.shape) for _ in range(order)]
series_in = (terms_b, terms_x, terms_y)
self.check_jet(jnp.where, primals, series_in, rtol=5e-4)
def test_inst_zero(self):
def f(x):
return 2.
def g(x):
return 2. + 0 * x
x = jnp.ones(1)
order = 3
f_out_primals, f_out_series = jet(f, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
assert f_out_series is not zero_series
g_out_primals, g_out_series = jet(g, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
assert g_out_primals == f_out_primals
assert g_out_series == f_out_series
def test_add_any(self):
f = lambda x, eps: x * eps + eps + x
def g(eps):
x = jnp.array(1.)
return jax.grad(f)(x, eps)
jet(g, (1.,), ([1.],))
def test_scatter_add(self):
# very basic test from https://github.com/google/jax/issues/5365
def f(x):
x0 = x[0]
x1 = x[1]
return (x0**5 + x1**5).sum()
def h(eps):
from jax import jacfwd, grad
x = jnp.array([1., 1.])
μ = eps * x
def F(t):
return f(x + t * μ)
return grad(jacfwd(F))(0.)
self.check_jet(h, (0.,), ([1., 2., 3.],), rtol=1e-3)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.