code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# pyre-strict
import os
from copy import deepcopy
from typing import List, Optional
import torch
from torch import nn
from torchrecipes.core.test_utils.test_base import BaseTrainAppTestCase
from torchrecipes.utils.test import tempdir
from torchrecipes.vision.core.ops.fine_tuning_wrapper import FineTuningWrapper
from torchrecipes.vision.core.utils.model_weights import load_model_weights
from torchrecipes.vision.image_classification.train_app import (
ImageClassificationTrainApp,
)
from torchvision.models.resnet import resnet18
from torchvision.ops.misc import FrozenBatchNorm2d
class TestTrainApp(BaseTrainAppTestCase):
def _get_train_app(
self, tb_save_dir: str, test_overrides: Optional[List[str]] = None
) -> ImageClassificationTrainApp:
overrides: List[str] = [
"datamodule/datamodule=fake_data",
"+module.model.num_classes=10",
f"+tb_save_dir={tb_save_dir}",
]
app = self.create_app_from_hydra(
config_module="torchrecipes.vision.image_classification.conf",
config_name="train_app",
overrides=test_overrides if test_overrides else overrides,
)
self.mock_trainer_params(app, {"logger": True})
# pyre-fixme[7]: Expected `ImageClassificationTrainApp` but got `BaseTrainApp`.
return app
@tempdir
def test_train_model(self, root_dir: str) -> None:
train_app = self._get_train_app(tb_save_dir=root_dir)
# Train the model with the config
train_app.train()
@tempdir
def test_fine_tuning(self, root_dir: str) -> None:
pretrained = resnet18()
weights_path = os.path.join(root_dir, "weights.pth")
torch.save(pretrained.state_dict(), weights_path)
# prepare model for fine-tuning
trunk = resnet18(norm_layer=FrozenBatchNorm2d)
load_model_weights(trunk, weights_path)
head = nn.Linear(in_features=512, out_features=10)
fine_tune_model = FineTuningWrapper(trunk, "flatten", head)
origin_trunk = deepcopy(fine_tune_model.trunk)
# start fine-tuning
classification_train_app = self._get_train_app(tb_save_dir=root_dir)
# pyre-ignore[16]: ImageClassificationModule has model
classification_train_app.module.model = fine_tune_model
classification_train_app.train()
with torch.no_grad():
inp = torch.randn(1, 3, 28, 28)
origin_out = origin_trunk(inp)
# pyre-ignore[16]: ImageClassificationModule has model
tuned_out = classification_train_app.module.model.trunk(inp)
self.assertTrue(torch.equal(origin_out["flatten"], tuned_out["flatten"]))
|
[
"copy.deepcopy",
"torchrecipes.vision.core.ops.fine_tuning_wrapper.FineTuningWrapper",
"torch.equal",
"torch.randn",
"torchvision.models.resnet.resnet18",
"torch.nn.Linear",
"torch.no_grad",
"os.path.join",
"torchrecipes.vision.core.utils.model_weights.load_model_weights"
] |
[((1844, 1854), 'torchvision.models.resnet.resnet18', 'resnet18', ([], {}), '()\n', (1852, 1854), False, 'from torchvision.models.resnet import resnet18\n'), ((1879, 1916), 'os.path.join', 'os.path.join', (['root_dir', '"""weights.pth"""'], {}), "(root_dir, 'weights.pth')\n", (1891, 1916), False, 'import os\n'), ((2032, 2070), 'torchvision.models.resnet.resnet18', 'resnet18', ([], {'norm_layer': 'FrozenBatchNorm2d'}), '(norm_layer=FrozenBatchNorm2d)\n', (2040, 2070), False, 'from torchvision.models.resnet import resnet18\n'), ((2079, 2118), 'torchrecipes.vision.core.utils.model_weights.load_model_weights', 'load_model_weights', (['trunk', 'weights_path'], {}), '(trunk, weights_path)\n', (2097, 2118), False, 'from torchrecipes.vision.core.utils.model_weights import load_model_weights\n'), ((2134, 2177), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(512)', 'out_features': '(10)'}), '(in_features=512, out_features=10)\n', (2143, 2177), False, 'from torch import nn\n'), ((2204, 2245), 'torchrecipes.vision.core.ops.fine_tuning_wrapper.FineTuningWrapper', 'FineTuningWrapper', (['trunk', '"""flatten"""', 'head'], {}), "(trunk, 'flatten', head)\n", (2221, 2245), False, 'from torchrecipes.vision.core.ops.fine_tuning_wrapper import FineTuningWrapper\n'), ((2269, 2300), 'copy.deepcopy', 'deepcopy', (['fine_tune_model.trunk'], {}), '(fine_tune_model.trunk)\n', (2277, 2300), False, 'from copy import deepcopy\n'), ((2589, 2604), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2602, 2604), False, 'import torch\n'), ((2624, 2649), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(28)', '(28)'], {}), '(1, 3, 28, 28)\n', (2635, 2649), False, 'import torch\n'), ((2861, 2917), 'torch.equal', 'torch.equal', (["origin_out['flatten']", "tuned_out['flatten']"], {}), "(origin_out['flatten'], tuned_out['flatten'])\n", (2872, 2917), False, 'import torch\n')]
|
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import itertools
import functools
import requests
import netaddr
import lxml.etree
import bs4
from . import basepoller
LOG = logging.getLogger(__name__)
AZUREXML_URL = \
'https://www.microsoft.com/EN-US/DOWNLOAD/confirmation.aspx?id=41653'
AZUREJSON_URL = 'https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519'
REGIONS_XPATH = '/AzurePublicIpAddresses/Region'
def _build_IPv4(nodename, region, iprange):
iprange = iprange.get('Subnet', None)
if iprange is None:
LOG.error('%s - No Subnet', nodename)
return {}
try:
netaddr.IPNetwork(iprange)
except:
LOG.exception('%s - Invalid ip range: %s', nodename, iprange)
return {}
item = {
'indicator': iprange,
'type': 'IPv4',
'confidence': 100,
'azure_region': region,
'sources': ['azure.xml']
}
return item
def _build_IP(nodename, address_prefix, **keywords):
try:
ap = netaddr.IPNetwork(address_prefix)
except Exception:
LOG.exception('%s - Invalid ip range: %s', nodename, address_prefix)
return {}
if ap.version == 4:
type_ = 'IPv4'
elif ap.version == 6:
type_ = 'IPv6'
else:
LOG.error('{} - Unknown IP version: {}'.format(nodename, ap.version))
return {}
item = {
'indicator': address_prefix,
'type': type_,
'confidence': 100,
'sources': [nodename]
}
item.update(keywords)
return item
class AzureXML(basepoller.BasePollerFT):
def configure(self):
super(AzureXML, self).configure()
self.polling_timeout = self.config.get('polling_timeout', 20)
self.verify_cert = self.config.get('verify_cert', True)
def _process_item(self, item):
indicator = item.pop('indicator', None)
return [[indicator, item]]
def _build_request(self, now):
r = requests.Request(
'GET',
AZUREXML_URL
)
return r.prepare()
def _build_iterator(self, now):
_iterators = []
rkwargs = dict(
stream=False,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
AZUREXML_URL,
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
html_soup = bs4.BeautifulSoup(r.content, "lxml")
a = html_soup.find('a', class_='failoverLink')
if a is None:
LOG.error('%s - failoverLink not found', self.name)
raise RuntimeError('{} - failoverLink not found'.format(self.name))
LOG.debug('%s - download link: %s', self.name, a['href'])
rkwargs = dict(
stream=True,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
a['href'],
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
parser = lxml.etree.XMLParser()
for chunk in r.iter_content(chunk_size=10 * 1024):
parser.feed(chunk)
rtree = parser.close()
regions = rtree.xpath(REGIONS_XPATH)
for r in regions:
LOG.debug('%s - Extracting region: %s', self.name, r.get('Name'))
ipranges = r.xpath('IpRange')
_iterators.append(itertools.imap(
functools.partial(_build_IPv4, self.name, r.get('Name')),
ipranges
))
return itertools.chain(*_iterators)
class AzureJSON(basepoller.BasePollerFT):
def configure(self):
super(AzureJSON, self).configure()
self.polling_timeout = self.config.get('polling_timeout', 20)
self.verify_cert = self.config.get('verify_cert', True)
def _process_item(self, item):
indicator = item.pop('indicator', None)
return [[indicator, item]]
def _build_request(self, now):
r = requests.Request(
'GET',
AZUREJSON_URL
)
return r.prepare()
def _build_iterator(self, now):
_iterators = []
rkwargs = dict(
stream=False,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
AZUREJSON_URL,
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
html_soup = bs4.BeautifulSoup(r.content, "lxml")
a = html_soup.find('a', class_='failoverLink')
if a is None:
LOG.error('%s - failoverLink not found', self.name)
raise RuntimeError('{} - failoverLink not found'.format(self.name))
LOG.debug('%s - download link: %s', self.name, a['href'])
rkwargs = dict(
stream=True,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
a['href'],
**rkwargs
)
try:
r.raise_for_status()
except:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
rtree = r.json()
values = rtree.get('values', None)
if values is None:
LOG.error('{} - no values in JSON response'.format(self.name))
return []
for v in values:
LOG.debug('{} - Extracting value: {!r}'.format(self.name, v.get('id', None)))
id_ = v.get('id', None)
name = v.get('name', None)
props = v.get('properties', None)
if props is None:
LOG.error('{} - no properties in value'.format(self.name))
continue
region = props.get('region', None)
platform = props.get('platform', None)
system_service = props.get('systemService', None)
address_prefixes = props.get('addressPrefixes', [])
_iterators.append(itertools.imap(
functools.partial(
_build_IP,
self.name,
azure_name=name,
azure_id=id_,
azure_region=region,
azure_platform=platform,
azure_system_service=system_service
),
address_prefixes
))
return itertools.chain(*_iterators)
|
[
"functools.partial",
"netaddr.IPNetwork",
"requests.get",
"requests.Request",
"bs4.BeautifulSoup",
"itertools.chain",
"logging.getLogger"
] |
[((785, 812), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (802, 812), False, 'import logging\n'), ((1236, 1262), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['iprange'], {}), '(iprange)\n', (1253, 1262), False, 'import netaddr\n'), ((1622, 1655), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['address_prefix'], {}), '(address_prefix)\n', (1639, 1655), False, 'import netaddr\n'), ((2568, 2605), 'requests.Request', 'requests.Request', (['"""GET"""', 'AZUREXML_URL'], {}), "('GET', AZUREXML_URL)\n", (2584, 2605), False, 'import requests\n'), ((2881, 2918), 'requests.get', 'requests.get', (['AZUREXML_URL'], {}), '(AZUREXML_URL, **rkwargs)\n', (2893, 2918), False, 'import requests\n'), ((3172, 3208), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['r.content', '"""lxml"""'], {}), "(r.content, 'lxml')\n", (3189, 3208), False, 'import bs4\n'), ((3647, 3681), 'requests.get', 'requests.get', (["a['href']"], {}), "(a['href'], **rkwargs)\n", (3659, 3681), False, 'import requests\n'), ((4446, 4474), 'itertools.chain', 'itertools.chain', (['*_iterators'], {}), '(*_iterators)\n', (4461, 4474), False, 'import itertools\n'), ((4889, 4927), 'requests.Request', 'requests.Request', (['"""GET"""', 'AZUREJSON_URL'], {}), "('GET', AZUREJSON_URL)\n", (4905, 4927), False, 'import requests\n'), ((5203, 5241), 'requests.get', 'requests.get', (['AZUREJSON_URL'], {}), '(AZUREJSON_URL, **rkwargs)\n', (5215, 5241), False, 'import requests\n'), ((5495, 5531), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['r.content', '"""lxml"""'], {}), "(r.content, 'lxml')\n", (5512, 5531), False, 'import bs4\n'), ((5970, 6004), 'requests.get', 'requests.get', (["a['href']"], {}), "(a['href'], **rkwargs)\n", (5982, 6004), False, 'import requests\n'), ((7464, 7492), 'itertools.chain', 'itertools.chain', (['*_iterators'], {}), '(*_iterators)\n', (7479, 7492), False, 'import itertools\n'), ((7087, 7249), 'functools.partial', 'functools.partial', (['_build_IP', 'self.name'], {'azure_name': 'name', 'azure_id': 'id_', 'azure_region': 'region', 'azure_platform': 'platform', 'azure_system_service': 'system_service'}), '(_build_IP, self.name, azure_name=name, azure_id=id_,\n azure_region=region, azure_platform=platform, azure_system_service=\n system_service)\n', (7104, 7249), False, 'import functools\n')]
|
import xmlrpc.client as xmlrpclib
import pytest
from tests.factories import ReleaseFactory
@pytest.fixture(params=['/RPC2', '/pypi'])
def rpc_endpoint(request):
return request.param
@pytest.mark.django_db
def test_search_package_name(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': 'my-package'})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_package_summary(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'summary': ['Test summary']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_and(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'and')
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_or(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'or')
assert response == [{
'_pypi_ordering': 0,
'name': 'arcoiro',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-2',
'summary': 'arcoiro',
'version': '1.0.0'
}]
@pytest.mark.django_db
def test_search_invalid_fields_are_ignores(client, admin_user, live_server,
repository, rpc_endpoint):
ReleaseFactory(package__name='my-package',
package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'], 'invalid': ['Ops']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
|
[
"pytest.fixture",
"xmlrpc.client.ServerProxy",
"tests.factories.ReleaseFactory"
] |
[((96, 137), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['/RPC2', '/pypi']"}), "(params=['/RPC2', '/pypi'])\n", (110, 137), False, 'import pytest\n'), ((337, 439), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='my-package', package__repository=repository,\n summary='Test summary')\n", (351, 439), False, 'from tests.factories import ReleaseFactory\n'), ((467, 516), 'xmlrpc.client.ServerProxy', 'xmlrpclib.ServerProxy', (['(live_server + rpc_endpoint)'], {}), '(live_server + rpc_endpoint)\n', (488, 516), True, 'import xmlrpc.client as xmlrpclib\n'), ((873, 975), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='my-package', package__repository=repository,\n summary='Test summary')\n", (887, 975), False, 'from tests.factories import ReleaseFactory\n'), ((1003, 1052), 'xmlrpc.client.ServerProxy', 'xmlrpclib.ServerProxy', (['(live_server + rpc_endpoint)'], {}), '(live_server + rpc_endpoint)\n', (1024, 1052), True, 'import xmlrpc.client as xmlrpclib\n'), ((1410, 1514), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package-1"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='my-package-1', package__repository=repository,\n summary='Test summary')\n", (1424, 1514), False, 'from tests.factories import ReleaseFactory\n'), ((1554, 1653), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""arcoiro"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='arcoiro', package__repository=repository,\n summary='Test summary')\n", (1568, 1653), False, 'from tests.factories import ReleaseFactory\n'), ((1693, 1792), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package-2"""', 'package__repository': 'repository', 'summary': '"""arcoiro"""'}), "(package__name='my-package-2', package__repository=repository,\n summary='arcoiro')\n", (1707, 1792), False, 'from tests.factories import ReleaseFactory\n'), ((1841, 1890), 'xmlrpc.client.ServerProxy', 'xmlrpclib.ServerProxy', (['(live_server + rpc_endpoint)'], {}), '(live_server + rpc_endpoint)\n', (1862, 1890), True, 'import xmlrpc.client as xmlrpclib\n'), ((2309, 2413), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package-1"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='my-package-1', package__repository=repository,\n summary='Test summary')\n", (2323, 2413), False, 'from tests.factories import ReleaseFactory\n'), ((2453, 2552), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""arcoiro"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='arcoiro', package__repository=repository,\n summary='Test summary')\n", (2467, 2552), False, 'from tests.factories import ReleaseFactory\n'), ((2592, 2691), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package-2"""', 'package__repository': 'repository', 'summary': '"""arcoiro"""'}), "(package__name='my-package-2', package__repository=repository,\n summary='arcoiro')\n", (2606, 2691), False, 'from tests.factories import ReleaseFactory\n'), ((2740, 2789), 'xmlrpc.client.ServerProxy', 'xmlrpclib.ServerProxy', (['(live_server + rpc_endpoint)'], {}), '(live_server + rpc_endpoint)\n', (2761, 2789), True, 'import xmlrpc.client as xmlrpclib\n'), ((3573, 3675), 'tests.factories.ReleaseFactory', 'ReleaseFactory', ([], {'package__name': '"""my-package"""', 'package__repository': 'repository', 'summary': '"""Test summary"""'}), "(package__name='my-package', package__repository=repository,\n summary='Test summary')\n", (3587, 3675), False, 'from tests.factories import ReleaseFactory\n'), ((3724, 3773), 'xmlrpc.client.ServerProxy', 'xmlrpclib.ServerProxy', (['(live_server + rpc_endpoint)'], {}), '(live_server + rpc_endpoint)\n', (3745, 3773), True, 'import xmlrpc.client as xmlrpclib\n')]
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
import logging
import netaddr
import os
import pwd
import signal
import subprocess
import sys
import tempfile
import time
import yaml
try:
from urllib2 import HTTPError
from urllib2 import URLError
from urllib2 import urlopen
except ImportError:
# python3
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.request import urlopen
from cliff import command
from heatclient.common import template_utils
from openstackclient.i18n import _
from tripleoclient import constants
from tripleoclient import exceptions
from tripleoclient import fake_keystone
from tripleoclient import heat_launcher
from tripleo_common.utils import passwords as password_utils
# TODO(bogdando) rework the list by real requirements for the
# heat-container-image vs heat-native cases
REQUIRED_PACKAGES = iter([
'openstack-heat-api',
'openstack-heat-engine',
'openstack-heat-monolith',
'python-heat-agent',
'python-heat-agent-apply-config',
'python-heat-agent-hiera',
'python-heat-agent-puppet',
'python-heat-agent-docker-cmd',
'python-heat-agent-json-file',
'python-heat-agent-ansible',
'python-ipaddr',
'python-tripleoclient',
'docker',
'openvswitch',
'openstack-puppet-modules',
'yum-plugin-priorities',
'openstack-tripleo-common',
'openstack-tripleo-heat-templates',
'deltarpm'
])
class DeployUndercloud(command.Command):
"""Deploy Undercloud (experimental feature)"""
log = logging.getLogger(__name__ + ".DeployUndercloud")
auth_required = False
prerequisites = REQUIRED_PACKAGES
def _get_hostname(self):
p = subprocess.Popen(["hostname", "-s"], stdout=subprocess.PIPE)
return p.communicate()[0].rstrip()
def _install_prerequisites(self):
print('Checking for installed prerequisites ...')
processed = []
for p in self.prerequisites:
try:
subprocess.check_call(['rpm', '-q', p])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
processed.append(p)
elif e.returncode != 0:
raise Exception('Failed to check for prerequisites: '
'%s, the exit status %s'
% (p, e.returncode))
if len(processed) > 0:
print('Installing prerequisites ...')
subprocess.check_call(['yum', '-y', 'install'] + processed)
def _lookup_tripleo_server_stackid(self, client, stack_id):
server_stack_id = None
for X in client.resources.list(stack_id, nested_depth=6):
if X.resource_type in (
'OS::TripleO::Server',
'OS::TripleO::UndercloudServer'):
server_stack_id = X.physical_resource_id
return server_stack_id
def _launch_os_collect_config(self, keystone_port, stack_id):
print('Launching os-collect-config ...')
os.execvp('os-collect-config',
['os-collect-config',
'--polling-interval', '3',
'--heat-auth-url', 'http://127.0.0.1:%s/v3' % keystone_port,
'--heat-password', '<PASSWORD>',
'--heat-user-id', 'admin',
'--heat-project-id', 'admin',
'--heat-stack-id', stack_id,
'--heat-resource-name', 'deployed-server', 'heat'])
def _wait_local_port_ready(self, api_port):
count = 0
while count < 30:
time.sleep(1)
count += 1
try:
urlopen("http://127.0.0.1:%s/" % api_port, timeout=1)
except HTTPError as he:
if he.code == 300:
return True
pass
except URLError:
pass
return False
def _heat_deploy(self, stack_name, template_path, parameters,
environments, timeout, api_port, ks_port):
self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_files, template = template_utils.get_template_contents(
template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
# NOTE(dprince): we use our own client here because we set
# auth_required=False above because keystone isn't running when this
# command starts
tripleoclients = self.app.client_manager.tripleoclient
orchestration_client = tripleoclients.local_orchestration(api_port,
ks_port)
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying parameters: %s", parameters)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'environment': env,
'files': files,
}
if timeout:
stack_args['timeout_mins'] = timeout
self.log.info("Performing Heat stack create")
stack = orchestration_client.stacks.create(**stack_args)
stack_id = stack['stack']['id']
event_list_pid = self._fork_heat_event_list()
self.log.info("Looking up server stack id...")
server_stack_id = None
# NOTE(dprince) wait a bit to create the server_stack_id resource
for c in range(timeout * 60):
time.sleep(1)
server_stack_id = self._lookup_tripleo_server_stackid(
orchestration_client, stack_id)
if server_stack_id:
break
if not server_stack_id:
msg = ('Unable to find deployed server stack id. '
'See tripleo-heat-templates to ensure proper '
'"deployed-server" usage.')
raise Exception(msg)
self.log.debug("server_stack_id: %s" % server_stack_id)
pid = None
status = 'FAILED'
try:
pid = os.fork()
if pid == 0:
self._launch_os_collect_config(ks_port, server_stack_id)
else:
while True:
status = orchestration_client.stacks.get(stack_id).status
self.log.info(status)
if status in ['COMPLETE', 'FAILED']:
break
time.sleep(5)
finally:
if pid:
os.kill(pid, signal.SIGKILL)
if event_list_pid:
os.kill(event_list_pid, signal.SIGKILL)
stack_get = orchestration_client.stacks.get(stack_id)
status = stack_get.status
if status != 'FAILED':
pw_rsrc = orchestration_client.resources.get(
stack_id, 'DefaultPasswords')
passwords = {p.title().replace("_", ""): v for p, v in
pw_rsrc.attributes.get('passwords', {}).items()}
return passwords
else:
msg = "Stack create failed, reason: %s" % stack_get.reason
raise Exception(msg)
def _fork_heat_event_list(self):
pid = os.fork()
if pid == 0:
events_env = {
'OS_AUTH_URL': 'http://127.0.0.1:35358',
'OS_USERNAME': 'foo',
'OS_PROJECT_NAME': 'foo',
'OS_PASSWORD': '<PASSWORD>'
}
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
subprocess.check_call(['openstack', 'stack', 'event', 'list',
'undercloud', '--follow',
'--nested-depth', '6'], env=events_env)
sys.exit(0)
else:
return pid
def _fork_fake_keystone(self):
pid = os.fork()
if pid == 0:
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
fake_keystone.launch()
sys.exit(0)
else:
return pid
def _update_passwords_env(self, passwords=None):
pw_file = os.path.join(os.environ.get('HOME', ''),
'tripleo-undercloud-passwords.yaml')
stack_env = {'parameter_defaults': {}}
if os.path.exists(pw_file):
with open(pw_file) as pf:
stack_env = yaml.load(pf.read())
pw = password_utils.generate_passwords(stack_env=stack_env)
stack_env['parameter_defaults'].update(pw)
if passwords:
# These passwords are the DefaultPasswords so we only
# update if they don't already exist in stack_env
for p, v in passwords.items():
if p not in stack_env['parameter_defaults']:
stack_env['parameter_defaults'][p] = v
with open(pw_file, 'w') as pf:
yaml.safe_dump(stack_env, pf, default_flow_style=False)
return pw_file
def _generate_hosts_parameters(self):
hostname = self._get_hostname()
domain = 'undercloud'
data = {
'CloudName': hostname,
'CloudDomain': domain,
'CloudNameInternal': '%s.internalapi.%s' % (hostname, domain),
'CloudNameStorage': '%s.storage.%s' % (hostname, domain),
'CloudNameStorageManagement': ('%s.storagemgmt.%s'
% (hostname, domain)),
'CloudNameCtlplane': '%s.ctlplane.%s' % (hostname, domain),
}
return data
def _generate_portmap_parameters(self, ip_addr, cidr):
hostname = self._get_hostname()
data = {
'DeployedServerPortMap': {
('%s-ctlplane' % hostname): {
'fixed_ips': [{'ip_address': ip_addr}],
'subnets': [{'cidr': cidr}]
},
'control_virtual_ip': {
'fixed_ips': [{'ip_address': ip_addr}],
'subnets': [{'cidr': cidr}]
}
}
}
return data
def _deploy_tripleo_heat_templates(self, parsed_args):
"""Deploy the fixed templates in TripleO Heat Templates"""
parameters = {}
tht_root = parsed_args.templates
# generate jinja templates
args = ['python', 'tools/process-templates.py', '--roles-data',
'roles_data_undercloud.yaml']
subprocess.check_call(args, cwd=tht_root)
print("Deploying templates in the directory {0}".format(
os.path.abspath(tht_root)))
self.log.debug("Creating Environment file")
environments = []
resource_registry_path = os.path.join(
tht_root, 'overcloud-resource-registry-puppet.yaml')
environments.insert(0, resource_registry_path)
# this will allow the user to overwrite passwords with custom envs
pw_file = self._update_passwords_env()
environments.insert(1, pw_file)
undercloud_env_path = os.path.join(
tht_root, 'environments', 'undercloud.yaml')
environments.append(undercloud_env_path)
# use deployed-server because we run os-collect-config locally
deployed_server_env = os.path.join(
tht_root, 'environments',
'deployed-server-noop-ctlplane.yaml')
environments.append(deployed_server_env)
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
with tempfile.NamedTemporaryFile() as tmp_env_file:
tmp_env = self._generate_hosts_parameters()
ip_nw = netaddr.IPNetwork(parsed_args.local_ip)
ip = str(ip_nw.ip)
cidr = str(ip_nw.netmask)
tmp_env.update(self._generate_portmap_parameters(ip, cidr))
with open(tmp_env_file.name, 'w') as env_file:
yaml.safe_dump({'parameter_defaults': tmp_env}, env_file,
default_flow_style=False)
environments.append(tmp_env_file.name)
undercloud_yaml = os.path.join(tht_root, 'overcloud.yaml')
passwords = self._heat_deploy(parsed_args.stack, undercloud_yaml,
parameters, environments,
parsed_args.timeout,
parsed_args.heat_api_port,
parsed_args.fake_keystone_port)
if passwords:
# Get legacy passwords/secrets generated via heat
# These need to be written to the passwords file
# to avoid re-creating them every update
self._update_passwords_env(passwords)
return True
def get_parser(self, prog_name):
parser = argparse.ArgumentParser(
description=self.get_description(),
prog=prog_name,
add_help=False
)
parser.add_argument(
'--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES,
help=_("The directory containing the Heat templates to deploy"),
)
parser.add_argument('--stack',
help=_("Stack name to create"),
default='undercloud')
parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
type=int, default=30,
help=_('Deployment timeout in minutes.'))
parser.add_argument(
'-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
action='append', dest='environment_files',
help=_('Environment files to be passed to the heat stack-create '
'or heat stack-update command. (Can be specified more than '
'once.)')
)
parser.add_argument(
'--heat-api-port', metavar='<HEAT_API_PORT>',
dest='heat_api_port',
default='8006',
help=_('Heat API port to use for the installers private'
' Heat API instance. Optional. Default: 8006.)')
)
parser.add_argument(
'--fake-keystone-port', metavar='<FAKE_KEYSTONE_PORT>',
dest='fake_keystone_port',
default='35358',
help=_('Keystone API port to use for the installers private'
' fake Keystone API instance. Optional. Default: 35358.)')
)
parser.add_argument(
'--heat-user', metavar='<HEAT_USER>',
dest='heat_user',
default='heat',
help=_('User to execute the non-priveleged heat-all process. '
'Defaults to heat.')
)
parser.add_argument(
'--heat-container-image', metavar='<HEAT_CONTAINER_IMAGE>',
dest='heat_container_image',
default='tripleoupstream/centos-binary-heat-all',
help=_('The container image to use when launching the heat-all '
'process. Defaults to: '
'tripleoupstream/centos-binary-heat-all')
)
parser.add_argument(
'--heat-native',
action='store_true',
default=False,
help=_('Execute the heat-all process natively on this host. '
'This option requires that the heat-all binaries '
'be installed locally on this machine. '
'This option is off by default which means heat-all is '
'executed in a docker container.')
)
parser.add_argument(
'--local-ip', metavar='<LOCAL_IP>',
dest='local_ip',
help=_('Local IP/CIDR for undercloud traffic. Required.')
)
parser.add_argument(
'-k',
'--keep-running',
action='store_true',
dest='keep_running',
help=_('Keep the process running on failures for debugging')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
print("\nUndercloud deploy is an experimental developer focused "
"feature that does not yet replace "
"'openstack undercloud install'.")
if not parsed_args.local_ip:
print('Please set --local-ip to the correct ipaddress/cidr '
'for this machine.')
return
# NOTE(dprince): It would be nice if heat supported true 'noauth'
# use in a local format for our use case here (or perhaps dev testing)
# but until it does running our own lightweight shim to mock out
# the required API calls works just as well. To keep fake keystone
# light we run it in a thread.
if not os.environ.get('FAKE_KEYSTONE_PORT'):
os.environ['FAKE_KEYSTONE_PORT'] = parsed_args.fake_keystone_port
if not os.environ.get('HEAT_API_PORT'):
os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port
# The main thread runs as root and we drop privs for forked
# processes below. Only the heat deploy/os-collect-config forked
# process runs as root.
if os.geteuid() != 0:
raise exceptions.DeploymentError("Please run as root.")
# Install required packages
self._install_prerequisites()
keystone_pid = self._fork_fake_keystone()
# we do this as root to chown config files properly for docker, etc.
if parsed_args.heat_native:
heat_launch = heat_launcher.HeatNativeLauncher(
parsed_args.heat_api_port,
parsed_args.fake_keystone_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
else:
heat_launch = heat_launcher.HeatDockerLauncher(
parsed_args.heat_api_port,
parsed_args.fake_keystone_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
heat_pid = None
try:
# NOTE(dprince): we launch heat with fork exec because
# we don't want it to inherit our args. Launching heat
# as a "library" would be cool... but that would require
# more refactoring. It runs a single process and we kill
# it always below.
heat_pid = os.fork()
if heat_pid == 0:
os.setpgrp()
if parsed_args.heat_native:
try:
uid = pwd.getpwnam(parsed_args.heat_user).pw_uid
gid = pwd.getpwnam(parsed_args.heat_user).pw_gid
except KeyError:
raise exceptions.DeploymentError(
"Please create a %s user account before "
"proceeding." % parsed_args.heat_user)
os.setgid(gid)
os.setuid(uid)
heat_launch.heat_db_sync()
heat_launch.launch_heat()
else:
heat_launch.heat_db_sync()
heat_launch.launch_heat()
else:
self._wait_local_port_ready(parsed_args.fake_keystone_port)
self._wait_local_port_ready(parsed_args.heat_api_port)
if self._deploy_tripleo_heat_templates(parsed_args):
print("\nDeploy Successful.")
else:
print("\nUndercloud deployment failed: "
"press ctrl-c to exit.")
while parsed_args.keep_running:
try:
time.sleep(1)
except KeyboardInterrupt:
break
raise exceptions.DeploymentError("Stack create failed.")
finally:
if heat_launch:
print('Log files at: %s' % heat_launch.install_tmp)
heat_launch.kill_heat(heat_pid)
if keystone_pid:
os.kill(keystone_pid, signal.SIGKILL)
|
[
"tripleo_common.utils.passwords.generate_passwords",
"yaml.safe_dump",
"logging.getLogger",
"openstackclient.i18n._",
"os.path.join",
"subprocess.check_call",
"os.path.abspath",
"os.path.exists",
"heatclient.common.template_utils.process_multiple_environments_and_files",
"urllib.request.urlopen",
"os.setuid",
"os.fork",
"netaddr.IPNetwork",
"pwd.getpwnam",
"os.setpgrp",
"heatclient.common.template_utils.get_template_contents",
"subprocess.Popen",
"time.sleep",
"os.setgid",
"sys.exit",
"tempfile.NamedTemporaryFile",
"tripleoclient.fake_keystone.launch",
"os.environ.get",
"os.kill",
"tripleoclient.heat_launcher.HeatNativeLauncher",
"tripleoclient.heat_launcher.HeatDockerLauncher",
"os.geteuid",
"os.execvp",
"tripleoclient.exceptions.DeploymentError"
] |
[((2155, 2204), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.DeployUndercloud')"], {}), "(__name__ + '.DeployUndercloud')\n", (2172, 2204), False, 'import logging\n'), ((2311, 2371), 'subprocess.Popen', 'subprocess.Popen', (["['hostname', '-s']"], {'stdout': 'subprocess.PIPE'}), "(['hostname', '-s'], stdout=subprocess.PIPE)\n", (2327, 2371), False, 'import subprocess\n'), ((3673, 4000), 'os.execvp', 'os.execvp', (['"""os-collect-config"""', "['os-collect-config', '--polling-interval', '3', '--heat-auth-url', \n 'http://127.0.0.1:%s/v3' % keystone_port, '--heat-password',\n '<PASSWORD>', '--heat-user-id', 'admin', '--heat-project-id', 'admin',\n '--heat-stack-id', stack_id, '--heat-resource-name', 'deployed-server',\n 'heat']"], {}), "('os-collect-config', ['os-collect-config', '--polling-interval',\n '3', '--heat-auth-url', 'http://127.0.0.1:%s/v3' % keystone_port,\n '--heat-password', '<PASSWORD>', '--heat-user-id', 'admin',\n '--heat-project-id', 'admin', '--heat-stack-id', stack_id,\n '--heat-resource-name', 'deployed-server', 'heat'])\n", (3682, 4000), False, 'import os\n'), ((4785, 4853), 'heatclient.common.template_utils.process_multiple_environments_and_files', 'template_utils.process_multiple_environments_and_files', (['environments'], {}), '(environments)\n', (4839, 4853), False, 'from heatclient.common import template_utils\n'), ((4960, 5011), 'heatclient.common.template_utils.get_template_contents', 'template_utils.get_template_contents', (['template_path'], {}), '(template_path)\n', (4996, 5011), False, 'from heatclient.common import template_utils\n'), ((8141, 8150), 'os.fork', 'os.fork', ([], {}), '()\n', (8148, 8150), False, 'import os\n'), ((9057, 9066), 'os.fork', 'os.fork', ([], {}), '()\n', (9064, 9066), False, 'import os\n'), ((9765, 9788), 'os.path.exists', 'os.path.exists', (['pw_file'], {}), '(pw_file)\n', (9779, 9788), False, 'import os\n'), ((9891, 9945), 'tripleo_common.utils.passwords.generate_passwords', 'password_utils.generate_passwords', ([], {'stack_env': 'stack_env'}), '(stack_env=stack_env)\n', (9924, 9945), True, 'from tripleo_common.utils import passwords as password_utils\n'), ((11914, 11955), 'subprocess.check_call', 'subprocess.check_call', (['args'], {'cwd': 'tht_root'}), '(args, cwd=tht_root)\n', (11935, 11955), False, 'import subprocess\n'), ((12175, 12240), 'os.path.join', 'os.path.join', (['tht_root', '"""overcloud-resource-registry-puppet.yaml"""'], {}), "(tht_root, 'overcloud-resource-registry-puppet.yaml')\n", (12187, 12240), False, 'import os\n'), ((12503, 12560), 'os.path.join', 'os.path.join', (['tht_root', '"""environments"""', '"""undercloud.yaml"""'], {}), "(tht_root, 'environments', 'undercloud.yaml')\n", (12515, 12560), False, 'import os\n'), ((12725, 12801), 'os.path.join', 'os.path.join', (['tht_root', '"""environments"""', '"""deployed-server-noop-ctlplane.yaml"""'], {}), "(tht_root, 'environments', 'deployed-server-noop-ctlplane.yaml')\n", (12737, 12801), False, 'import os\n'), ((3104, 3163), 'subprocess.check_call', 'subprocess.check_call', (["(['yum', '-y', 'install'] + processed)"], {}), "(['yum', '-y', 'install'] + processed)\n", (3125, 3163), False, 'import subprocess\n'), ((4241, 4254), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4251, 4254), False, 'import time\n'), ((6440, 6453), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6450, 6453), False, 'import time\n'), ((7005, 7014), 'os.fork', 'os.fork', ([], {}), '()\n', (7012, 7014), False, 'import os\n'), ((8748, 8879), 'subprocess.check_call', 'subprocess.check_call', (["['openstack', 'stack', 'event', 'list', 'undercloud', '--follow',\n '--nested-depth', '6']"], {'env': 'events_env'}), "(['openstack', 'stack', 'event', 'list', 'undercloud',\n '--follow', '--nested-depth', '6'], env=events_env)\n", (8769, 8879), False, 'import subprocess\n'), ((8958, 8969), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8966, 8969), False, 'import sys\n'), ((9442, 9464), 'tripleoclient.fake_keystone.launch', 'fake_keystone.launch', ([], {}), '()\n', (9462, 9464), False, 'from tripleoclient import fake_keystone\n'), ((9477, 9488), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9485, 9488), False, 'import sys\n'), ((9611, 9637), 'os.environ.get', 'os.environ.get', (['"""HOME"""', '""""""'], {}), "('HOME', '')\n", (9625, 9637), False, 'import os\n'), ((10363, 10418), 'yaml.safe_dump', 'yaml.safe_dump', (['stack_env', 'pf'], {'default_flow_style': '(False)'}), '(stack_env, pf, default_flow_style=False)\n', (10377, 10418), False, 'import yaml\n'), ((12996, 13025), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (13023, 13025), False, 'import tempfile\n'), ((13120, 13159), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['parsed_args.local_ip'], {}), '(parsed_args.local_ip)\n', (13137, 13159), False, 'import netaddr\n'), ((13574, 13614), 'os.path.join', 'os.path.join', (['tht_root', '"""overcloud.yaml"""'], {}), "(tht_root, 'overcloud.yaml')\n", (13586, 13614), False, 'import os\n'), ((18311, 18347), 'os.environ.get', 'os.environ.get', (['"""FAKE_KEYSTONE_PORT"""'], {}), "('FAKE_KEYSTONE_PORT')\n", (18325, 18347), False, 'import os\n'), ((18442, 18473), 'os.environ.get', 'os.environ.get', (['"""HEAT_API_PORT"""'], {}), "('HEAT_API_PORT')\n", (18456, 18473), False, 'import os\n'), ((18728, 18740), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (18738, 18740), False, 'import os\n'), ((18765, 18814), 'tripleoclient.exceptions.DeploymentError', 'exceptions.DeploymentError', (['"""Please run as root."""'], {}), "('Please run as root.')\n", (18791, 18814), False, 'from tripleoclient import exceptions\n'), ((19081, 19239), 'tripleoclient.heat_launcher.HeatNativeLauncher', 'heat_launcher.HeatNativeLauncher', (['parsed_args.heat_api_port', 'parsed_args.fake_keystone_port', 'parsed_args.heat_container_image', 'parsed_args.heat_user'], {}), '(parsed_args.heat_api_port, parsed_args.\n fake_keystone_port, parsed_args.heat_container_image, parsed_args.heat_user\n )\n', (19113, 19239), False, 'from tripleoclient import heat_launcher\n'), ((19335, 19493), 'tripleoclient.heat_launcher.HeatDockerLauncher', 'heat_launcher.HeatDockerLauncher', (['parsed_args.heat_api_port', 'parsed_args.fake_keystone_port', 'parsed_args.heat_container_image', 'parsed_args.heat_user'], {}), '(parsed_args.heat_api_port, parsed_args.\n fake_keystone_port, parsed_args.heat_container_image, parsed_args.heat_user\n )\n', (19367, 19493), False, 'from tripleoclient import heat_launcher\n'), ((19913, 19922), 'os.fork', 'os.fork', ([], {}), '()\n', (19920, 19922), False, 'import os\n'), ((2605, 2644), 'subprocess.check_call', 'subprocess.check_call', (["['rpm', '-q', p]"], {}), "(['rpm', '-q', p])\n", (2626, 2644), False, 'import subprocess\n'), ((4311, 4364), 'urllib.request.urlopen', 'urlopen', (["('http://127.0.0.1:%s/' % api_port)"], {'timeout': '(1)'}), "('http://127.0.0.1:%s/' % api_port, timeout=1)\n", (4318, 4364), False, 'from urllib.request import urlopen\n'), ((7454, 7482), 'os.kill', 'os.kill', (['pid', 'signal.SIGKILL'], {}), '(pid, signal.SIGKILL)\n', (7461, 7482), False, 'import os\n'), ((7530, 7569), 'os.kill', 'os.kill', (['event_list_pid', 'signal.SIGKILL'], {}), '(event_list_pid, signal.SIGKILL)\n', (7537, 7569), False, 'import os\n'), ((8427, 8439), 'os.setpgrp', 'os.setpgrp', ([], {}), '()\n', (8437, 8439), False, 'import os\n'), ((9121, 9133), 'os.setpgrp', 'os.setpgrp', ([], {}), '()\n', (9131, 9133), False, 'import os\n'), ((12034, 12059), 'os.path.abspath', 'os.path.abspath', (['tht_root'], {}), '(tht_root)\n', (12049, 12059), False, 'import os\n'), ((13377, 13464), 'yaml.safe_dump', 'yaml.safe_dump', (["{'parameter_defaults': tmp_env}", 'env_file'], {'default_flow_style': '(False)'}), "({'parameter_defaults': tmp_env}, env_file,\n default_flow_style=False)\n", (13391, 13464), False, 'import yaml\n'), ((14576, 14634), 'openstackclient.i18n._', '_', (['"""The directory containing the Heat templates to deploy"""'], {}), "('The directory containing the Heat templates to deploy')\n", (14577, 14634), False, 'from openstackclient.i18n import _\n'), ((14718, 14743), 'openstackclient.i18n._', '_', (['"""Stack name to create"""'], {}), "('Stack name to create')\n", (14719, 14743), False, 'from openstackclient.i18n import _\n'), ((14946, 14981), 'openstackclient.i18n._', '_', (['"""Deployment timeout in minutes."""'], {}), "('Deployment timeout in minutes.')\n", (14947, 14981), False, 'from openstackclient.i18n import _\n'), ((15159, 15289), 'openstackclient.i18n._', '_', (['"""Environment files to be passed to the heat stack-create or heat stack-update command. (Can be specified more than once.)"""'], {}), "('Environment files to be passed to the heat stack-create or heat stack-update command. (Can be specified more than once.)'\n )\n", (15160, 15289), False, 'from openstackclient.i18n import _\n'), ((15505, 15607), 'openstackclient.i18n._', '_', (['"""Heat API port to use for the installers private Heat API instance. Optional. Default: 8006.)"""'], {}), "('Heat API port to use for the installers private Heat API instance. Optional. Default: 8006.)'\n )\n", (15506, 15607), False, 'from openstackclient.i18n import _\n'), ((15817, 15933), 'openstackclient.i18n._', '_', (['"""Keystone API port to use for the installers private fake Keystone API instance. Optional. Default: 35358.)"""'], {}), "('Keystone API port to use for the installers private fake Keystone API instance. Optional. Default: 35358.)'\n )\n", (15818, 15933), False, 'from openstackclient.i18n import _\n'), ((16115, 16190), 'openstackclient.i18n._', '_', (['"""User to execute the non-priveleged heat-all process. Defaults to heat."""'], {}), "('User to execute the non-priveleged heat-all process. Defaults to heat.')\n", (16116, 16190), False, 'from openstackclient.i18n import _\n'), ((16444, 16569), 'openstackclient.i18n._', '_', (['"""The container image to use when launching the heat-all process. Defaults to: tripleoupstream/centos-binary-heat-all"""'], {}), "('The container image to use when launching the heat-all process. Defaults to: tripleoupstream/centos-binary-heat-all'\n )\n", (16445, 16569), False, 'from openstackclient.i18n import _\n'), ((16754, 16987), 'openstackclient.i18n._', '_', (['"""Execute the heat-all process natively on this host. This option requires that the heat-all binaries be installed locally on this machine. This option is off by default which means heat-all is executed in a docker container."""'], {}), "('Execute the heat-all process natively on this host. This option requires that the heat-all binaries be installed locally on this machine. This option is off by default which means heat-all is executed in a docker container.'\n )\n", (16755, 16987), False, 'from openstackclient.i18n import _\n'), ((17204, 17256), 'openstackclient.i18n._', '_', (['"""Local IP/CIDR for undercloud traffic. Required."""'], {}), "('Local IP/CIDR for undercloud traffic. Required.')\n", (17205, 17256), False, 'from openstackclient.i18n import _\n'), ((17427, 17482), 'openstackclient.i18n._', '_', (['"""Keep the process running on failures for debugging"""'], {}), "('Keep the process running on failures for debugging')\n", (17428, 17482), False, 'from openstackclient.i18n import _\n'), ((19969, 19981), 'os.setpgrp', 'os.setpgrp', ([], {}), '()\n', (19979, 19981), False, 'import os\n'), ((21618, 21655), 'os.kill', 'os.kill', (['keystone_pid', 'signal.SIGKILL'], {}), '(keystone_pid, signal.SIGKILL)\n', (21625, 21655), False, 'import os\n'), ((7386, 7399), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (7396, 7399), False, 'import time\n'), ((8605, 8696), 'tripleoclient.exceptions.DeploymentError', 'exceptions.DeploymentError', (['"""Please create a \'nobody\' user account before proceeding."""'], {}), '(\n "Please create a \'nobody\' user account before proceeding.")\n', (8631, 8696), False, 'from tripleoclient import exceptions\n'), ((9299, 9390), 'tripleoclient.exceptions.DeploymentError', 'exceptions.DeploymentError', (['"""Please create a \'nobody\' user account before proceeding."""'], {}), '(\n "Please create a \'nobody\' user account before proceeding.")\n', (9325, 9390), False, 'from tripleoclient import exceptions\n'), ((20449, 20463), 'os.setgid', 'os.setgid', (['gid'], {}), '(gid)\n', (20458, 20463), False, 'import os\n'), ((20484, 20498), 'os.setuid', 'os.setuid', (['uid'], {}), '(uid)\n', (20493, 20498), False, 'import os\n'), ((21360, 21410), 'tripleoclient.exceptions.DeploymentError', 'exceptions.DeploymentError', (['"""Stack create failed."""'], {}), "('Stack create failed.')\n", (21386, 21410), False, 'from tripleoclient import exceptions\n'), ((8466, 8488), 'pwd.getpwnam', 'pwd.getpwnam', (['"""nobody"""'], {}), "('nobody')\n", (8478, 8488), False, 'import pwd\n'), ((8523, 8545), 'pwd.getpwnam', 'pwd.getpwnam', (['"""nobody"""'], {}), "('nobody')\n", (8535, 8545), False, 'import pwd\n'), ((9160, 9182), 'pwd.getpwnam', 'pwd.getpwnam', (['"""nobody"""'], {}), "('nobody')\n", (9172, 9182), False, 'import pwd\n'), ((9217, 9239), 'pwd.getpwnam', 'pwd.getpwnam', (['"""nobody"""'], {}), "('nobody')\n", (9229, 9239), False, 'import pwd\n'), ((20081, 20116), 'pwd.getpwnam', 'pwd.getpwnam', (['parsed_args.heat_user'], {}), '(parsed_args.heat_user)\n', (20093, 20116), False, 'import pwd\n'), ((20154, 20189), 'pwd.getpwnam', 'pwd.getpwnam', (['parsed_args.heat_user'], {}), '(parsed_args.heat_user)\n', (20166, 20189), False, 'import pwd\n'), ((20264, 20378), 'tripleoclient.exceptions.DeploymentError', 'exceptions.DeploymentError', (["('Please create a %s user account before proceeding.' % parsed_args.heat_user)"], {}), "(\n 'Please create a %s user account before proceeding.' % parsed_args.\n heat_user)\n", (20290, 20378), False, 'from tripleoclient import exceptions\n'), ((21235, 21248), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (21245, 21248), False, 'import time\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
This module subsets the certain number of important features
and detects student behavior and grouping students
"""
# Load libraries
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
def subset_important_features(data_list, num_of_features, option):
"""
Subset the certain number of statistically significant features
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
num_of_features: The number of features that a user wants to subset
option: Different ways to subset the data_list
'common': Subset common significant features across all sessions
'different': Subset significant features from each session
Return
----------
A list containing subsetted pandas dataframes
"""
if not isinstance(num_of_features, int) is True:
raise ValueError("'num_of_features' should be an integer.")
if not isinstance(option, str) is True:
raise ValueError("'option' should be a string ('common' or 'different').")
if not isinstance(data_list, list) is True:
raise ValueError("'data_list' should be a list including panda dataframes.")
if not num_of_features >= 2:
raise ValueError("'num_of_features' should be greater than 2.")
else:
# Subset common significant features across all sessions
if option == 'different':
important_features = []
results_list = [0]*len(data_list)
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['ID', 'Y'])
outcome = session['Y']
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(ivs, outcome)
feat_importances = pd.Series(clf.feature_importances_, index=ivs.columns)
features = feat_importances.nlargest(num_of_features).index[0:num_of_features]
important_features.append(pd.DataFrame(features, columns=['session' + str(i+1)]))
important_features[i-1].loc[num_of_features] = ['Y']
important_features[i-1].loc[num_of_features+1] = ['ID']
for i, session in enumerate(data_list):
if i == 0:
continue
results_list[i] = session[important_features[i-1]['session'+str(i+1)]]
return results_list
# Subset significant features from each session
elif option == 'common':
num = 0
j = 10
results_list = [0]*len(data_list)
while num < num_of_features-1:
important_features = []
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['ID', 'Y'])
outcome = session['Y']
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(ivs, outcome)
feat_importances = pd.Series(clf.feature_importances_, index=ivs.columns)
features = feat_importances.nlargest(j).index[0:j]
important_features.append(features)
common_features = list(set.intersection(*map(set, important_features)))
num = len(common_features)
j += 1
if len(common_features) > num_of_features-1:
common_features = common_features[0:num_of_features-1]
for i, session in enumerate(data_list):
if i == 0:
continue
common_features.append('ID')
common_features.append('MID'+str(i+1))
results_list[i] = session[common_features]
del common_features[-2:]
if len(common_features) == num_of_features-1:
for i, session in enumerate(data_list):
if i == 0:
continue
common_features.append('ID')
common_features.append('MID'+str(i+1))
results_list[i] = session[common_features]
del common_features[-2:]
return results_list
def machine_learning_model(data_list, ml_model):
"""
Fit a machine learning model
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
ml_model: A machine learning model that a user wants to fit
'KNN': K-nearest neighbors
'DT': Decision tree
'RF': Randome forest
'NB': Naive Bayes
'LR': Logistic regression
'SVC': Support vector classifier
Return
----------
A list containing pandas dataframes including features
and a fitted result from a machine learning model
"""
if not isinstance(data_list, list) is True:
raise ValueError("'data_list' should be a list including panda dataframes.")
if not isinstance(ml_model, str) is True:
raise ValueError("'ml_model' should be a string and \
one of machine learning models ('KNN', 'DT', 'RF', 'NB', 'LR', 'SVC').")
else:
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['Y'])
outcome = session['Y']
# Fit the K-nearest neighbors
if ml_model == 'KNN':
cknn = KNeighborsClassifier(n_neighbors=10, metric='minkowski',
p=2).fit(ivs, outcome)
predict = cknn.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the decision tree
elif ml_model == 'DT':
cdt = DecisionTreeClassifier(criterion='entropy').fit(ivs, outcome)
predict = cdt.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the random forest
elif ml_model == 'RF':
crf = RandomForestClassifier(n_estimators=10,
criterion='entropy').fit(ivs, outcome)
predict = crf.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the naive bayes
elif ml_model == 'NB':
cnb = GaussianNB().fit(ivs, outcome)
predict = cnb.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the logistic regression
elif ml_model == 'LR':
clr = LogisticRegression(solver='liblinear').fit(ivs, outcome)
predict = clr.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fir the support vector classfier
elif ml_model == 'SVC':
csvm = SVC(kernel='rbf', random_state=0).fit(ivs, outcome)
predict = csvm.predict(ivs)
session = session.assign(Predicted_Y=predict)
data_list[i] = session
return data_list
def kmean_clustering(data_list, num_of_sessions, num_of_clusters):
"""
Fit the k-means clustering
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
num_of_sessions: The timing when a user wants to form a group
num_of_clusters: The number of clusters that a user wants to form
Return
----------
A list containing pandas dataframes including features
and results from the k-mean clustering
"""
if not isinstance(data_list, list) is True:
raise ValueError("'data_list' should be a list including panda dataframes.")
if not isinstance(num_of_sessions, int) is True:
raise ValueError("'num_of_sessions' should be an integer.")
if not isinstance(num_of_sessions, int) is True:
raise ValueError("'num_of_clusters' should be an integer.")
if not num_of_sessions >= 2:
raise ValueError("'num_of_sessions' should be greater than 2.")
else:
new_data_list = [0]*5
for i in range(0, num_of_sessions):
if i == 0:
continue
# k-mean clustering for the session 2
if i == 1:
new_data_list[i-1] = data_list[i]
kmeans = KMeans(n_clusters=num_of_clusters, init='k-means++',
max_iter=300, n_init=10)
kmeans.fit(new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID'])
y_pred = kmeans.fit_predict(new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID'])
new_data_list[i-1] = new_data_list[i-1].assign(group=y_pred)
# k-mean clustering for the session 3-6
if 2 <= i <= 5:
logs = data_list[i].columns[0:len(data_list[i].columns)-2]
new_data_list[i-1] = new_data_list[i-2].merge(data_list[i], how="outer", on=['ID'])
# Calculate current intermediate (mid) scores with previous scores
mid_cols = [col for col in new_data_list[i-1].columns if col.startswith('MID')]
new_data_list[i-1]['MID_Mean'] = new_data_list[i-1][mid_cols].mean(axis=1)
# Calculate current intermediate (mid) log feature(s) with previous log feature(s)
j = 0
all_log_cols = []
while j < len(logs):
log_cols = [col for col in new_data_list[i-1].columns
if col.startswith(logs[j])]
all_log_cols.append(logs[j]+'_Mean')
new_data_list[i-1][logs[j]+'_Mean'] = new_data_list[i-1][log_cols].mean(axis=1)
j += 1
cols_collection = all_log_cols+['ID', 'MID_Mean']
new_data_list[i-1] = new_data_list[i-1][cols_collection]
kmeans = KMeans(n_clusters=num_of_clusters, init='k-means++',
max_iter=300, n_init=10)
data_for_fitting = new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID']
kmeans.fit(data_for_fitting)
y_pred = kmeans.fit_predict(data_for_fitting)
new_data_list[i-1] = new_data_list[i-1].assign(group=y_pred)
return new_data_list[num_of_sessions-2]
def main():
print('Done!')
if __name__ == '__main__':
main()
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.GaussianNB",
"sklearn.cluster.KMeans",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"pandas.Series",
"sklearn.svm.SVC"
] |
[((2150, 2187), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (2170, 2187), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((2269, 2323), 'pandas.Series', 'pd.Series', (['clf.feature_importances_'], {'index': 'ivs.columns'}), '(clf.feature_importances_, index=ivs.columns)\n', (2278, 2323), True, 'import pandas as pd\n'), ((9097, 9174), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_of_clusters', 'init': '"""k-means++"""', 'max_iter': '(300)', 'n_init': '(10)'}), "(n_clusters=num_of_clusters, init='k-means++', max_iter=300, n_init=10)\n", (9103, 9174), False, 'from sklearn.cluster import KMeans\n'), ((10707, 10784), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_of_clusters', 'init': '"""k-means++"""', 'max_iter': '(300)', 'n_init': '(10)'}), "(n_clusters=num_of_clusters, init='k-means++', max_iter=300, n_init=10)\n", (10713, 10784), False, 'from sklearn.cluster import KMeans\n'), ((3415, 3452), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (3435, 3452), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((3542, 3596), 'pandas.Series', 'pd.Series', (['clf.feature_importances_'], {'index': 'ivs.columns'}), '(clf.feature_importances_, index=ivs.columns)\n', (3551, 3596), True, 'import pandas as pd\n'), ((6132, 6193), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(10)', 'metric': '"""minkowski"""', 'p': '(2)'}), "(n_neighbors=10, metric='minkowski', p=2)\n", (6152, 6193), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6461, 6504), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""'}), "(criterion='entropy')\n", (6483, 6504), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((6726, 6786), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'criterion': '"""entropy"""'}), "(n_estimators=10, criterion='entropy')\n", (6748, 6786), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7052, 7064), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (7062, 7064), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((7292, 7330), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""'}), "(solver='liblinear')\n", (7310, 7330), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7565, 7598), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'random_state': '(0)'}), "(kernel='rbf', random_state=0)\n", (7568, 7598), False, 'from sklearn.svm import SVC\n')]
|
"""Symbolic model code generation.
Improvement ideas
-----------------
* Add compiled code to linecache so that tracebacks can be produced, like done
in the `IPython.core.compilerop` module.
"""
import abc
import collections
import collections.abc
import contextlib
import functools
import inspect
import itertools
import re
import types
import attrdict
import numpy as np
import jinja2
import sympy
from . import function, printing, utils, var
class Variables(var.SymbolObject):
"""Represents code generation model variables."""
pass
class Base:
"""Code generation model base."""
def __init__(self):
self.variables = Variables(self={})
"""Model variables dictionary."""
self.derivatives = {}
"""Dictionary of model derivatives, to optimize higher order diff."""
def __getattribute__(self, name):
"""Overloaded method to bind SymbolicSubsFunction objects."""
attr = super().__getattribute__(name)
if isinstance(attr, function.SymbolicSubsFunction) and attr.ismethod:
return functools.partial(attr, self)
else:
return attr
def _compute_derivative(self, fname, wrt):
assert isinstance(wrt, tuple)
if wrt == ():
return self.default_function_output(fname)
# See if the derivative is registered
dname = self.derivatives.get((fname,) + wrt)
if dname is not None:
return self.default_function_output(dname)
expr = self._compute_derivative(fname, wrt[1:])
wrt_array = self.variables[wrt[0]]
return utils.ndexpr_diff(expr, wrt_array)
def add_derivative(self, fname, wrt, dname):
if utils.isstr(wrt):
wrt = (wrt,)
elif not isinstance(wrt, tuple):
raise TypeError("argument wrt must be string or tuple")
args = self.function_codegen_arguments(fname, include_self=True)
expr = self._compute_derivative(fname, wrt)
deriv = function.SymbolicSubsFunction(args, expr)
setattr(self, dname, deriv)
self.derivatives[(fname,) + wrt] = dname
def set_default_members(self):
for key, val in self.variables['self'].items():
setattr(self, key, val)
@contextlib.contextmanager
def using_default_members(self):
"""Context manager that sets default attributes temporarily."""
set_members = {}
unset_members = []
# Get the values of the members before the entering the context
for k in self.variables['self'].keys():
try:
set_members[k] = getattr(self, k)
except AttributeError:
unset_members.append(k)
try:
# Set the members to their "default" values
self.set_default_members()
yield
finally:
# Restore previous values
for key, val in set_members.items():
setattr(self, key, val)
for key in unset_members:
delattr(self, key)
def function_codegen_arguments(self, fname, include_self=False):
f = getattr(self, fname)
param_names = inspect.signature(f).parameters.keys()
if include_self:
param_names = ['self', *param_names]
return function.Arguments((n,self.variables[n]) for n in param_names)
@utils.cached_method
def default_function_output(self, fname):
"""Function output for the default arguments."""
f = getattr(self, fname)
if isinstance(f, functools.partial):
if isinstance(f.func, function.SymbolicSubsFunction):
return f.func.default_output
args = self.function_codegen_arguments(fname)
with self.using_default_members():
return np.asarray(f(*args.values()))
def print_code(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.print_class()
def compile_class(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.class_obj()
def print_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.print_class()
def compile_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.class_obj()
model_template_src = '''\
# Model imports
import numpy as {{printer.numpy_alias}}
{% for import in m.imports -%}
import {{ import }}
{% endfor %}
class {{m.name}}({{ m.bases | join(', ') }}, metaclass={{m.metaclass}}):
"""Generated code for {{m.name}} from symbolic model."""
{% for method in m.methods %}
{{ method | indent }}
{% endfor %}
{% for name, value in m.assignments.items() -%}
{% if isndarray(value) -%}
{{ printer.print_ndarray(value, assign_to=name) }}
{% else -%}
{{ name }} = {{ value }}
{% endif -%}
{% endfor %}
'''
class ModelPrinter:
"""Generates numpy code for symbolic models."""
@utils.cached_class_property
def template(cls):
return jinja2.Template(model_template_src)
def __init__(self, model, **options):
self.model = model
"""The underlying symbolic model."""
self.options = options
"""Model printer options."""
try:
functions = options['functions']
except KeyError:
functions = getattr(model, 'generate_functions', [])
f_specs = []
for fname in functions:
output = self.model.default_function_output(fname)
arguments = self.model.function_codegen_arguments(fname, True)
f_specs.append((fname, output, arguments))
self._f_specs = f_specs
"""Function generation specifications."""
@property
def name(self):
"""Name of the generated class."""
return (getattr(self.model, 'generated_name', None)
or self.options.get('name', None)
or f'Generated{type(self.model).__name__}')
@property
def assignments(self):
"""Mapping of simple assignments to be made in the class code."""
try:
return self.options['assignments']
except KeyError:
return getattr(self.model, 'generate_assignments', {})
@property
def imports(self):
"""List of imports to include in the generated class code."""
try:
return self.options['imports']
except KeyError:
return getattr(self.model, 'generate_imports', [])
@property
def bases(self):
"""List of names of base classes for the generated model class."""
try:
return self.options['bases']
except KeyError:
return getattr(self.model, 'generated_bases', ['object'])
@property
def metaclass(self):
"""Metaclass for the generated model class."""
try:
return self.options['metaclass']
except KeyError:
return getattr(self.model, 'generated_metaclass', 'type')
@property
def methods(self):
for fname, output, arguments in self._f_specs:
fdef = function.print_function(fname, output, arguments)
yield fdef
def print_class(self):
isndarray = lambda var: isinstance(var, np.ndarray)
context = dict(m=self, printer=printing.Printer(), isndarray=isndarray)
return self.template.render(context)
def class_obj(self):
env = {}
exec(compile(self.print_class(), '<string>', 'exec'), env)
return env[self.name]
def collect_symbols(f):
sig = inspect.signature(f)
if len(sig.parameters) < 2:
raise ValueError(f"method {f.__name__} should have at least 2 "
"parameters, 'self' and the collected symbols")
params = list(sig.parameters.values())
collected_symbols_arg_name = params[-1].name
new_sig = sig.replace(parameters=params[:-1])
nargs_wrapped = len(params) - 1
@functools.wraps(f)
def wrapper(self, *args):
# Validate arguments
nargs_in = len(args) + 1
if nargs_in != nargs_wrapped:
raise TypeError(f"{f.__name__} takes {nargs_wrapped} arguments "
f"but got only {nargs_in}")
# Create substitution dictionary
subs = self.variables['self'].subs_map(self)
for param, value in zip(params[1:-1], args):
subs.update(self.variables[param.name].subs_map(value))
# Create collected symbols AttrDict
collected_symbols = attrdict.AttrDict()
for var, expr in subs.items():
collected_symbols[var.name] = expr
ret = f(self, *args, **{collected_symbols_arg_name: collected_symbols})
# Ensure function return is an ndarray
return np.asarray(ret, object)
wrapper.__signature__ = new_sig
return wrapper
|
[
"jinja2.Template",
"functools.partial",
"numpy.asarray",
"inspect.signature",
"functools.wraps",
"attrdict.AttrDict"
] |
[((7770, 7790), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (7787, 7790), False, 'import inspect\n'), ((8156, 8174), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (8171, 8174), False, 'import functools\n'), ((5161, 5196), 'jinja2.Template', 'jinja2.Template', (['model_template_src'], {}), '(model_template_src)\n', (5176, 5196), False, 'import jinja2\n'), ((8743, 8762), 'attrdict.AttrDict', 'attrdict.AttrDict', ([], {}), '()\n', (8760, 8762), False, 'import attrdict\n'), ((9000, 9023), 'numpy.asarray', 'np.asarray', (['ret', 'object'], {}), '(ret, object)\n', (9010, 9023), True, 'import numpy as np\n'), ((1085, 1114), 'functools.partial', 'functools.partial', (['attr', 'self'], {}), '(attr, self)\n', (1102, 1114), False, 'import functools\n'), ((3242, 3262), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (3259, 3262), False, 'import inspect\n')]
|
from sys import argv, exit
import csv
import re
if len(argv) != 3:
print("Usage: dna.py data.csv sequence.txt")
exit(1)
elif re.match(".*\.csv$", argv[1]) is None or re.match(".*\.txt$", argv[2]) is None:
print("Usage: dna.py data.csv sequence.txt")
exit(2)
else:
# opening csvfile
with open(argv[1], "r") as csvfile:
database = csv.DictReader(csvfile)
# creating dictionary to hold value counts
# getting columns from first row
seq_count = dict(next(database))
# remove name key
del seq_count['name']
# count number of keys
key_count = len(seq_count)
# set values = 0
for key in seq_count:
seq_count[key] = 0
# checking sequence for repeating patterns
with open(argv[2], "r") as txtfile:
sequence = csv.reader(txtfile)
# getting sequence length
for row in sequence:
line = row[0]
length = len(line)
# getting pattern to check
for key in seq_count:
pat_len = len(key)
flag = 0
repeat_count = 0
repeats = [0]
# checking loop for pattern
i = 0
while (length - i) >= pat_len:
# getting slice to compare
section = line[i: pat_len + i]
# actions on match
if section == key:
flag = 1
repeat_count += 1
i += pat_len
else:
# if matches found append to list
if flag == 1:
repeats.append(repeat_count)
repeat_count = 0
flag = 0
i += 1
# otherwise just check from the next sequence
else:
i += 1
# set repeats value in dictionary
seq_count[key] = max(repeats)
# rewind file and check values in database
csvfile.seek(0)
next(csvfile)
for row in database:
checker = dict(row)
check_count = 0
for key in seq_count:
if seq_count[key] == int(checker[key]):
check_count += 1
if check_count == key_count:
print(row['name'])
exit(0)
print("No match")
exit(0)
|
[
"csv.DictReader",
"csv.reader",
"re.match",
"sys.exit"
] |
[((122, 129), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (126, 129), False, 'from sys import argv, exit\n'), ((268, 275), 'sys.exit', 'exit', (['(2)'], {}), '(2)\n', (272, 275), False, 'from sys import argv, exit\n'), ((2624, 2631), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2628, 2631), False, 'from sys import argv, exit\n'), ((135, 165), 're.match', 're.match', (['""".*\\\\.csv$"""', 'argv[1]'], {}), "('.*\\\\.csv$', argv[1])\n", (143, 165), False, 'import re\n'), ((176, 206), 're.match', 're.match', (['""".*\\\\.txt$"""', 'argv[2]'], {}), "('.*\\\\.txt$', argv[2])\n", (184, 206), False, 'import re\n'), ((363, 386), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (377, 386), False, 'import csv\n'), ((851, 870), 'csv.reader', 'csv.reader', (['txtfile'], {}), '(txtfile)\n', (861, 870), False, 'import csv\n'), ((2589, 2596), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2593, 2596), False, 'from sys import argv, exit\n')]
|
#!/usr/bin/env python3
# coding: utf-8
"""
@author: <NAME> <EMAIL>
@last modified by: <NAME>
@file:qc.py
@time:2021/03/26
"""
from scipy.sparse import issparse
import numpy as np
def cal_qc(data):
"""
calculate three qc index including the number of genes expressed in the count matrix, the total counts per cell
and the percentage of counts in mitochondrial genes.
:param data: the StereoExpData object.
:return: StereoExpData object storing quality control results.
"""
exp_matrix = data.exp_matrix
total_count = cal_total_counts(exp_matrix)
n_gene_by_count = cal_n_genes_by_counts(exp_matrix)
pct_counts_mt = cal_pct_counts_mt(data, exp_matrix, total_count)
data.cells.total_counts = total_count
data.cells.pct_counts_mt = pct_counts_mt
data.cells.n_genes_by_counts = n_gene_by_count
return data
def cal_total_counts(exp_matrix):
"""
calculate the total gene counts of per cell.
:param exp_matrix: the express matrix.
:return:
"""
total_count = np.array(exp_matrix.sum(1)).reshape(-1)
return total_count
def cal_per_gene_counts(exp_matrix):
"""
calculate the total counts of per gene.
:param exp_matrix: the express matrix.
:return:
"""
gene_counts = np.array(exp_matrix.sum(axis=0)).reshape(-1)
return gene_counts
def cal_n_cells_by_counts(exp_matrix):
"""
total counts of each gene.
:param exp_matrix: the express matrix.
:return:
"""
n_cells_by_counts = np.array(exp_matrix.sum(0)).reshape(-1)
return n_cells_by_counts
def cal_n_cells(exp_matrix):
"""
Number of cells that occur in each gene.
:param exp_matrix: the express matrix.
:return:
"""
n_cells = exp_matrix.getnnz(axis=0) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=0)
return n_cells
def cal_n_genes_by_counts(exp_matrix):
n_genes_by_counts = exp_matrix.getnnz(axis=1) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=1)
return n_genes_by_counts
def cal_pct_counts_mt(data, exp_matrix, total_count):
if total_count is None:
total_count = cal_total_counts(exp_matrix)
mt_index = np.char.startswith(np.char.lower(data.gene_names), prefix='mt-')
mt_count = np.array(exp_matrix[:, mt_index].sum(1)).reshape(-1)
pct_counts_mt = mt_count / total_count * 100
return pct_counts_mt
|
[
"scipy.sparse.issparse",
"numpy.char.lower",
"numpy.count_nonzero"
] |
[((1770, 1790), 'scipy.sparse.issparse', 'issparse', (['exp_matrix'], {}), '(exp_matrix)\n', (1778, 1790), False, 'from scipy.sparse import issparse\n'), ((1796, 1832), 'numpy.count_nonzero', 'np.count_nonzero', (['exp_matrix'], {'axis': '(0)'}), '(exp_matrix, axis=0)\n', (1812, 1832), True, 'import numpy as np\n'), ((1946, 1966), 'scipy.sparse.issparse', 'issparse', (['exp_matrix'], {}), '(exp_matrix)\n', (1954, 1966), False, 'from scipy.sparse import issparse\n'), ((1972, 2008), 'numpy.count_nonzero', 'np.count_nonzero', (['exp_matrix'], {'axis': '(1)'}), '(exp_matrix, axis=1)\n', (1988, 2008), True, 'import numpy as np\n'), ((2207, 2237), 'numpy.char.lower', 'np.char.lower', (['data.gene_names'], {}), '(data.gene_names)\n', (2220, 2237), True, 'import numpy as np\n')]
|
from django import template
from ..utils import sanitize_richtext
register = template.Library()
@register.filter
def baseplugin_pluginid(plugin_object):
return 'data-plugin-id="%s"' % plugin_object.pk
@register.filter
def baseplugin_sanitize_richtext(text):
return sanitize_richtext(text)
|
[
"django.template.Library"
] |
[((79, 97), 'django.template.Library', 'template.Library', ([], {}), '()\n', (95, 97), False, 'from django import template\n')]
|
import pandas as pd
import numpy as np
def load_cancer():
# data, target, feature_names
result_dict = {'features': np.array(["Clump Thickness",
"Uniformity of Cell Size",
"Uniformity of Cell Shape",
"Marginal Adhesion",
"Single Epithelial Cell Size",
"Bare Nuclei",
"Bland Chromatin",
"Normal Nucleoli",
"Mitoses"])}
df_dict = pd.read_csv('breast_cancer_wisconsin.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, :-1]
result_dict['target'] = df_data[:, -1]
return result_dict
def load_hepatitis():
result_dict = {'features': np.array(["AGE",
"SEX",
"STEROID",
"ANTIVIRAL",
"FATIGUE",
"MALAISE",
"ANOREXIA",
"LIVER BIG",
"LIVER FIRM",
"SPLEEN PALPABLE",
"SPIDERS",
"ASCITES",
"VARICES",
"BILIRUBIN",
"ALK PHOSPHATE",
"SGOT",
"ALBUMIN",
"PROTIME",
"HISTOLOGY"])
}
df_dict = pd.read_csv('hepatitis.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, 1:]
result_dict['target'] = (df_data[:, 0]).astype(int)
return result_dict
|
[
"pandas.read_csv",
"numpy.array"
] |
[((754, 779), 'numpy.array', 'np.array', (["df_dict['data']"], {}), "(df_dict['data'])\n", (762, 779), True, 'import numpy as np\n'), ((2022, 2047), 'numpy.array', 'np.array', (["df_dict['data']"], {}), "(df_dict['data'])\n", (2030, 2047), True, 'import numpy as np\n'), ((125, 337), 'numpy.array', 'np.array', (["['Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',\n 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei',\n 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses']"], {}), "(['Clump Thickness', 'Uniformity of Cell Size',\n 'Uniformity of Cell Shape', 'Marginal Adhesion',\n 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',\n 'Normal Nucleoli', 'Mitoses'])\n", (133, 337), True, 'import numpy as np\n'), ((943, 1189), 'numpy.array', 'np.array', (["['AGE', 'SEX', 'STEROID', 'ANTIVIRAL', 'FATIGUE', 'MALAISE', 'ANOREXIA',\n 'LIVER BIG', 'LIVER FIRM', 'SPLEEN PALPABLE', 'SPIDERS', 'ASCITES',\n 'VARICES', 'BILIRUBIN', 'ALK PHOSPHATE', 'SGOT', 'ALBUMIN', 'PROTIME',\n 'HISTOLOGY']"], {}), "(['AGE', 'SEX', 'STEROID', 'ANTIVIRAL', 'FATIGUE', 'MALAISE',\n 'ANOREXIA', 'LIVER BIG', 'LIVER FIRM', 'SPLEEN PALPABLE', 'SPIDERS',\n 'ASCITES', 'VARICES', 'BILIRUBIN', 'ALK PHOSPHATE', 'SGOT', 'ALBUMIN',\n 'PROTIME', 'HISTOLOGY'])\n", (951, 1189), True, 'import numpy as np\n'), ((670, 722), 'pandas.read_csv', 'pd.read_csv', (['"""breast_cancer_wisconsin.csv"""'], {'header': '(0)'}), "('breast_cancer_wisconsin.csv', header=0)\n", (681, 722), True, 'import pandas as pd\n'), ((1952, 1990), 'pandas.read_csv', 'pd.read_csv', (['"""hepatitis.csv"""'], {'header': '(0)'}), "('hepatitis.csv', header=0)\n", (1963, 1990), True, 'import pandas as pd\n')]
|
import pyshark
cap = pyshark.FileCapture('drox.pcapng')
key = b'xord'
for packet in cap:
try:
data = bytes([int(x, 16) for x in packet.tcp.payload.split(":")])
r = range(max(len(key), len(data)))
print(''.join([chr((key[i%len(key)]) ^ (data[i])) for i in r]))
except Exception as e:
print(e)
|
[
"pyshark.FileCapture"
] |
[((21, 55), 'pyshark.FileCapture', 'pyshark.FileCapture', (['"""drox.pcapng"""'], {}), "('drox.pcapng')\n", (40, 55), False, 'import pyshark\n')]
|
import pytest
from enphaseAI.problem1 import find_lines_from_points, find_lines_intersection
def test_find_lines_from_points() -> None:
p0 = 0., "string_input"
p1 = 1., 2.5
# Test for string input
args = [p0, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
# Test for same points
args = [p1, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
# Test for int inputs
p0 = 0, 1
p1 = 2, 4
args = [p0, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
p0 = 0., 0.
p1 = 1., 1.
a, b = find_lines_from_points(p0, p1)
assert a == 1., "Slope should be 1"
assert b == 0., "Intersect should be 0"
p0 = 0., 0.
p1 = 1., -1.
a, b = find_lines_from_points(p0, p1)
assert a == -1., "Slope should be 1"
assert b == 0., "Intersect should be 0"
def test_find_lines_intersection() -> None:
l0 = 0., "string_input"
l1 = 1., 2.5
# Test for string input
args = [l0, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
# Test for same lines
args = [l1, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
# Test for int inputs
l0 = 0, 1
l1 = 2, 4
args = [l0, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
l0 = 1., 0.
l1 = -1., 0.
args = [l0, l1]
x, y = find_lines_intersection(l0, l1)
assert x == 0. and y == 0., "The intersection should be the origin"
|
[
"enphaseAI.problem1.find_lines_intersection",
"pytest.raises",
"enphaseAI.problem1.find_lines_from_points"
] |
[((246, 306), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_from_points', '*args'], {}), '(AssertionError, find_lines_from_points, *args)\n', (259, 306), False, 'import pytest\n'), ((363, 423), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_from_points', '*args'], {}), '(AssertionError, find_lines_from_points, *args)\n', (376, 423), False, 'import pytest\n'), ((509, 569), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_from_points', '*args'], {}), '(AssertionError, find_lines_from_points, *args)\n', (522, 569), False, 'import pytest\n'), ((620, 650), 'enphaseAI.problem1.find_lines_from_points', 'find_lines_from_points', (['p0', 'p1'], {}), '(p0, p1)\n', (642, 650), False, 'from enphaseAI.problem1 import find_lines_from_points, find_lines_intersection\n'), ((786, 816), 'enphaseAI.problem1.find_lines_from_points', 'find_lines_from_points', (['p0', 'p1'], {}), '(p0, p1)\n', (808, 816), False, 'from enphaseAI.problem1 import find_lines_from_points, find_lines_intersection\n'), ((1057, 1118), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_intersection', '*args'], {}), '(AssertionError, find_lines_intersection, *args)\n', (1070, 1118), False, 'import pytest\n'), ((1174, 1235), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_intersection', '*args'], {}), '(AssertionError, find_lines_intersection, *args)\n', (1187, 1235), False, 'import pytest\n'), ((1321, 1382), 'pytest.raises', 'pytest.raises', (['AssertionError', 'find_lines_intersection', '*args'], {}), '(AssertionError, find_lines_intersection, *args)\n', (1334, 1382), False, 'import pytest\n'), ((1453, 1484), 'enphaseAI.problem1.find_lines_intersection', 'find_lines_intersection', (['l0', 'l1'], {}), '(l0, l1)\n', (1476, 1484), False, 'from enphaseAI.problem1 import find_lines_from_points, find_lines_intersection\n')]
|
"""
Integration/unit test for the AlleleFilter module.
Since it consists mostly of database queries, it's tested on a live database.
"""
import pytest
from datalayer import AlleleFilter
from vardb.datamodel import sample, jsonschema
FILTER_CONFIG_NUM = 0
def insert_filter_config(session, filter_config):
global FILTER_CONFIG_NUM
FILTER_CONFIG_NUM += 1
# Add dummy schema that allows for any object
jsonschema.JSONSchema.get_or_create(
session, **{"name": "filterconfig", "version": 10000, "schema": {"type": "object"}}
)
# Allelefilter expects the following to be defined. Set them if not.
for f in filter_config["filters"]:
f.setdefault("config", {})
f.setdefault("exceptions", [])
for e in f["exceptions"]:
e.setdefault("config", {})
fc = sample.FilterConfig(name="Test {}".format(FILTER_CONFIG_NUM), filterconfig=filter_config)
session.add(fc)
session.commit()
return fc.id
def create_filter_mock(to_remove):
def filter_mock(key_allele_ids):
result = dict()
for gp_key, allele_ids in key_allele_ids.items():
result[gp_key] = set(allele_ids) & set(to_remove)
return result
return filter_mock
@pytest.fixture
def allele_filter(session):
af = AlleleFilter(session, config={})
# Mock the built-in filters
def filter_one(key_allele_ids, filter_config):
return create_filter_mock([1])(key_allele_ids)
def filter_one_two(key_allele_ids, filter_config):
return create_filter_mock([1, 2])(key_allele_ids)
def filter_three_four(key_allele_ids, filter_config):
return create_filter_mock([3, 4])(key_allele_ids)
def filter_five_six(key_allele_ids, filter_config):
return create_filter_mock([5, 6])(key_allele_ids)
def filter_none(key_allele_ids, filter_config):
return create_filter_mock([])(key_allele_ids)
def filter_one_three_if_one(key_allele_ids, filter_config):
result = dict()
for gp_key, allele_ids in key_allele_ids.items():
if 1 in allele_ids:
result[gp_key] = set(allele_ids) & set([1, 3])
else:
result[gp_key] = set([])
return result
assert filter_one_three_if_one({1: [1, 3]}, None) == {1: set([1, 3])}
assert filter_one_three_if_one({1: [3]}, None) == {1: set([])}
af.filter_functions = {
"allele_one": ("allele", filter_one),
"allele_one_two": ("allele", filter_one_two),
"allele_duplicate_one_two": ("allele", filter_one_two),
"allele_three_four": ("allele", filter_three_four),
"allele_five_six": ("allele", filter_five_six),
"allele_filter_one_three_if_one": ("allele", filter_one_three_if_one),
"allele_none": ("allele", filter_none),
"analysis_one_two": ("analysis", filter_one_two),
"analysis_duplicate_one_two": ("analysis", filter_one_two),
"analysis_three_four": ("analysis", filter_three_four),
"analysis_five_six": ("analysis", filter_five_six),
"analysis_filter_one_three_if_one": ("analysis", filter_one_three_if_one),
"analysis_none": ("analysis", filter_none),
}
return af
class TestAlleleFilter(object):
@pytest.mark.aa(order=0)
def test_filter_alleles(self, session, allele_filter):
# ---------
# Test simple allele filter
filter_config = {"filters": [{"name": "allele_one_two"}], "filter_exceptions": []}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2], "key2": [1, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {"allele_ids": [], "excluded_allele_ids": {"allele_one_two": [1, 2]}},
"key2": {"allele_ids": [4], "excluded_allele_ids": {"allele_one_two": [1]}},
}
# ---------
assert result == expected_result
# Test multiple allele filters
filter_config = {
"filters": [
{"name": "allele_one_two"},
{"name": "allele_duplicate_one_two"},
{"name": "allele_three_four"},
{"name": "allele_five_six"},
{"name": "allele_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4, 5, 6, 7, 8, 9]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"allele_duplicate_one_two": [],
"allele_three_four": [3, 4],
"allele_five_six": [5, 6],
"allele_none": [],
},
}
}
assert result == expected_result
# ---------
# Test exceptions
# Test allele exception on allele filter
filter_config = {
"filters": [{"name": "allele_one_two", "exceptions": [{"name": "allele_one"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {"allele_ids": [1, 3, 4], "excluded_allele_ids": {"allele_one_two": [2]}}
}
assert result == expected_result
# ---------
# Test that analysis exception on allele filter fails
filter_config = {
"filters": [{"name": "allele_one_two", "exceptions": [{"name": "analysis_one_two"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
with pytest.raises(AssertionError):
allele_filter.filter_alleles(filter_config_id, {})
# ---------
# Test that exceptions only apply to the filter specified to
filter_config = {
"filters": [
{"name": "allele_one_two", "exceptions": [{"name": "allele_three_four"}]},
{"name": "allele_three_four", "exceptions": [{"name": "allele_one_two"}]},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {
"allele_ids": [],
"excluded_allele_ids": {"allele_one_two": [1, 2], "allele_three_four": [3, 4]},
}
}
assert result == expected_result
@pytest.mark.aa(order=1)
def test_filter_analysis(self, session, allele_filter):
# ---------
# Test single analysis filter
filter_config = {"filters": [{"name": "analysis_one_two"}]}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"analysis_one_two": [1, 2]},
}
assert result == expected_result
# ---------
# Test multiple analysis filters
filter_config = {
"filters": [
{"name": "analysis_one_two"},
{"name": "analysis_duplicate_one_two"},
{"name": "analysis_three_four"},
{"name": "analysis_five_six"},
{"name": "analysis_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4, 5, 6, 7, 8, 9]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"analysis_duplicate_one_two": [],
"analysis_three_four": [3, 4],
"analysis_five_six": [5, 6],
"analysis_none": [],
},
}
assert result == expected_result
# ---------
# Test combining analysis and allele filters
filter_config = {
"filters": [
# Overlapping allele and analysis filter
{"name": "allele_one_two"},
{"name": "analysis_one_two"},
{"name": "analysis_three_four"},
{"name": "allele_five_six"},
{"name": "analysis_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4, 5, 6, 7, 8, 9]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"analysis_one_two": [],
"analysis_three_four": [3, 4],
"allele_five_six": [5, 6],
"analysis_none": [],
},
}
assert result == expected_result
# ---------
# Test allele exception on analysis filter
filter_config = {
"filters": [{"name": "analysis_one_two", "exceptions": [{"name": "allele_one"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [1, 3, 4],
"excluded_allele_ids": {"analysis_one_two": [2]},
}
assert result == expected_result
# ---------
# Test analysis exception on analysis filter
filter_config = {
"filters": [
{"name": "analysis_one_two", "exceptions": [{"name": "analysis_one_two"}]},
{"name": "analysis_three_four", "exceptions": [{"name": "analysis_one_two"}]},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [1, 2],
"excluded_allele_ids": {"analysis_one_two": [], "analysis_three_four": [3, 4]},
}
assert result == expected_result
# ---------
filter_config = {"filters": [{"name": "analysis_one_two"}, {"name": "allele_one_two"}]}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"analysis_one_two": [1, 2], "allele_one_two": []},
}
assert result == expected_result
# ---------
# Test filters working conditionally to make sure
# previously filtered are not sent to next
# Four cases
# analysis -> analysis
filter_config = {
"filters": [{"name": "analysis_one_two"}, {"name": "analysis_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"analysis_filter_one_three_if_one": [],
},
}
assert result == expected_result
# allele -> analysis
filter_config = {
"filters": [{"name": "allele_one_two"}, {"name": "analysis_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"analysis_filter_one_three_if_one": [],
},
}
assert result == expected_result
# allele -> analysis
filter_config = {
"filters": [{"name": "analysis_one_two"}, {"name": "allele_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"allele_filter_one_three_if_one": [],
},
}
assert result == expected_result
# allele -> allele
filter_config = {
"filters": [{"name": "allele_one_two"}, {"name": "allele_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"allele_one_two": [1, 2], "allele_filter_one_three_if_one": []},
}
assert result == expected_result
|
[
"datalayer.AlleleFilter",
"pytest.raises",
"vardb.datamodel.jsonschema.JSONSchema.get_or_create",
"pytest.mark.aa"
] |
[((420, 544), 'vardb.datamodel.jsonschema.JSONSchema.get_or_create', 'jsonschema.JSONSchema.get_or_create', (['session'], {}), "(session, **{'name': 'filterconfig',\n 'version': 10000, 'schema': {'type': 'object'}})\n", (455, 544), False, 'from vardb.datamodel import sample, jsonschema\n'), ((1292, 1324), 'datalayer.AlleleFilter', 'AlleleFilter', (['session'], {'config': '{}'}), '(session, config={})\n', (1304, 1324), False, 'from datalayer import AlleleFilter\n'), ((3264, 3287), 'pytest.mark.aa', 'pytest.mark.aa', ([], {'order': '(0)'}), '(order=0)\n', (3278, 3287), False, 'import pytest\n'), ((6748, 6771), 'pytest.mark.aa', 'pytest.mark.aa', ([], {'order': '(1)'}), '(order=1)\n', (6762, 6771), False, 'import pytest\n'), ((5866, 5895), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5879, 5895), False, 'import pytest\n')]
|
# Generated by Django 3.0.7 on 2020-07-28 12:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0097_auto_20200724_1157'),
]
operations = [
migrations.RenameModel(
old_name='Culture',
new_name='SimulatorCulture',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((224, 295), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Culture"""', 'new_name': '"""SimulatorCulture"""'}), "(old_name='Culture', new_name='SimulatorCulture')\n", (246, 295), False, 'from django.db import migrations\n')]
|
# -*- encoding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import re
from abc import abstractmethod
import six
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.variables import PartitionedVariable
from easy_rec.python.compat import regularizers
from easy_rec.python.layers import input_layer
from easy_rec.python.utils import constant
from easy_rec.python.utils import estimator_utils
from easy_rec.python.utils import restore_filter
from easy_rec.python.utils.load_class import get_register_class_meta
if tf.__version__ >= '2.0':
tf = tf.compat.v1
_EASY_REC_MODEL_CLASS_MAP = {}
_meta_type = get_register_class_meta(
_EASY_REC_MODEL_CLASS_MAP, have_abstract_class=True)
class EasyRecModel(six.with_metaclass(_meta_type, object)):
def __init__(self,
model_config,
feature_configs,
features,
labels=None,
is_training=False):
self._base_model_config = model_config
self._model_config = model_config
self._is_training = is_training
self._feature_dict = features
self._emb_reg = regularizers.l2_regularizer(self.embedding_regularization)
self._l2_reg = regularizers.l2_regularizer(self.l2_regularization)
self._feature_configs = feature_configs
self.build_input_layer(model_config, feature_configs)
self._labels = labels
self._prediction_dict = {}
self._loss_dict = {}
# add sample weight from inputs
self._sample_weight = 1.0
if constant.SAMPLE_WEIGHT in features:
self._sample_weight = features[constant.SAMPLE_WEIGHT]
@property
def embedding_regularization(self):
return self._base_model_config.embedding_regularization
@property
def kd(self):
return self._base_model_config.kd
@property
def l2_regularization(self):
model_config = getattr(self._base_model_config,
self._base_model_config.WhichOneof('model'))
l2_regularization = 0.0
if hasattr(model_config, 'dense_regularization') and \
model_config.HasField('dense_regularization'):
# backward compatibility
tf.logging.warn(
'dense_regularization is deprecated, please use l2_regularization')
l2_regularization = model_config.dense_regularization
elif hasattr(model_config, 'l2_regularization'):
l2_regularization = model_config.l2_regularization
return l2_regularization
def build_input_layer(self, model_config, feature_configs):
self._input_layer = input_layer.InputLayer(
feature_configs,
model_config.feature_groups,
use_embedding_variable=model_config.use_embedding_variable,
embedding_regularizer=self._emb_reg,
kernel_regularizer=self._l2_reg,
variational_dropout_config=model_config.variational_dropout
if model_config.HasField('variational_dropout') else None,
is_training=False)
@abstractmethod
def build_predict_graph(self):
pass
@abstractmethod
def build_loss_graph(self):
pass
@abstractmethod
def build_metric_graph(self, eval_config):
pass
@abstractmethod
def get_outputs(self):
pass
def restore(self,
ckpt_path,
include_global_step=False,
ckpt_var_map_path='',
force_restore_shape_compatible=False):
"""Restore variables from ckpt_path.
steps:
1. list the variables in graph that need to be restored
2. inspect checkpoint and find the variables that could restore from checkpoint
substitute scope names in case necessary
3. call tf.train.init_from_checkpoint to restore the variables
Args:
ckpt_path: checkpoint path to restore from
include_global_step: whether to restore global_step variable
ckpt_var_map_path: variable map from graph variables to variables in a checkpoint
each line consists of: variable name in graph variable name in ckpt
force_restore_shape_compatible: if variable shape is incompatible, clip or pad
variables in checkpoint, and then restore
Returns:
IncompatibleShapeRestoreHook if force_shape_compatible else None
"""
name2var_map = self._get_restore_vars(ckpt_var_map_path)
logging.info('start to restore from %s' % ckpt_path)
if ckpt_path.endswith('/') or tf.gfile.IsDirectory(ckpt_path + '/'):
ckpt_path = estimator_utils.latest_checkpoint(ckpt_path)
print('ckpt_path is model_dir, will use the latest checkpoint: %s' %
ckpt_path)
ckpt_reader = tf.train.NewCheckpointReader(ckpt_path)
ckpt_var2shape_map = ckpt_reader.get_variable_to_shape_map()
if not include_global_step:
ckpt_var2shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
vars_in_ckpt = {}
incompatible_shape_var_map = {}
fail_restore_vars = []
for variable_name, variable in sorted(name2var_map.items()):
if variable_name in ckpt_var2shape_map:
print('restore %s' % variable_name)
ckpt_var_shape = ckpt_var2shape_map[variable_name]
if type(variable) == list:
shape_arr = [x.get_shape() for x in variable]
var_shape = list(shape_arr[0])
for x in shape_arr[1:]:
var_shape[0] += x[0]
var_shape = tensor_shape.TensorShape(var_shape)
variable = PartitionedVariable(
variable_name,
var_shape,
variable[0].dtype,
variable,
partitions=[len(variable)] + [1] * (len(var_shape) - 1))
else:
var_shape = variable.shape.as_list()
if ckpt_var_shape == var_shape:
vars_in_ckpt[variable_name] = list(variable) if isinstance(
variable, PartitionedVariable) else variable
elif len(ckpt_var_shape) == len(var_shape):
if force_restore_shape_compatible:
# create a variable compatible with checkpoint to restore
dtype = variable[0].dtype if isinstance(variable,
list) else variable.dtype
with tf.variable_scope('incompatible_shape_restore'):
tmp_var = tf.get_variable(
name=variable_name + '_T_E_M_P',
shape=ckpt_var_shape,
trainable=False,
# add to a special collection for easy reference
# by tf.get_collection('T_E_M_P_RESTROE')
collections=['T_E_M_P_RESTROE'],
dtype=dtype)
vars_in_ckpt[variable_name] = tmp_var
incompatible_shape_var_map[variable] = tmp_var
print('incompatible restore %s[%s, %s]' %
(variable_name, str(var_shape), str(ckpt_var_shape)))
else:
logging.warning(
'Variable [%s] is available in checkpoint, but '
'incompatible shape with model variable.', variable_name)
else:
logging.warning(
'Variable [%s] is available in checkpoint, but '
'incompatible shape dims with model variable.', variable_name)
else:
fail_restore_vars.append(variable_name)
for variable_name in fail_restore_vars:
if 'Momentum' not in variable_name:
logging.warning('Variable [%s] is not available in checkpoint',
variable_name)
tf.train.init_from_checkpoint(ckpt_path, vars_in_ckpt)
if force_restore_shape_compatible:
return estimator_utils.IncompatibleShapeRestoreHook(
incompatible_shape_var_map)
else:
return None
def _get_restore_vars(self, ckpt_var_map_path):
"""Restore by specify variable map between graph variables and ckpt variables.
Args:
ckpt_var_map_path: variable map from graph variables to variables in a checkpoint
each line consists of: variable name in graph variable name in ckpt
Returns:
the list of variables which need to restore from checkpoint
"""
# here must use global_variables, because variables such as moving_mean
# and moving_variance is usually not trainable in detection models
all_vars = tf.global_variables()
PARTITION_PATTERN = '/part_[0-9]+'
VAR_SUFIX_PATTERN = ':[0-9]$'
name2var = {}
for one_var in all_vars:
var_name = re.sub(VAR_SUFIX_PATTERN, '', one_var.name)
if re.search(PARTITION_PATTERN,
var_name) and (not var_name.endswith('/AdamAsync_2') and
not var_name.endswith('/AdamAsync_3')):
var_name = re.sub(PARTITION_PATTERN, '', var_name)
is_part = True
else:
is_part = False
if var_name in name2var:
assert is_part, 'multiple vars: %s' % var_name
name2var[var_name].append(one_var)
else:
name2var[var_name] = [one_var] if is_part else one_var
if ckpt_var_map_path != '':
if not tf.gfile.Exists(ckpt_var_map_path):
logging.warning('%s not exist' % ckpt_var_map_path)
return name2var
# load var map
name_map = {}
with open(ckpt_var_map_path, 'r') as fin:
for one_line in fin:
one_line = one_line.strip()
line_tok = [x for x in one_line.split() if x != '']
if len(line_tok) != 2:
logging.warning('Failed to process: %s' % one_line)
continue
name_map[line_tok[0]] = line_tok[1]
var_map = {}
for var_name in name2var:
if var_name in name_map:
in_ckpt_name = name_map[var_name]
var_map[in_ckpt_name] = name2var[var_name]
else:
logging.warning('Failed to find in var_map_file(%s): %s' %
(ckpt_var_map_path, var_name))
return name2var
else:
var_filter, scope_update = self.get_restore_filter()
if var_filter is not None:
name2var = {
var_name: name2var[var_name]
for var in name2var
if var_filter.keep(var.name)
}
# drop scope prefix if necessary
if scope_update is not None:
name2var = {
scope_update(var_name): name2var[var_name] for var_name in name2var
}
return name2var
def get_restore_filter(self):
"""Get restore variable filter.
Return:
filter: type of Filter in restore_filter.py
scope_drop: type of ScopeDrop in restore_filter.py
"""
if len(self._base_model_config.restore_filters) == 0:
return None, None
for x in self._base_model_config.restore_filters:
logging.info('restore will filter out pattern %s' % x)
all_filters = [
restore_filter.KeywordFilter(x, True)
for x in self._base_model_config.restore_filters
]
return restore_filter.CombineFilter(all_filters,
restore_filter.Logical.AND), None
def get_grouped_vars(self):
"""Get grouped variables, each group will be optimized by a separate optimizer.
Return:
grouped_vars: list of list of variables
"""
raise NotImplementedError()
|
[
"tensorflow.gfile.Exists",
"easy_rec.python.utils.restore_filter.KeywordFilter",
"easy_rec.python.compat.regularizers.l2_regularizer",
"tensorflow.train.NewCheckpointReader",
"tensorflow.global_variables",
"tensorflow.python.framework.tensor_shape.TensorShape",
"easy_rec.python.utils.load_class.get_register_class_meta",
"six.with_metaclass",
"tensorflow.get_variable",
"logging.warning",
"tensorflow.variable_scope",
"re.search",
"re.sub",
"easy_rec.python.utils.estimator_utils.latest_checkpoint",
"tensorflow.gfile.IsDirectory",
"easy_rec.python.utils.restore_filter.CombineFilter",
"tensorflow.logging.warn",
"logging.info",
"tensorflow.train.init_from_checkpoint",
"easy_rec.python.utils.estimator_utils.IncompatibleShapeRestoreHook"
] |
[((687, 763), 'easy_rec.python.utils.load_class.get_register_class_meta', 'get_register_class_meta', (['_EASY_REC_MODEL_CLASS_MAP'], {'have_abstract_class': '(True)'}), '(_EASY_REC_MODEL_CLASS_MAP, have_abstract_class=True)\n', (710, 763), False, 'from easy_rec.python.utils.load_class import get_register_class_meta\n'), ((790, 828), 'six.with_metaclass', 'six.with_metaclass', (['_meta_type', 'object'], {}), '(_meta_type, object)\n', (808, 828), False, 'import six\n'), ((1174, 1232), 'easy_rec.python.compat.regularizers.l2_regularizer', 'regularizers.l2_regularizer', (['self.embedding_regularization'], {}), '(self.embedding_regularization)\n', (1201, 1232), False, 'from easy_rec.python.compat import regularizers\n'), ((1252, 1303), 'easy_rec.python.compat.regularizers.l2_regularizer', 'regularizers.l2_regularizer', (['self.l2_regularization'], {}), '(self.l2_regularization)\n', (1279, 1303), False, 'from easy_rec.python.compat import regularizers\n'), ((4302, 4354), 'logging.info', 'logging.info', (["('start to restore from %s' % ckpt_path)"], {}), "('start to restore from %s' % ckpt_path)\n", (4314, 4354), False, 'import logging\n'), ((4610, 4649), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['ckpt_path'], {}), '(ckpt_path)\n', (4638, 4649), True, 'import tensorflow as tf\n'), ((7430, 7484), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['ckpt_path', 'vars_in_ckpt'], {}), '(ckpt_path, vars_in_ckpt)\n', (7459, 7484), True, 'import tensorflow as tf\n'), ((8213, 8234), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8232, 8234), True, 'import tensorflow as tf\n'), ((2185, 2273), 'tensorflow.logging.warn', 'tf.logging.warn', (['"""dense_regularization is deprecated, please use l2_regularization"""'], {}), "(\n 'dense_regularization is deprecated, please use l2_regularization')\n", (2200, 2273), True, 'import tensorflow as tf\n'), ((4390, 4427), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (["(ckpt_path + '/')"], {}), "(ckpt_path + '/')\n", (4410, 4427), True, 'import tensorflow as tf\n'), ((4447, 4491), 'easy_rec.python.utils.estimator_utils.latest_checkpoint', 'estimator_utils.latest_checkpoint', (['ckpt_path'], {}), '(ckpt_path)\n', (4480, 4491), False, 'from easy_rec.python.utils import estimator_utils\n'), ((7538, 7610), 'easy_rec.python.utils.estimator_utils.IncompatibleShapeRestoreHook', 'estimator_utils.IncompatibleShapeRestoreHook', (['incompatible_shape_var_map'], {}), '(incompatible_shape_var_map)\n', (7582, 7610), False, 'from easy_rec.python.utils import estimator_utils\n'), ((8373, 8416), 're.sub', 're.sub', (['VAR_SUFIX_PATTERN', '""""""', 'one_var.name'], {}), "(VAR_SUFIX_PATTERN, '', one_var.name)\n", (8379, 8416), False, 'import re\n'), ((10615, 10669), 'logging.info', 'logging.info', (["('restore will filter out pattern %s' % x)"], {}), "('restore will filter out pattern %s' % x)\n", (10627, 10669), False, 'import logging\n'), ((10699, 10736), 'easy_rec.python.utils.restore_filter.KeywordFilter', 'restore_filter.KeywordFilter', (['x', '(True)'], {}), '(x, True)\n', (10727, 10736), False, 'from easy_rec.python.utils import restore_filter\n'), ((10812, 10881), 'easy_rec.python.utils.restore_filter.CombineFilter', 'restore_filter.CombineFilter', (['all_filters', 'restore_filter.Logical.AND'], {}), '(all_filters, restore_filter.Logical.AND)\n', (10840, 10881), False, 'from easy_rec.python.utils import restore_filter\n'), ((7322, 7400), 'logging.warning', 'logging.warning', (['"""Variable [%s] is not available in checkpoint"""', 'variable_name'], {}), "('Variable [%s] is not available in checkpoint', variable_name)\n", (7337, 7400), False, 'import logging\n'), ((8426, 8464), 're.search', 're.search', (['PARTITION_PATTERN', 'var_name'], {}), '(PARTITION_PATTERN, var_name)\n', (8435, 8464), False, 'import re\n'), ((8624, 8663), 're.sub', 're.sub', (['PARTITION_PATTERN', '""""""', 'var_name'], {}), "(PARTITION_PATTERN, '', var_name)\n", (8630, 8663), False, 'import re\n'), ((8973, 9007), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['ckpt_var_map_path'], {}), '(ckpt_var_map_path)\n', (8988, 9007), True, 'import tensorflow as tf\n'), ((9017, 9068), 'logging.warning', 'logging.warning', (["('%s not exist' % ckpt_var_map_path)"], {}), "('%s not exist' % ckpt_var_map_path)\n", (9032, 9068), False, 'import logging\n'), ((5329, 5364), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['var_shape'], {}), '(var_shape)\n', (5353, 5364), False, 'from tensorflow.python.framework import tensor_shape\n'), ((9681, 9775), 'logging.warning', 'logging.warning', (["('Failed to find in var_map_file(%s): %s' % (ckpt_var_map_path, var_name))"], {}), "('Failed to find in var_map_file(%s): %s' % (\n ckpt_var_map_path, var_name))\n", (9696, 9775), False, 'import logging\n'), ((7011, 7145), 'logging.warning', 'logging.warning', (['"""Variable [%s] is available in checkpoint, but incompatible shape dims with model variable."""', 'variable_name'], {}), "(\n 'Variable [%s] is available in checkpoint, but incompatible shape dims with model variable.'\n , variable_name)\n", (7026, 7145), False, 'import logging\n'), ((9357, 9408), 'logging.warning', 'logging.warning', (["('Failed to process: %s' % one_line)"], {}), "('Failed to process: %s' % one_line)\n", (9372, 9408), False, 'import logging\n'), ((6831, 6960), 'logging.warning', 'logging.warning', (['"""Variable [%s] is available in checkpoint, but incompatible shape with model variable."""', 'variable_name'], {}), "(\n 'Variable [%s] is available in checkpoint, but incompatible shape with model variable.'\n , variable_name)\n", (6846, 6960), False, 'import logging\n'), ((6143, 6190), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""incompatible_shape_restore"""'], {}), "('incompatible_shape_restore')\n", (6160, 6190), True, 'import tensorflow as tf\n'), ((6216, 6353), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "(variable_name + '_T_E_M_P')", 'shape': 'ckpt_var_shape', 'trainable': '(False)', 'collections': "['T_E_M_P_RESTROE']", 'dtype': 'dtype'}), "(name=variable_name + '_T_E_M_P', shape=ckpt_var_shape,\n trainable=False, collections=['T_E_M_P_RESTROE'], dtype=dtype)\n", (6231, 6353), True, 'import tensorflow as tf\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock PyReach Color Camera."""
from typing import Callable, Optional
import numpy as np # type: ignore
from pyreach import calibration as cal
from pyreach import color_camera
from pyreach import core
from pyreach.mock import calibration_mock as cal_mock
class ColorFrameMock(color_camera.ColorFrame):
"""A single color camera frame taken at a specific time.
Attributes:
time: The time in seconds of the frame since 1970.
sequence: The sequence number of the color frame.
device_type: The JSON device type string.
device_name: The JSON device name string.
color_image: A color image as a (DX,DY,3) array of uint8's.
calibration: The calibration when the image is captured.
"""
def __init__(self, time: float, sequence: int,
device_type: str, device_name: str,
color_image: np.ndarray,
calibration: Optional[cal.Calibration]) -> None:
"""Initialize a MockColorFrame."""
self._time: float = time
self._sequence = sequence
self._device_type: str = device_type
self._device_name: str = device_name
self._color_image: np.ndarray = color_image
self._calibration: Optional[cal.Calibration] = calibration
@property
def time(self) -> float:
"""Return timestamp of the ColorFrame."""
return self._time
@property
def sequence(self) -> int:
"""Sequence number of the ColorFrame."""
return self._sequence
@property
def device_type(self) -> str:
"""Return the Reach device type."""
return self._device_type
@property
def device_name(self) -> str:
"""Return the Reach device name."""
return self._device_name
@property
def color_image(self) -> np.ndarray:
"""Return the color image as a (DX,DY,3)."""
return self._color_image
@property
def calibration(self) -> Optional[cal.Calibration]:
"""Return the Calibration for for the ColorFrame."""
return self._calibration
def pose(self) -> Optional[core.Pose]:
"""Return the pose of the camera when the image is taken."""
raise NotImplementedError
class ColorCameraMock(color_camera.ColorCamera):
"""Mock ColorCamera class."""
def __init__(self) -> None:
"""Init a MockColorCamera."""
pass
def add_update_callback(
self,
callback: Callable[[color_camera.ColorFrame], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback function to be invoked when a new frame is available.
Args:
callback: A function to be invoked when a new frame is available. Returns
False to continue receiving new images. Returns True to stop further
update.
finished_callback: Optional callback, called when the callback is stopped
or if the camera is closed.
Returns:
A function that when called stops the callbacks.
"""
raise NotImplementedError
def start_streaming(self, request_period: float = 0.1) -> None:
"""Start streaming of camera images.
Args:
request_period: The number of seconds between frames. Defaults to .1
second between frames.
"""
pass
def stop_streaming(self) -> None:
"""Stop streaming camera images."""
raise NotImplementedError
def supports_tagged_request(self) -> bool:
"""Return True if tagged requests are supported."""
raise NotImplementedError
def enable_tagged_request(self) -> None:
"""Enable tagged requests."""
raise NotImplementedError
def disable_tagged_request(self) -> None:
"""Disable tagged requests."""
raise NotImplementedError
def image(self) -> Optional[color_camera.ColorFrame]:
"""Return the latest image if it exists."""
color_frame_mock: ColorFrameMock = ColorFrameMock(
1.0, 0, "device_type", "device_name", np.zeros((3, 5, 3),
dtype=np.uint8),
cal_mock.CalibrationMock("device_type", "device_name",
"color_camera_link_name"))
color_frame: color_camera.ColorFrame = color_frame_mock
return color_frame
def fetch_image(self,
timeout: float = 15.0) -> Optional[color_camera.ColorFrame]:
"""Fetch a new image or possibly times out.
Args:
timeout: The optional amount of time to wait for a camera frame. If not
specified, 15 seconds is the default timeout.
Returns:
Returns the color image or None for a timeout.
"""
raise NotImplementedError
def async_fetch_image(self,
callback: Optional[Callable[[color_camera.ColorFrame],
None]] = None,
error_callback: Optional[Callable[[core.PyReachStatus],
None]] = None,
timeout: float = 30) -> None:
"""Fetch a new image asynchronously.
The callback function will be invoked when new image is available.
Args:
callback: A callback function that is called when an image arrives. If the
camera fails to load an image, the callback is not called.
error_callback: Optional callback that is called if there is an error.
timeout: Timeout for the fetch, defaults to 30 seconds.
"""
raise NotImplementedError
@property
def pose(self) -> Optional[core.Pose]:
"""Return the latest pose of the camera."""
raise NotImplementedError
|
[
"pyreach.mock.calibration_mock.CalibrationMock",
"numpy.zeros"
] |
[((4429, 4464), 'numpy.zeros', 'np.zeros', (['(3, 5, 3)'], {'dtype': 'np.uint8'}), '((3, 5, 3), dtype=np.uint8)\n', (4437, 4464), True, 'import numpy as np\n'), ((4529, 4614), 'pyreach.mock.calibration_mock.CalibrationMock', 'cal_mock.CalibrationMock', (['"""device_type"""', '"""device_name"""', '"""color_camera_link_name"""'], {}), "('device_type', 'device_name', 'color_camera_link_name'\n )\n", (4553, 4614), True, 'from pyreach.mock import calibration_mock as cal_mock\n')]
|
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.mo.managed_entity import ManagedEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostSystem(ManagedEntity):
'''The HostSystem managed object type provides access to a virtualization host
platform.Invoking destroy on a HostSystem of standalone type throws a
NotSupported fault. A standalone HostSystem can be destroyed only by invoking
destroy on its parent ComputeResource. Invoking destroy on a failover host
throws a DisallowedOperationOnFailoverHost fault. See
ClusterFailoverHostAdmissionControlPolicy.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostSystem):
super(HostSystem, self).__init__(core, name=name, ref=ref, type=type)
@property
def capability(self):
'''Host capabilities. This might not be available for a disconnected host.'''
return self.update('capability')
@property
def config(self):
'''Host configuration information. This might not be available for a disconnected
host.'''
return self.update('config')
@property
def configManager(self):
'''Host configuration systems.'''
return self.update('configManager')
@property
def datastore(self):
'''A collection of references to the subset of datastore objects in the datacenter
that are available in this HostSystem.'''
return self.update('datastore')
@property
def datastoreBrowser(self):
'''DatastoreBrowser to browse datastores for this host.'''
return self.update('datastoreBrowser')
@property
def hardware(self):
'''Hardware configuration of the host. This might not be available for a
disconnected host.'''
return self.update('hardware')
@property
def licensableResource(self):
'''Information about all licensable resources, currently present on this host.'''
return self.update('licensableResource')
@property
def network(self):
'''A collection of references to the subset of network objects in the datacenter
that are available in this HostSystem.'''
return self.update('network')
@property
def runtime(self):
'''Runtime state information about the host such as connection state.'''
return self.update('runtime')
@property
def summary(self):
'''Basic information about the host, including connection state.'''
return self.update('summary')
@property
def systemResources(self):
'''Reference for the system resource hierarchy, used for configuring the set of
resources reserved to the system and unavailable to virtual machines.'''
return self.update('systemResources')
@property
def vm(self):
'''List of virtual machines associated with this host.'''
return self.update('vm')
def AcquireCimServicesTicket(self):
'''Creates and returns a one-time credential used to establish a remote connection
to a CIM interface. The port to connect to is the standard well known port for
the service.
'''
return self.delegate("AcquireCimServicesTicket")()
def DisconnectHost_Task(self):
'''Disconnects from a host and instructs the server to stop sending heartbeats.
'''
return self.delegate("DisconnectHost_Task")()
def EnterLockdownMode(self):
'''Modifies the permissions on the host, so that it will only be accessible
through local console or an authorized centralized management application. Any
user defined permissions found on the host are lost.Modifies the permissions on
the host, so that it will only be accessible through local console or an
authorized centralized management application. Any user defined permissions
found on the host are lost.Modifies the permissions on the host, so that it
will only be accessible through local console or an authorized centralized
management application. Any user defined permissions found on the host are
lost.
'''
return self.delegate("EnterLockdownMode")()
def EnterMaintenanceMode_Task(self, timeout, evacuatePoweredOffVms=None):
'''Puts the host in maintenance mode. While this task is running and when the host
is in maintenance mode, no virtual machines can be powered on and no
provisioning operations can be performed on the host. Once the call completes,
it is safe to turn off a host without disrupting any virtual machines.Puts the
host in maintenance mode. While this task is running and when the host is in
maintenance mode, no virtual machines can be powered on and no provisioning
operations can be performed on the host. Once the call completes, it is safe to
turn off a host without disrupting any virtual machines.Puts the host in
maintenance mode. While this task is running and when the host is in
maintenance mode, no virtual machines can be powered on and no provisioning
operations can be performed on the host. Once the call completes, it is safe to
turn off a host without disrupting any virtual machines.
:param timeout: The task completes when the host successfully enters maintenance mode or the timeout expires, and in the latter case the task contains a Timeout fault. If the timeout is less than or equal to zero, there is no timeout. The timeout is specified in seconds.
:param evacuatePoweredOffVms: This is a parameter only supported by VirtualCenter. If set to true, for a DRS disabled cluster, the task will not succeed unless all powered-off virtual machines have been manually reregistered; for a DRS enabled cluster, VirtualCenter will automatically reregister powered-off virtual machines and a powered-off virtual machine may remain at the host only for two reasons: (a) no compatible host found for reregistration, (b) DRS is disabled for the virtual machine. If set to false, powered-off virtual machines do not need to be moved.VI API 2.5
'''
return self.delegate("EnterMaintenanceMode_Task")(timeout, evacuatePoweredOffVms)
def ExitLockdownMode(self):
'''Restores Administrator permission for the local administrative account for the
host that was removed by prior call to EnterLockdownMode. If the operation is
successful, adminDisabled will be set to false. This API is not supported on
the host. If invoked directly on a host, a NotSupported fault will be
thrown.See AuthorizationManager
'''
return self.delegate("ExitLockdownMode")()
def ExitMaintenanceMode_Task(self, timeout):
'''Takes the host out of maintenance mode. This blocks if any concurrent running
maintenance-only host configurations operations are being performed. For
example, if VMFS volumes are being upgraded.Takes the host out of maintenance
mode. This blocks if any concurrent running maintenance-only host
configurations operations are being performed. For example, if VMFS volumes are
being upgraded.
:param timeout: Number of seconds to wait for the exit maintenance mode to succeed. If the timeout is less than or equal to zero, there is no timeout.
'''
return self.delegate("ExitMaintenanceMode_Task")(timeout)
def PowerDownHostToStandBy_Task(self, timeoutSec, evacuatePoweredOffVms=None):
'''Puts the host in standby mode, a mode in which the host is in a standby state
from which it can be powered up remotely. While this task is running, no
virtual machines can be powered on and no provisioning operations can be
performed on the host.Puts the host in standby mode, a mode in which the host
is in a standby state from which it can be powered up remotely. While this task
is running, no virtual machines can be powered on and no provisioning
operations can be performed on the host.Puts the host in standby mode, a mode
in which the host is in a standby state from which it can be powered up
remotely. While this task is running, no virtual machines can be powered on and
no provisioning operations can be performed on the host.Puts the host in
standby mode, a mode in which the host is in a standby state from which it can
be powered up remotely. While this task is running, no virtual machines can be
powered on and no provisioning operations can be performed on the host.
:param timeoutSec: The task completes when the host successfully enters standby mode and stops sending heartbeat signals. If heartbeats are still coming after timeoutSecs seconds, the host is declared timedout, and the task is assumed failed.
:param evacuatePoweredOffVms: This is a parameter used only by VirtualCenter. If set to true, for a DRS disabled cluster, the task will not succeed unless all powered-off virtual machines have been manually reregistered; for a DRS enabled cluster, VirtualCenter will automatically reregister powered-off virtual machines and a powered-off virtual machine may remain at the host only for two reasons: (a) no compatible host found for reregistration, (b) DRS is disabled for the virtual machine.
'''
return self.delegate("PowerDownHostToStandBy_Task")(timeoutSec, evacuatePoweredOffVms)
def PowerUpHostFromStandBy_Task(self, timeoutSec):
'''Takes the host out of standby mode. If the command is successful, the host
wakes up and starts sending heartbeats. This method may be called automatically
by a dynamic recommendation generation module to add capacity to a cluster, if
the host is not in maintenance mode.Takes the host out of standby mode. If the
command is successful, the host wakes up and starts sending heartbeats. This
method may be called automatically by a dynamic recommendation generation
module to add capacity to a cluster, if the host is not in maintenance
mode.Takes the host out of standby mode. If the command is successful, the host
wakes up and starts sending heartbeats. This method may be called automatically
by a dynamic recommendation generation module to add capacity to a cluster, if
the host is not in maintenance mode.
:param timeoutSec: The task completes when the host successfully exits standby state and sends a heartbeat signal. If nothing is received from the host for timeoutSec seconds, the host is declared timedout, and the task is assumed failed.
'''
return self.delegate("PowerUpHostFromStandBy_Task")(timeoutSec)
def QueryHostConnectionInfo(self):
'''Connection-oriented information about a host.
'''
return self.delegate("QueryHostConnectionInfo")()
def QueryMemoryOverhead(self, memorySize, numVcpus, videoRamSize=None):
'''<b>Deprecated.</b> <i>As of VI API 2.5, use QueryMemoryOverheadEx.</i>
Determines the amount of memory overhead necessary to power on a virtual
machine with the specified characteristics.
:param memorySize: The amount of virtual system RAM, in bytes. For an existing virtual machine, this value can be found (in megabytes) as the memoryMB property of the VirtualHardware.
:param videoRamSize: The amount of virtual video RAM, in bytes. For an existing virtual machine on a host that supports advertising this property, this value can be found (in kilobytes) as the videoRamSizeInKB property of the VirtualMachineVideoCard. If this parameter is left unset, the default video RAM size for virtual machines on this host is assumed.
:param numVcpus: The number of virtual CPUs. For an existing virtual machine, this value can be found as the numCPU property of the VirtualHardware.
'''
return self.delegate("QueryMemoryOverhead")(memorySize, videoRamSize, numVcpus)
def QueryMemoryOverheadEx(self, vmConfigInfo):
'''Determines the amount of memory overhead necessary to power on a virtual
machine with the specified characteristics.
:param vmConfigInfo: The configuration of the virtual machine.
'''
return self.delegate("QueryMemoryOverheadEx")(vmConfigInfo)
def RebootHost_Task(self, force):
'''Reboots a host. If the command is successful, then the host has been rebooted.
If connected directly to the host, the client never receives an indicator of
success in the returned task but simply loses connection to the host, upon
success.Reboots a host. If the command is successful, then the host has been
rebooted. If connected directly to the host, the client never receives an
indicator of success in the returned task but simply loses connection to the
host, upon success.
:param force: Flag to specify whether or not the host should be rebooted regardless of whether it is in maintenance mode. If true, the host is rebooted, even if there are virtual machines running or other operations in progress.
'''
return self.delegate("RebootHost_Task")(force)
def ReconfigureHostForDAS_Task(self):
'''Reconfigures the host for vSphere HA.Reconfigures the host for vSphere HA.
'''
return self.delegate("ReconfigureHostForDAS_Task")()
def ReconnectHost_Task(self, cnxSpec=None, reconnectSpec=None):
'''Reconnects to a host. This process reinstalls agents and reconfigures the host,
if it has gotten out of date with VirtualCenter. The reconnection process goes
through many of the same steps as addHost: ensuring the correct set of licenses
for the number of CPUs on the host, ensuring the correct set of agents is
installed, and ensuring that networks and datastores are discovered and
registered with VirtualCenter.Reconnects to a host. This process reinstalls
agents and reconfigures the host, if it has gotten out of date with
VirtualCenter. The reconnection process goes through many of the same steps as
addHost: ensuring the correct set of licenses for the number of CPUs on the
host, ensuring the correct set of agents is installed, and ensuring that
networks and datastores are discovered and registered with
VirtualCenter.Reconnects to a host. This process reinstalls agents and
reconfigures the host, if it has gotten out of date with VirtualCenter. The
reconnection process goes through many of the same steps as addHost: ensuring
the correct set of licenses for the number of CPUs on the host, ensuring the
correct set of agents is installed, and ensuring that networks and datastores
are discovered and registered with VirtualCenter.Reconnects to a host. This
process reinstalls agents and reconfigures the host, if it has gotten out of
date with VirtualCenter. The reconnection process goes through many of the same
steps as addHost: ensuring the correct set of licenses for the number of CPUs
on the host, ensuring the correct set of agents is installed, and ensuring that
networks and datastores are discovered and registered with
VirtualCenter.Reconnects to a host. This process reinstalls agents and
reconfigures the host, if it has gotten out of date with VirtualCenter. The
reconnection process goes through many of the same steps as addHost: ensuring
the correct set of licenses for the number of CPUs on the host, ensuring the
correct set of agents is installed, and ensuring that networks and datastores
are discovered and registered with VirtualCenter.
:param cnxSpec: Includes the parameters to use, including user name and password, when reconnecting to the host. If this parameter is not specified, the default connection parameters is used.
:param reconnectSpec: Includes connection parameters specific to reconnect. This will mainly be used to indicate how to handle divergence between the host settings and vCenter Server settings when the host was disconnected.vSphere API 5.0
'''
return self.delegate("ReconnectHost_Task")(cnxSpec, reconnectSpec)
def RetrieveHardwareUptime(self):
'''Return the hardware uptime of the host in seconds. The harware uptime of a host
is not affected by NTP and changes to its wall clock time and can be used by
clients to provide a common time reference for all hosts.
'''
return self.delegate("RetrieveHardwareUptime")()
def ShutdownHost_Task(self, force):
'''Shuts down a host. If the command is successful, then the host has been shut
down. Thus, the client never receives an indicator of success in the returned
task if connected directly to the host.Shuts down a host. If the command is
successful, then the host has been shut down. Thus, the client never receives
an indicator of success in the returned task if connected directly to the host.
:param force: Flag to specify whether or not the host should be shut down regardless of whether it is in maintenance mode. If true, the host is shut down, even if there are virtual machines running or other operations in progress.
'''
return self.delegate("ShutdownHost_Task")(force)
def UpdateFlags(self, flagInfo):
'''Update flags that are part of the HostFlagInfo object.
:param flagInfo:
'''
return self.delegate("UpdateFlags")(flagInfo)
def UpdateIpmi(self, ipmiInfo):
'''Update fields that are part of the HostIpmiInfo object.
:param ipmiInfo:
'''
return self.delegate("UpdateIpmi")(ipmiInfo)
def UpdateSystemResources(self, resourceInfo):
'''Update the configuration of the system resource hierarchy.
:param resourceInfo:
'''
return self.delegate("UpdateSystemResources")(resourceInfo)
def RetrieveManagedMethodExecuter(self):
return self.delegate("RetrieveManagedMethodExecuter")()
def RetrieveDynamicTypeManager(self):
return self.delegate("RetrieveDynamicTypeManager")()
|
[
"logging.getLogger"
] |
[((265, 292), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'import logging\n')]
|
from util.Color import *
class Consulta():
def __init__(self, preco, data, paciente_id, medico_id, realizada, paga, id=None):
self.id=id
self.preco=preco
self.data=data
self.paciente_id=paciente_id
self.medico_id=medico_id
self.realizada=realizada
self.paga=paga
def infoConsulta(self, sistema):
from database.MedicoDAO import MedicoDAO
from database.PacienteDAO import PacienteDAO
print(CYAN + "CONSULTA" + RESET)
print("Médico: " + MedicoDAO().getByID(self.medico_id).nome)
print("Paciente: " + PacienteDAO().getByID(self.paciente_id).nome)
|
[
"database.PacienteDAO.PacienteDAO",
"database.MedicoDAO.MedicoDAO"
] |
[((555, 566), 'database.MedicoDAO.MedicoDAO', 'MedicoDAO', ([], {}), '()\n', (564, 566), False, 'from database.MedicoDAO import MedicoDAO\n'), ((626, 639), 'database.PacienteDAO.PacienteDAO', 'PacienteDAO', ([], {}), '()\n', (637, 639), False, 'from database.PacienteDAO import PacienteDAO\n')]
|
"""electoral URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService
urlpatterns = [
path('admin/', admin.site.urls),
path('api/testdata/', TestDataView.as_view()),
path('api/authenticate/', Authenticate.as_view()),
path('privacy-policy/', PrivacyPolicy.as_view()),
path('terms-of-service/', TermsOfService.as_view()),
re_path(r'^', FrontendAppView.as_view()),
]
|
[
"electoral_backend.views.Authenticate.as_view",
"electoral_backend.views.PrivacyPolicy.as_view",
"django.urls.path",
"electoral_backend.views.FrontendAppView.as_view",
"electoral_backend.views.TestDataView.as_view",
"electoral_backend.views.TermsOfService.as_view"
] |
[((836, 867), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (840, 867), False, 'from django.urls import path, re_path\n'), ((895, 917), 'electoral_backend.views.TestDataView.as_view', 'TestDataView.as_view', ([], {}), '()\n', (915, 917), False, 'from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService\n'), ((950, 972), 'electoral_backend.views.Authenticate.as_view', 'Authenticate.as_view', ([], {}), '()\n', (970, 972), False, 'from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService\n'), ((1003, 1026), 'electoral_backend.views.PrivacyPolicy.as_view', 'PrivacyPolicy.as_view', ([], {}), '()\n', (1024, 1026), False, 'from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService\n'), ((1059, 1083), 'electoral_backend.views.TermsOfService.as_view', 'TermsOfService.as_view', ([], {}), '()\n', (1081, 1083), False, 'from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService\n'), ((1104, 1129), 'electoral_backend.views.FrontendAppView.as_view', 'FrontendAppView.as_view', ([], {}), '()\n', (1127, 1129), False, 'from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService\n')]
|
import numpy as np
from scipy.fftpack import rfft, irfft, rfftfreq
from ....routines import rescale
def fourier_filter(data: np.ndarray, fs: float,
lp_freq: float = None, hp_freq: float = None, bs_freqs: list = [],
trans_width: float = 1, band_width: float = 1) -> np.ndarray:
"""
Fourer filter along last axis of ``data`` with lowpass, highpass and bandstop options.
Parameters
----------
``data`` : np.ndarray
``fs``: float
sampling frequency
``lp_freq``: float, optional
lowpass frequency (default is None)
``hp_freq``: float, optional
highpass frequency (default is None)
``bs_freqs``: list, optional
bandstop frequencies (default is [])
``trans_width``: float, optional
width of the transition region between bands (default is 1)
in frequency units
``band_width``: float, optional
width of the band to remove (default is 1)
in frequency units
Returns
-------
np.ndarray
filtered ``data``
"""
T = data.shape[-1]
d = 1. / fs
freq = rfftfreq(T, d)
f_data = rfft(data, axis=-1)
freq_resp = create_freq_resp(freq, lp_freq,
hp_freq, bs_freqs,
trans_width, band_width)
f_data = np.apply_along_axis(lambda x: x * freq_resp, -1, f_data)
data_filtered = irfft(f_data)
return data_filtered
def create_freq_resp(freq: np.ndarray, lp_freq: float,
hp_freq: float, bs_freqs: list,
trans_width: float, band_width: float) -> np.ndarray:
"""Calculates frequency responce for given ``freq``
Parameters
----------
``freq``: np.ndarray, shape=(N)
frequency array
``lp_freq``: float
lowpass frequency
``hp_freq``: float
highpass frequency
``bs_freqs``: list
bandstop frequencies
``trans_width``: float
width of the transition region between bands
``band_width``: float
width of the band to remove
Returns
--------
np.ndarray, shape=(N)
frequency responce array in range form 0 to 1
"""
freq_resp = np.ones_like(freq)
if lp_freq is not None:
freq_resp *= FR_lowpass(freq, lp_freq, trans_width)
if hp_freq is not None:
freq_resp *= FR_highpass(freq, hp_freq, trans_width)
for bs_freq in bs_freqs:
freq_resp *= FR_bandstop(freq, bs_freq, trans_width, band_width)
return freq_resp
def FR_lowpass(freq: np.ndarray, lp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for lowpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``lp_freq``: float
lowpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((freq - lp_freq) / sigma))
def FR_highpass(freq: np.ndarray, hp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for highpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``hp_freq``: float
highpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((hp_freq - freq) / sigma))
def FR_bandstop(freq: np.ndarray, bs_freq: float,
trans_width: float, band_width: float) -> np.ndarray:
"""Frequency responce for bandstop filter
Parameters
----------
``freq``: np.ndarray
frequency array
``bs_freq``: float
bandstop frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
left = FR_lowpass(freq, bs_freq - band_width / 2., trans_width)
right = FR_highpass(freq, bs_freq + band_width / 2., trans_width)
return rescale(left + right)
|
[
"scipy.fftpack.rfftfreq",
"scipy.fftpack.rfft",
"numpy.ones_like",
"numpy.apply_along_axis",
"numpy.exp",
"scipy.fftpack.irfft"
] |
[((1143, 1157), 'scipy.fftpack.rfftfreq', 'rfftfreq', (['T', 'd'], {}), '(T, d)\n', (1151, 1157), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((1171, 1190), 'scipy.fftpack.rfft', 'rfft', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (1175, 1190), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((1372, 1428), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: x * freq_resp)', '(-1)', 'f_data'], {}), '(lambda x: x * freq_resp, -1, f_data)\n', (1391, 1428), True, 'import numpy as np\n'), ((1449, 1462), 'scipy.fftpack.irfft', 'irfft', (['f_data'], {}), '(f_data)\n', (1454, 1462), False, 'from scipy.fftpack import rfft, irfft, rfftfreq\n'), ((2265, 2283), 'numpy.ones_like', 'np.ones_like', (['freq'], {}), '(freq)\n', (2277, 2283), True, 'import numpy as np\n'), ((3115, 3147), 'numpy.exp', 'np.exp', (['((freq - lp_freq) / sigma)'], {}), '((freq - lp_freq) / sigma)\n', (3121, 3147), True, 'import numpy as np\n'), ((3656, 3688), 'numpy.exp', 'np.exp', (['((hp_freq - freq) / sigma)'], {}), '((hp_freq - freq) / sigma)\n', (3662, 3688), True, 'import numpy as np\n')]
|
import os
import sys
import re
import shutil
import importlib.util
import numpy as np
from datetime import datetime
import time
import pathlib
import logging
import PSICT_UIF._include36._LogLevels as LogLevels
## Worker script breakpoints - DO NOT MODIFY
OPTIONS_DICT_BREAKPOINT = '## OPTIONS DICT BREAKPOINT'
SCRIPT_COPY_BREAKPOINT = '## SCRIPT COPY BREAKPOINT'
##############################################################################
## Grouping formatting styles for worker script values
format_groups = {}
format_groups['GHz .6'] = ['readout_frequency_opt', 'qubit_frequency_opt', 'magnon_frequency_opt', 'pump_frequency_opt']
format_groups['MHz int'] = ['readout_IF_frequency', 'qubit_IF_frequency', 'magnon_IF_frequency', 'pump_IF_frequency']
format_groups['MHz .3'] = ['intentional_detuning', 'optimal_detuning']
format_groups['int'] = ['readout_LO_power', 'qubit_LO_power', 'magnon_LO_power', 'pump_IF_frequency', 'SQPG_truncation_range']
format_groups['.2'] = ['magnon_amplitude_alpha', 'magnon_amplitude_beta', 'magnon_phase_beta', 'n_m']
format_groups['.3'] = ['magnon_amplitude', 'pump_amplitude', 'qubit_amplitude', 'readout_amplitude']
format_groups['.4'] = ['readout_amplitude_opt']
format_groups['e rm0'] = ['N_shots', 'SQPG_sampling_rate', 'MultiPulse_sampling_rate', 'digitizer_sampling_rate', 'N_single_shots', 'N_repetitions', 'N_repetitions_2', 'N_pts']
format_groups['e-3 .6'] = ['current']
format_groups['ns'] = ['SQPG_sequence_duration', 'MultiPulse_sequence_duration', 'readout_plateau_opt', 'qubit_width_pi', 'qubit_plateau_pi', 'demodulation_skip_start', 'demodulation_length', 'qubit_width', 'qubit_plateau', 'magnon_width', 'magnon_plateau', 'tau_s', 'tau', 'tau_delay', 'digitizer_length']
format_groups['us'] = ['wait_time']
format_groups['list GHz rm0'] = ['readout_frequency_list', 'qubit_frequency_list', 'magnon_frequency_list']
format_groups['list MHz int'] = ['qubit_drive_detuning_list', 'intentional_detuning_list']
format_groups['list .3'] = ['readout_amplitude_list', 'qubit_amplitude_list', 'n_m_list', 'magnon_amplitude_alpha_list', 'magnon_real_alpha_list', 'magnon_imag_alpha_list']
format_groups['list ns'] = ['qubit_width_list', 'qubit_plateau_list', 'tau_list']
format_groups['dict ns:.6'] = ['qubit_amplitude_pi_dict', 'qubit_amplitude_pi_2_dict']
format_groups['dict ns:1.2'] = ['lambda_dict']
## Function to convert values to correct formatting style
def get_formatted_rep(key, value):
if isinstance(value, str):
value_rep = '\''+value+'\''
## Single-value formats
elif key in format_groups['GHz .6']:
value_rep = '{:.6f}e9'.format(value*1e-9)
elif key in format_groups['MHz int']:
value_rep = '{:.0f}e6'.format(value*1e-6)
elif key in format_groups['MHz .3']:
value_rep = '{:.3f}e6'.format(value*1e-6)
elif key in format_groups['int']:
value_rep = '{:d}'.format(int(value))
elif key in format_groups['.2']:
value_rep = '{:.2f}'.format(value)
elif key in format_groups['.3']:
value_rep = '{:.3f}'.format(value)
elif key in format_groups['.4']:
value_rep = '{:.4f}'.format(value)
elif key in format_groups['e rm0']:
mantissa, exponent = '{:e}'.format(value).split('e')
value_rep = mantissa.rstrip('0').rstrip('.')+'e'+exponent.lstrip('+')
elif key in format_groups['e-3 .6']:
value_rep = '{:.6f}e-3'.format(value*1e3)
elif key in format_groups['ns']:
value_rep = '{:.0f}e-9'.format(value*1e9)
elif key in format_groups['us']:
value_rep = '{:.0f}e-6'.format(value*1e6)
## List formats
elif key in format_groups['list GHz rm0']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = ''.join(['{:f}'.format(start*1e-9).rstrip('0'), 'e9'])
str_stop = ''.join(['{:f}'.format(stop*1e-9).rstrip('0'), 'e9'])
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list MHz int']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = ''.join(['{:.0f}'.format(start*1e-6), 'e6'])
str_stop = ''.join(['{:.0f}'.format(stop*1e-6), 'e6'])
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list .3']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = '{:.3f}'.format(start)
str_stop = '{:.3f}'.format(stop)
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list ns']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = '{:.0f}e-9'.format(start*1e9)
str_stop = '{:.0f}e-9'.format(stop*1e9)
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
## Dict formats
elif key in format_groups['dict ns:.6']:
value_rep = '{'
for inner_key, inner_value in value.items():
key_string = '{:.0f}e-9'.format(inner_key*1e9)
value_string = '{:.6f}'.format(inner_value)
value_rep += key_string+': '+value_string+', '
value_rep += '}'
elif key in format_groups['dict ns:1.2']:
value_rep = '{'
for inner_key, inner_value in value.items():
key_string = '{:.0f}e-9'.format(inner_key*1e9)
value_string = '{:1.3f}'.format(inner_value)
value_rep += key_string+': '+value_string+', '
value_rep += '}'
## new formats go here...
else:
# print(key, 'is not a special class')
value_rep = str(value)
return value_rep
##############################################################################
## Labber Data folder structure
def split_labber_data_dir(original_dir):
head, Data_MMDD_folder = os.path.split(original_dir)
head, MM_folder = os.path.split(head)
head, YYYY_folder = os.path.split(head)
return head, YYYY_folder, MM_folder, Data_MMDD_folder
def update_labber_dates_dir(original_dir, time_obj = datetime.now()):
## Separate path into parts
head, old_year_folder, old_month_folder, old_Data_folder = split_labber_data_dir(original_dir.rstrip('/'))
## Create updated year folder
year_folder = '{:%Y}'.format(time_obj)
## Create updated month folder
month_folder = '{:%m}'.format(time_obj)
## Create updated Data_MMDD folder
Data_folder = 'Data_{:%m%d}'.format(time_obj)
return pathlib.Path(head, year_folder, month_folder, Data_folder)
def increment_filename(fname_in):
'''
Re-implementation of the PSICT-UIF filename incrementation procedure.
'''
## Split the file name into a head and sequential id
fname_split = re.split(r'(\d+$)', fname_in) # split by int searching from back
if len(fname_split) < 2: # could not split properly
raise RuntimeError("Could not identify sequential ID in filename:", fname_in)
fname_head = fname_split[0]
fname_id = fname_split[1]
## Increment the id
new_id = increment_string(fname_id)
## Put path back together
new_fname = "".join([fname_head, new_id])
return new_fname
def increment_string(str_in):
'''
Increment a string, preserving leading zeros.
eg "00567" -> "00568"
'''
return str(int(str_in)+1).zfill(len(str_in))
##############################################################################
## User interaction for hardware changes
def get_user_confirmation(message, MAX_ATTEMPTS = 5):
'''
Wait for a response from the user; use to hold off experiments until hardware changes have been carried out.
'''
n_attempts = 0
positive_response = False
while n_attempts < MAX_ATTEMPTS:
## Print and ask for input
print(message)
user_response = input('Confirm? ({:d}/{:d}) [y/N] '.format(n_attempts+1, MAX_ATTEMPTS))
if user_response == '' or not user_response.lower()[0] == 'y':
print('Response negative; please try again.')
else:
print('Positive response received; continuing...')
positive_response = True
break
## Increment to prevent infinite loops
n_attempts += 1
## Raise error if number of attempts has run out
if not positive_response:
raise RuntimeError('Maximum number of confirmation attempts exceeded; stopping execution.')
##############################################################################
def scan_worker_blocks(worker_file):
'''
Scan the worker file and return the blocks corresponding to its different parts.
'''
## re matches for options dicts
re_match_pulse_sequence = re.compile('pulse_sequence ?= ?')
re_match_PSICT_options = re.compile('worker_PSICT_options ?= ?')
re_match_general_options = re.compile('worker_general_options ?= ?')
re_match_pulse_sequence_options = re.compile('worker_pulse_sequence_options ?= ?')
re_match_script_copy_breakpoint = re.compile(SCRIPT_COPY_BREAKPOINT)
## Prepare empty lists
header_block = []
PSICT_options_block = []
general_options_block = []
pulse_sequence_options_block = []
end_block = []
with open(worker_file, 'r') as worker:
line = worker.readline()
## Read up to 'pulse_sequence = ...'
while not re_match_pulse_sequence.match(line):
header_block.append(line)
line = worker.readline()
## Skip actual 'pulse_sequence = ...' line
line = worker.readline()
## Read up to worker_PSICT_options
while not re_match_PSICT_options.match(line):
line = worker.readline()
## Read up to worker_general_options and add to PSICT_options_block
while not re_match_general_options.match(line):
PSICT_options_block.append(line)
line = worker.readline()
## Read up to worker_pulse_sequence_options and add to general_options_block
while not re_match_pulse_sequence_options.match(line):
general_options_block.append(line)
line = worker.readline()
## Read up to script copy breakpoint and add to pulse_sequence_options_block
while not re_match_script_copy_breakpoint.match(line):
pulse_sequence_options_block.append(line)
line = worker.readline()
## Read the rest of the file and add to end_block
end_block = worker.readlines()
return header_block, PSICT_options_block, general_options_block, pulse_sequence_options_block, end_block
##############################################################################
##############################################################################
class WorkerScriptManager:
def __init__(self, worker_script, PSICT_config):
## Load config (log after logger initialized)
self.set_PSICT_config(PSICT_config)
## Logging
self.init_logging()
## Log config loading for debugging
self.logger.log(LogLevels.VERBOSE, 'Config file loaded from path: {}'.format(self.PSICT_config_path))
## Set up flags
self._iscopied_master = False
## Get file details for master script copying
self._master_wd = os.getcwd()
self._master_inv = sys.argv[0]
self._master_target_dir = None
## Create block placeholders to enable dict setters to function correctly
self.PSICT_options_block = []
self.general_options_block = []
self.pulse_sequence_options_block = []
## Set worker script path
self._worker_path = worker_script
self.refresh_worker()
def set_PSICT_config(self, PSICT_config_path):
self.PSICT_config_path = PSICT_config_path
## Import config file as module
config_spec = importlib.util.spec_from_file_location('', self.PSICT_config_path)
self._PSICT_config = importlib.util.module_from_spec(config_spec)
config_spec.loader.exec_module(self._PSICT_config)
#############################################################################
## Logging
def init_logging(self):
'''
Initialize logging for the WorkerScriptManager.
'''
## Add extra logging levels
logging.addLevelName(LogLevels.ALL, 'ALL')
logging.addLevelName(LogLevels.TRACE, 'TRACE')
logging.addLevelName(LogLevels.VERBOSE, 'VERBOSE')
logging.addLevelName(LogLevels.SPECIAL, 'SPECIAL')
## Init logger
logger_name = 'WSMgr'
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(LogLevels.ALL) # Log all possible events
## Add handlers if there are none already added - code copied from psictUIFInterface module
if len(self.logger.handlers) == 0:
## Console stream handler
if self._PSICT_config.logging_config['console_log_enabled']:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(self._PSICT_config.logging_config['console_log_level'])
console_fmt = logging.Formatter(self._PSICT_config.logging_config['console_fmt'], \
datefmt = self._PSICT_config.logging_config['console_datefmt'])
console_handler.setFormatter(console_fmt)
## Add handler to logger
self.logger.addHandler(console_handler)
## File handler
if self._PSICT_config.logging_config['file_log_enabled']:
log_dir = self._PSICT_config.logging_config['log_dir']
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = self._PSICT_config.logging_config['log_file'].format(datetime.now())+'.log'
log_path = os.path.join(log_dir, log_file)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(self._PSICT_config.logging_config['file_log_level'])
file_fmt = logging.Formatter(self._PSICT_config.logging_config['file_fmt'], \
datefmt = self._PSICT_config.logging_config['file_datefmt'])
file_handler.setFormatter(file_fmt)
## Add handler to logger
self.logger.addHandler(file_handler)
## Add NullHandler if no other handlers are configured
if len(self.logger.handlers) == 0:
self.logger.addHandler(logging.NullHandler())
## Status message
self.logger.debug('Logging initialization complete.')
def log(self, msg, loglevel = 'special', *args, **kwargs):
'''
Log a message to the logger at the specified level.
This method should be used instead of bare `print` functions in scripts at the master level. This method should NOT be used internally within the WorkerScriptManager or related classes.
Log levels can be specified as an integer (the usual way), but can also be string corresponding to the name of the level. Available options are: TRACE, DEBUG, VERBOSE, INFO, SPECIAL, WARNING, ERROR, CRITICAL. Specifying an unsupported string will result in a logged ERROR-level message, but no execution error.
'''
if isinstance(loglevel, str):
## Convert to lowercase
loglevel = loglevel.lower()
## Convert string to appropriate level
if loglevel == 'trace':
lvl = LogLevels.TRACE
elif loglevel == 'debug':
lvl = LogLevels.DEBUG
elif loglevel == 'verbose':
lvl = LogLevels.VERBOSE
elif loglevel == 'info':
lvl = LogLevels.INFO
elif loglevel == 'special':
lvl = LogLevels.SPECIAL
elif loglevel == 'warning':
lvl = LogLevels.WARNING
elif loglevel == 'error':
lvl = LogLevels.ERROR
elif LogLevel == 'critical':
lvl = LogLevels.CRITICAL
else:
self.logger.error('Invalid loglevel string specified in call to log(): {}'.format(loglevel))
return
else: # loglevel is assumed to be numeric
lvl = loglevel
## Log message
self.logger.log(lvl, msg, *args, **kwargs)
#############################################################################
## Working with parameter dicts and text blocks
@property
def PSICT_options(self):
return self._PSICT_options
@PSICT_options.setter
def PSICT_options(self, new_PSICT_options):
## Update stored parameter dict
self._PSICT_options = new_PSICT_options
## Update stored block
self.update_block(self.PSICT_options_block, self._PSICT_options)
@property
def general_options(self):
return self._general_options
@general_options.setter
def general_options(self, new_general_options):
## Update stored parameter dict
self._general_options = new_general_options
## Update stored block
self.update_block(self.general_options_block, self._general_options)
@property
def pulse_sequence_options(self):
return self._pulse_sequence_options
@pulse_sequence_options.setter
def pulse_sequence_options(self, new_pulse_sequence_options):
## Update stored parameter dict
self._pulse_sequence_options = new_pulse_sequence_options
## Update stored block
self.update_block(self.pulse_sequence_options_block, self._pulse_sequence_options, nested_dicts = True)
def refresh_worker(self):
'''
Mount the worker and pull values from it.
'''
self.logger.debug('Refreshing worker...')
self.mount_worker()
self.pull_from_worker()
def mount_worker(self):
'''
(Re)-import/'mount' the worker script.
'''
## Invalidate caches as well, just in case
importlib.invalidate_caches()
## Wait 1 second before mounting the worker - avoids blocking reload of module
time.sleep(1)
## Import worker script as module
worker_spec = importlib.util.spec_from_file_location('', self._worker_path)
self._worker_script = importlib.util.module_from_spec(worker_spec)
worker_spec.loader.exec_module(self._worker_script)
## Status message
self.logger.debug('Worker file mounted as module.')
def pull_from_worker(self):
'''
Pull option values from the worker script.
'''
self.logger.log(LogLevels.TRACE, 'Pulling options dicts from worker...')
## Scan blocks from worker - done first to avoid no-matches when updating options dicts
scanned_blocks = scan_worker_blocks(self._worker_path)
## Allocate worker blocks to specific attributes
self.header_block = scanned_blocks[0]
self.PSICT_options_block = scanned_blocks[1]
self.general_options_block = scanned_blocks[2]
self.pulse_sequence_options_block = scanned_blocks[3]
self.end_block = scanned_blocks[4]
## Import options dicts from worker script
self.PSICT_options = self._worker_script.worker_PSICT_options
self.general_options = self._worker_script.worker_general_options
self.pulse_sequence_options = self._worker_script.worker_pulse_sequence_options
## Status message
self.logger.debug('Pulled options dicts from worker.')
def get_parameters(self):
'''
Convenience method for returning all three options dicts
'''
return self.PSICT_options, self.general_options, self.pulse_sequence_options
def set_parameters(self, new_PSICT_options, new_general_options, new_pulse_sequence_options):
'''
Set stored parameter dicts (and blocks).
'''
self.logger.log(LogLevels.VERBOSE, 'Setting parameters...')
## Update stored dicts and blocks
self.PSICT_options = new_PSICT_options
self.general_options = new_general_options
self.pulse_sequence_options = new_pulse_sequence_options
def update_parameters(self):
'''
Update script based on stored parameters, and then refresh stored parameters from script.
'''
self.logger.log(LogLevels.VERBOSE, 'Cycling parameters through worker...')
## Push to worker
self.update_script(copy = False)
## Refresh worker and pull
self.refresh_worker()
def update_block(self, block, options_dict = {}, nested_dicts = False):
if nested_dicts:
for outer_key, nested_dict in options_dict.items():
## Define top-level match object (pulse sequence name)
re_outer_match = re.compile('\t*[\"\']'+str(outer_key)+'[\'\"]:')
## Iterate over keys in the sub-dict
for inner_key, inner_value in nested_dict.items():
## Define inner match object
re_inner_match = re.compile('\t*[\"\']'+str(inner_key)+'[\"\'] ?: ?')
## Find sub-block by top-level match
outer_key_found = False
inner_key_found = False
for line_index, line in enumerate(block):
if outer_key_found:
## Check for inner match
match_obj = re_inner_match.match(line)
if match_obj:
self.logger.log(LogLevels.TRACE, 'Key {} matches line at index {}'.format(inner_key, line_index))
## Get specific formatting
value_rep = get_formatted_rep(inner_key, inner_value)
## Replace line in block
block[line_index] = ''.join([match_obj.group(), value_rep, ','])
## Stop searching for key
inner_key_found = True
break
else:
## Check for outer match
if re_outer_match.match(line):
outer_key_found = True
self.logger.log(LogLevels.TRACE, 'Outer key {} matches line at index {}'.format(outer_key, line_index))
continue
## End looping over lines
if not inner_key_found:
self.logger.warning('Match not found for key: {}'.format(inner_key))
else:
## Iterate over options_dict keys
for key, value in options_dict.items():
## Generate re match object
re_match = re.compile('\t*[\"\']'+str(key)+'[\"\'] ?: ?')
## Attempt to find a match in the block
key_found = False
for line_index, line in enumerate(block):
match_obj = re_match.match(line)
if match_obj:
self.logger.log(LogLevels.TRACE, 'Key {} matches line at index {}'.format(key, line_index))
## Get specific formatting
value_rep = get_formatted_rep(key, value)
## Replace line in block
block[line_index] = "".join([match_obj.group(), value_rep, ','])
## Stop searching for key
key_found = True
break
## End looping over lines
if not key_found:
self.logger.warning('Match not found for key: {}'.format(key))
return block
#############################################################################
## Writing text blocks to new worker file
def write_block(self, stream, block):
for line in block:
stream.write(line.strip('\n')+'\n')
def write_new_script(self, new_script_path):
with open(new_script_path, 'w') as new_script:
self.write_block(new_script, self.header_block)
new_script.write('pulse_sequence = \''+self._pulse_sequence_name+'\'\n\n')
self.write_block(new_script, self.PSICT_options_block)
self.write_block(new_script, self.general_options_block)
self.write_block(new_script, self.pulse_sequence_options_block)
new_script.write(SCRIPT_COPY_BREAKPOINT+'\n')
self.write_block(new_script, self.end_block)
#############################################################################
## Update the script (ie write and copy)
def set_script_copy_target_dir(self, script_copy_target_dir):
self.target_dir = script_copy_target_dir
def set_master_copy_target_dir(self, master_copy_target_dir):
self._master_target_dir = master_copy_target_dir
def update_script(self, copy = False, target_filename = None, output_path = None):
'''
Docstring
'''
## Status message
self.logger.debug('Updating worker script; copy option is {}'.format(copy))
## Update the original worker script file
self.write_new_script(self._worker_path)
if copy:
## Get a target filename from either the given filename or path
if target_filename is not None:
self.target_file = target_filename
elif output_path is not None:
self.target_file = ''.join([os.path.splitext(os.path.basename(output_path))[0], self._PSICT_config.script_copy_postfix, '.py'])
else:
raise RuntimeError('The target must be specified through either a filename or a path.')
## Generate the full script target path
self.target_path = os.path.join(self.target_dir, self.target_file)
## Create target directory if it does not exist
if not os.path.exists(self.target_dir):
os.makedirs(self.target_dir)
## Copy worker script to target path
shutil.copy(self._worker_path, self.target_path)
# shutil.copy('worker_new.py', self.target_path)
#############################################################################
## Run measurement & do associated admin
def run_measurement(self, pulse_sequence_name):
'''
Docstring.
'''
## Status message
self.logger.info('Running measurement at master: {}'.format(pulse_sequence_name))
## Update pulse sequence name attribute
self._pulse_sequence_name = pulse_sequence_name
## Update parent logger name for worker script
PSICT_options = self.PSICT_options
PSICT_options['parent_logger_name'] = self.logger.name
self.PSICT_options = PSICT_options
## Update parameters: stored -> script -> stored
self.update_parameters()
## Execute measurement function
self.output_path = self._worker_script.run_pulse_sequence(self._pulse_sequence_name, \
self.PSICT_options, self.general_options, \
self.pulse_sequence_options)
## Get output filename and dir
self.output_filename = os.path.splitext(os.path.basename(self.output_path))[0]
self.output_dir = os.path.dirname(os.path.abspath(self.output_path))
# ## Log if required
# if self._logging:
# self._output_logger.add_entry(self.output_filename, self.output_dir, self._pulse_sequence_name)
## Copy master script if required
if not self._iscopied_master:
self.copy_master(self._master_target_dir)
## Update script (with copy)
self.update_script(copy = True, output_path = self.output_path)
## Increment filename in preparation for next measurement
self.PSICT_options['output_file'] = increment_filename(self.output_filename)
## Set parameters
self.set_parameters(self.PSICT_options, self.general_options, self.pulse_sequence_options)
## Update script (incremented filename), with no copy
self.update_parameters()
## Status message
self.logger.info('Running measurement completed at master.')
def update_date(self):
## Update output dir based on today's date
self.PSICT_options['output_dir'] = update_labber_dates_dir(self._PSICT_options['output_dir'])
def copy_master(self, master_dir_target = None):
'''
Copy the master script to the script_copy_target_dir
'''
## Use script_copy_target_dir if no alternative is provided
if master_dir_target is None:
master_dir_target = self.target_dir
## Create target dir if it does not exist
pathlib.Path(master_dir_target).mkdir(parents = True, exist_ok = True)
## Get full path to master file
master_path_original = os.path.join(self._master_wd, self._master_inv)
## Construct filename for target
master_file_target = ''.join([self.output_filename, '_master.py'])
## Construct full path for target
master_path_target = os.path.join(master_dir_target, master_file_target)
## Copy master file
self._master_path_new = shutil.copy(master_path_original, master_path_target)
self.logger.log(LogLevels.SPECIAL, 'Master script copied to: {:s}'.format(self._master_path_new))
## Set flag
self._iscopied_master = True
##
|
[
"logging.addLevelName",
"logging.Formatter",
"pathlib.Path",
"logging.NullHandler",
"os.path.join",
"shutil.copy",
"os.path.abspath",
"logging.FileHandler",
"os.path.exists",
"datetime.datetime.now",
"re.split",
"os.path.basename",
"logging.StreamHandler",
"time.sleep",
"re.compile",
"os.makedirs",
"os.getcwd",
"os.path.split",
"logging.getLogger"
] |
[((6010, 6037), 'os.path.split', 'os.path.split', (['original_dir'], {}), '(original_dir)\n', (6023, 6037), False, 'import os\n'), ((6060, 6079), 'os.path.split', 'os.path.split', (['head'], {}), '(head)\n', (6073, 6079), False, 'import os\n'), ((6104, 6123), 'os.path.split', 'os.path.split', (['head'], {}), '(head)\n', (6117, 6123), False, 'import os\n'), ((6236, 6250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6248, 6250), False, 'from datetime import datetime\n'), ((6652, 6710), 'pathlib.Path', 'pathlib.Path', (['head', 'year_folder', 'month_folder', 'Data_folder'], {}), '(head, year_folder, month_folder, Data_folder)\n', (6664, 6710), False, 'import pathlib\n'), ((6911, 6940), 're.split', 're.split', (['"""(\\\\d+$)"""', 'fname_in'], {}), "('(\\\\d+$)', fname_in)\n", (6919, 6940), False, 'import re\n'), ((8888, 8921), 're.compile', 're.compile', (['"""pulse_sequence ?= ?"""'], {}), "('pulse_sequence ?= ?')\n", (8898, 8921), False, 'import re\n'), ((8951, 8990), 're.compile', 're.compile', (['"""worker_PSICT_options ?= ?"""'], {}), "('worker_PSICT_options ?= ?')\n", (8961, 8990), False, 'import re\n'), ((9022, 9063), 're.compile', 're.compile', (['"""worker_general_options ?= ?"""'], {}), "('worker_general_options ?= ?')\n", (9032, 9063), False, 'import re\n'), ((9102, 9150), 're.compile', 're.compile', (['"""worker_pulse_sequence_options ?= ?"""'], {}), "('worker_pulse_sequence_options ?= ?')\n", (9112, 9150), False, 'import re\n'), ((9189, 9223), 're.compile', 're.compile', (['SCRIPT_COPY_BREAKPOINT'], {}), '(SCRIPT_COPY_BREAKPOINT)\n', (9199, 9223), False, 'import re\n'), ((11435, 11446), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11444, 11446), False, 'import os\n'), ((12454, 12496), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.ALL', '"""ALL"""'], {}), "(LogLevels.ALL, 'ALL')\n", (12474, 12496), False, 'import logging\n'), ((12505, 12551), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.TRACE', '"""TRACE"""'], {}), "(LogLevels.TRACE, 'TRACE')\n", (12525, 12551), False, 'import logging\n'), ((12560, 12610), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.VERBOSE', '"""VERBOSE"""'], {}), "(LogLevels.VERBOSE, 'VERBOSE')\n", (12580, 12610), False, 'import logging\n'), ((12619, 12669), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.SPECIAL', '"""SPECIAL"""'], {}), "(LogLevels.SPECIAL, 'SPECIAL')\n", (12639, 12669), False, 'import logging\n'), ((12745, 12775), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (12762, 12775), False, 'import logging\n'), ((18345, 18358), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (18355, 18358), False, 'import time\n'), ((29265, 29312), 'os.path.join', 'os.path.join', (['self._master_wd', 'self._master_inv'], {}), '(self._master_wd, self._master_inv)\n', (29277, 29312), False, 'import os\n'), ((29500, 29551), 'os.path.join', 'os.path.join', (['master_dir_target', 'master_file_target'], {}), '(master_dir_target, master_file_target)\n', (29512, 29551), False, 'import os\n'), ((29612, 29665), 'shutil.copy', 'shutil.copy', (['master_path_original', 'master_path_target'], {}), '(master_path_original, master_path_target)\n', (29623, 29665), False, 'import shutil\n'), ((26144, 26191), 'os.path.join', 'os.path.join', (['self.target_dir', 'self.target_file'], {}), '(self.target_dir, self.target_file)\n', (26156, 26191), False, 'import os\n'), ((26412, 26460), 'shutil.copy', 'shutil.copy', (['self._worker_path', 'self.target_path'], {}), '(self._worker_path, self.target_path)\n', (26423, 26460), False, 'import shutil\n'), ((27682, 27715), 'os.path.abspath', 'os.path.abspath', (['self.output_path'], {}), '(self.output_path)\n', (27697, 27715), False, 'import os\n'), ((13134, 13167), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (13155, 13167), False, 'import logging\n'), ((13295, 13429), 'logging.Formatter', 'logging.Formatter', (["self._PSICT_config.logging_config['console_fmt']"], {'datefmt': "self._PSICT_config.logging_config['console_datefmt']"}), "(self._PSICT_config.logging_config['console_fmt'], datefmt\n =self._PSICT_config.logging_config['console_datefmt'])\n", (13312, 13429), False, 'import logging\n'), ((14020, 14051), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (14032, 14051), False, 'import os\n'), ((14083, 14112), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (14102, 14112), False, 'import logging\n'), ((14231, 14359), 'logging.Formatter', 'logging.Formatter', (["self._PSICT_config.logging_config['file_fmt']"], {'datefmt': "self._PSICT_config.logging_config['file_datefmt']"}), "(self._PSICT_config.logging_config['file_fmt'], datefmt=\n self._PSICT_config.logging_config['file_datefmt'])\n", (14248, 14359), False, 'import logging\n'), ((14694, 14715), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (14713, 14715), False, 'import logging\n'), ((26272, 26303), 'os.path.exists', 'os.path.exists', (['self.target_dir'], {}), '(self.target_dir)\n', (26286, 26303), False, 'import os\n'), ((26321, 26349), 'os.makedirs', 'os.makedirs', (['self.target_dir'], {}), '(self.target_dir)\n', (26332, 26349), False, 'import os\n'), ((27601, 27635), 'os.path.basename', 'os.path.basename', (['self.output_path'], {}), '(self.output_path)\n', (27617, 27635), False, 'import os\n'), ((29123, 29154), 'pathlib.Path', 'pathlib.Path', (['master_dir_target'], {}), '(master_dir_target)\n', (29135, 29154), False, 'import pathlib\n'), ((13824, 13847), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (13838, 13847), False, 'import os\n'), ((13869, 13889), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (13880, 13889), False, 'import os\n'), ((13970, 13984), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13982, 13984), False, 'from datetime import datetime\n'), ((25855, 25884), 'os.path.basename', 'os.path.basename', (['output_path'], {}), '(output_path)\n', (25871, 25884), False, 'import os\n')]
|
import random
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import colorsys
import copy
def visualization(machines, jobs, algo):
# Declaring a figure "gnt"
fig, gnt = plt.subplots()
# Setting labels for x-axis and y-axis
gnt.set_xlabel('Processing Time')
gnt.set_ylabel('Machine')
yticks = []
ylabels = []
for i in range(len(machines)):
yt = 15 * (i + 1)
yl = i + 1
ylabels.append(str(yl))
yticks.append(yt)
color = []
for i in range(len(jobs)):
h, s, l = random.random(), 0.5 + random.random() / 2.0, 0.4 + random.random() / 5.0
r, g, b = [int(256 * i) for i in colorsys.hls_to_rgb(h, l, s)]
c = '#%02x%02x%02x' % (r, g, b)
color.append(c)
# print(color)
# color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(jobs))]
# Setting ticks on y-axis
gnt.set_yticks(yticks)
# Labelling tickes of y-axis
gnt.set_yticklabels(ylabels)
# print(yticks, ylabels)
# Setting graph attribute
gnt.grid(True)
# Declaring a bar in schedule
previous = 0
# for i in range (len(machines)):
# for j in range (len(machines[i])):
# gnt.broken_barh([(previous, machines[i][j][0])], ((i+1)*10, 9), facecolors =(color[machines[i][j][1]-1]), edgecolor = "black")
# previous += machines[i][j][0]
# previous = 0
for i in range(len(machines)):
for j in range(len(machines[i])):
if machines[i][j][1] != 0:
gnt.broken_barh([(previous, machines[i][j][0])], ((i + 1) * 10, 9),
facecolors=(color[machines[i][j][1] - 1]), edgecolor="black")
previous += machines[i][j][0]
else:
if (float(machines[i][j][0])) != 0:
gnt.broken_barh([(previous, (float(machines[i][j][0])))], ((i + 1) * 10, 9), facecolors='white',
edgecolor="black")
previous += (float(machines[i][j][0]))
previous = 0
plt.yticks([])
fig.set_size_inches(37, 21)
plt.title(algo)
plt.show()
plt.savefig("{}.png".format(algo))
# mpimg.imsave("{}.png".format(algo), fig)
def evan(machines, jobs, c):
machines[0].append([jobs[0][0], 1])
high = 0
low = 1
for index2, item in enumerate(jobs):
if index2 == 0:
continue
if makespan_machines(machines[high]) + item[0] - makespan_machines(machines[low]) <= makespan_machines(
machines[low]):
machines[high].append([item[0], index2 + 1])
elif (item[0] / 2 + c > item[0]) or min(makespan_machines(machines[low]) + item[0],
makespan_machines(machines[high])) >= max(
makespan_machines(machines[low]) + item[0], makespan_machines(machines[high])) - min(
makespan_machines(machines[low]) + item[0], makespan_machines(machines[high])):
machines[low].append([item[0], index2 + 1])
else:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0]/2+c, index2+1])
machines[high].append([item[0] / 2 + c, index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
def evan_greedy(machines, jobs, c):
low = 0
high = 1
for index2, item in enumerate(jobs):
if item[0]/2 + c < makespan_machines(machines[low]) + item[0]:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0] / 2 + c, index2+1])
machines[high].append([item[0] / 2 + c, index2+1])
else:
machines[low].append([item[0]/2+c, index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
def evan_76(machines, jobs, c):
machines[0].append([jobs[0][0], 1])
high = 0
low = 1
job_sum = jobs[0][0]
r4 = 0
for index2, item in enumerate(jobs):
if index2 == 0:
continue
job_sum += item[0]
if makespan_machines(machines[high]) + item[0] <= 7/12 * job_sum:
machines[high].append([item[0], index2+1])
elif max(makespan_machines(machines[low])+item[0], makespan_machines(machines[high])) <= 7/12 * job_sum:
machines[low].append([item[0], index2+1])
elif makespan_machines(machines[high]) + item[0]/2 + c <= 7/12 * job_sum:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0] / 2 + c, index2 + 1])
machines[high].append([item[0] / 2 + c, index2 + 1])
else:
r4 += 1
machines[low].append([item[0], index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
# print(makespan(machines)*2/sum([makespan_machines(machines[0])+makespan_machines(machines[1])]))
return r4
def LS(machines, jobs):
min = machines[0][0]
index = 0
for index2, item in enumerate(jobs):
print(machines)
min = machines[index][0]
for i in range(0, len(machines)):
if machines[i][0] < min:
min = machines[i][0]
index = i
insert_job(machines, item[0], index)
machines[index].append([item[0], index2 + 1])
LS_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return LS_makespan
def SET(machines, jobs, c):
m = len(machines) - 1
for index, item in enumerate(jobs):
kj = 0
k = min(m, math.sqrt(item[0] / c))
if k == m:
kj = m
elif (item[0] / (math.floor(k)) + (math.floor(k) - 1) * c) <= (
item[0] / (math.ceil(k)) + (math.ceil(k) - 1) * c):
kj = math.floor(k)
else:
kj = math.ceil(k)
machines.sort(key=lambda machines: machines[0])
sj = machines[kj - 1][0]
for j in range(kj):
# if machines[j][0] == 0:
# machines[j].append([sj + (item[0]/(kj)+(kj-1)*c), index+1])
# else:
machines[j].append([str(sj - machines[j][0]), 0])
machines[j].append([(item[0] / (kj) + (kj - 1) * c), index + 1])
machines[j][0] = sj + (item[0] / (kj) + (kj - 1) * c)
SET_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return SET_makespan
def LPT(machines, jobs):
jobs.sort(key=lambda jobs: jobs[0], reverse=True)
min = machines[0][0]
index = 0
for index2, item in enumerate(jobs):
min = machines[index][0]
for i in range(0, len(machines)):
if machines[i][0] < min:
min = machines[i][0]
index = i
insert_job(machines, item[0], index)
machines[index].append([item[0], index2 + 1])
LPT_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return LPT_makespan
def makespan_machines(machine):
return sum([float(machine[i][0]) for i in range(len(machine))])
def makespan(machine_list):
return max([makespan_machines(machine_list[i]) for i in range(len(machine_list))])
def insert_job(machine_list, job, machine_index):
machine_list[machine_index][0] += job
def main(m, nj):
machines = []
jobs = []
for i in range(m):
machines.append([0])
for i in range(nj):
jobs.append([random.randint(1, 10)])
machines_evan = [[], []]
# machines2 = copy.deepcopy(machines)
# machines3 = copy.deepcopy(machines)
#
# makespan = SET(machines, jobs, 1)
# makespan2 = LS(machines2, jobs)
# makespan3 = LPT(machines3, jobs)
#
# visualization(machines, jobs, "SET Algorithm")
# visualization(machines2, jobs, "LS Algorithm")
# visualization(machines3, jobs, "LPT Algorithm")
# print(makespan)
# for i in machines:
# print(makespan_machines(i), i)
# print(jobs)
# LS(machines, jobs)
# for i in machines:
# print(i)
evan_76(machines_evan, jobs, 3)
print(jobs)
for i in machines_evan:
print(i)
print(makespan(machines_evan)*2/sum([jobs[i][0] for i in range(len(jobs))]))
# visualization(machines_evan, jobs, "Evan algo")
def main_stimulation_2machines():
maxi = 0
max_jobs = []
c_max = 0
jobs_range = 0
no_job_max = 0
no_r4 = 0
max_r4 = 0
for j in range(50000):
nj = random.randint(1000,2000)
jobs = []
jr = random.randint(1,500)
for i in range(nj):
jobs.append([random.randint(1, jr)])
machines_evan = [[], []]
cr = random.randint(2000,3000)
no_r4 = evan_76(machines_evan, jobs, cr)
k = makespan(machines_evan)*2/sum([jobs[i][0] for i in range(len(jobs))])
if k > maxi:
maxi = k
max_jobs = copy.deepcopy(jobs)
c_max = cr
jobs_range = jr
no_job_max = nj
max_r4 = no_r4
print(maxi, no_r4)
print(maxi, max_r4)
print(max_jobs)
print(c_max)
print(jobs_range)
print(no_job_max)
# main_stimulation_2machines()
# c = 4
# epsilon = 0.01
# l = []
# for i in range (300):
# if i == 0:
# l.append([12*c-epsilon])
# else:
# l.append([12*(6**i)*c-epsilon])
# m = [[],[]]
# print(evan_76(m, l, c))
# print(l)
# for i in (m):
# print(i)
# avg = sum(l[i][0] for i in range(len(l)))
# print(len(l))
# print(makespan(m), avg)
# print(makespan(m)*2/avg)
def find_counter(c):
epsilon = 0.0001*c
job = [[], []]
job[0].append(12*c-epsilon)
li = 0
si = 1
for i in range (1000):
# print("Bound:", 7/5 * (sum(job[0])+sum(job[1])) - 12/5 * sum(job[si]), 12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon)
k = 12 * sum(job[li]) + 12 * c - 7 * (sum(job[0]) + sum(job[1])) - epsilon
while epsilon/(12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon) < 10**(-10):
epsilon *= 10
if math.isnan(k):
break
job[si].append(12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon)
if sum(job[0]) > sum(job[1]):
li = 0
si = 1
else:
si = 0
li = 1
# for i in (job):
# print(i)
fin_job = []
for i in range(len(job[1])):
for j in range(len(job)):
fin_job.append([job[j][i]])
return fin_job
max_approx = 0
for c in range(1,50):
l = find_counter(c)
m = [[],[]]
print(evan_76(m, l, c))
# print(len(l))
for i in (m):
print(i)
avg = max(sum([l[i][0] for i in range(len(l))])/2, max([l[i][0] for i in range(len(l))]))
# print(len(l))
# print(makespan(m), avg)
# print(makespan(m)/avg)
if makespan(m)/avg > max_approx:
max_approx = makespan(m)/avg
print(max_approx)
|
[
"matplotlib.pyplot.title",
"math.isnan",
"copy.deepcopy",
"matplotlib.pyplot.show",
"random.randint",
"math.sqrt",
"math.ceil",
"matplotlib.pyplot.yticks",
"math.floor",
"random.random",
"colorsys.hls_to_rgb",
"matplotlib.pyplot.subplots"
] |
[((208, 222), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (220, 222), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2141), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2137, 2141), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2193), 'matplotlib.pyplot.title', 'plt.title', (['algo'], {}), '(algo)\n', (2187, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2208), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2206, 2208), True, 'import matplotlib.pyplot as plt\n'), ((9031, 9057), 'random.randint', 'random.randint', (['(1000)', '(2000)'], {}), '(1000, 2000)\n', (9045, 9057), False, 'import random\n'), ((9088, 9110), 'random.randint', 'random.randint', (['(1)', '(500)'], {}), '(1, 500)\n', (9102, 9110), False, 'import random\n'), ((9233, 9259), 'random.randint', 'random.randint', (['(2000)', '(3000)'], {}), '(2000, 3000)\n', (9247, 9259), False, 'import random\n'), ((10619, 10632), 'math.isnan', 'math.isnan', (['k'], {}), '(k)\n', (10629, 10632), False, 'import math\n'), ((571, 586), 'random.random', 'random.random', ([], {}), '()\n', (584, 586), False, 'import random\n'), ((6105, 6127), 'math.sqrt', 'math.sqrt', (['(item[0] / c)'], {}), '(item[0] / c)\n', (6114, 6127), False, 'import math\n'), ((9455, 9474), 'copy.deepcopy', 'copy.deepcopy', (['jobs'], {}), '(jobs)\n', (9468, 9474), False, 'import copy\n'), ((686, 714), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['h', 'l', 's'], {}), '(h, l, s)\n', (705, 714), False, 'import colorsys\n'), ((6324, 6337), 'math.floor', 'math.floor', (['k'], {}), '(k)\n', (6334, 6337), False, 'import math\n'), ((6369, 6381), 'math.ceil', 'math.ceil', (['k'], {}), '(k)\n', (6378, 6381), False, 'import math\n'), ((8004, 8025), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (8018, 8025), False, 'import random\n'), ((594, 609), 'random.random', 'random.random', ([], {}), '()\n', (607, 609), False, 'import random\n'), ((623, 638), 'random.random', 'random.random', ([], {}), '()\n', (636, 638), False, 'import random\n'), ((9163, 9184), 'random.randint', 'random.randint', (['(1)', 'jr'], {}), '(1, jr)\n', (9177, 9184), False, 'import random\n'), ((6192, 6205), 'math.floor', 'math.floor', (['k'], {}), '(k)\n', (6202, 6205), False, 'import math\n'), ((6266, 6278), 'math.ceil', 'math.ceil', (['k'], {}), '(k)\n', (6275, 6278), False, 'import math\n'), ((6210, 6223), 'math.floor', 'math.floor', (['k'], {}), '(k)\n', (6220, 6223), False, 'import math\n'), ((6283, 6295), 'math.ceil', 'math.ceil', (['k'], {}), '(k)\n', (6292, 6295), False, 'import math\n')]
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "IP Check"
class Input:
ADDRESS = "address"
class Output:
ADDRESS = "address"
FOUND = "found"
STATUS = "status"
URL = "url"
class LookupInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "Address",
"description": "IPv4 Address",
"order": 1
}
},
"required": [
"address"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class LookupOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "Address",
"description": "IP address that was found",
"order": 3
},
"found": {
"type": "boolean",
"title": "Found",
"description": "Found status",
"order": 1
},
"status": {
"type": "string",
"title": "Status",
"description": "Error message",
"order": 4
},
"url": {
"type": "string",
"title": "URL",
"description": "URL of reputation list",
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
[
"json.loads"
] |
[((307, 575), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "address": {\n "type": "string",\n "title": "Address",\n "description": "IPv4 Address",\n "order": 1\n }\n },\n "required": [\n "address"\n ]\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "address": {\n "type": "string",\n "title": "Address",\n "description": "IPv4 Address",\n "order": 1\n }\n },\n "required": [\n "address"\n ]\n}\n """\n )\n', (317, 575), False, 'import json\n'), ((699, 1327), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "address": {\n "type": "string",\n "title": "Address",\n "description": "IP address that was found",\n "order": 3\n },\n "found": {\n "type": "boolean",\n "title": "Found",\n "description": "Found status",\n "order": 1\n },\n "status": {\n "type": "string",\n "title": "Status",\n "description": "Error message",\n "order": 4\n },\n "url": {\n "type": "string",\n "title": "URL",\n "description": "URL of reputation list",\n "order": 2\n }\n }\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "address": {\n "type": "string",\n "title": "Address",\n "description": "IP address that was found",\n "order": 3\n },\n "found": {\n "type": "boolean",\n "title": "Found",\n "description": "Found status",\n "order": 1\n },\n "status": {\n "type": "string",\n "title": "Status",\n "description": "Error message",\n "order": 4\n },\n "url": {\n "type": "string",\n "title": "URL",\n "description": "URL of reputation list",\n "order": 2\n }\n }\n}\n """\n )\n', (709, 1327), False, 'import json\n')]
|
from gym.envs.registration import register
## off-policy variBAD benchmark
register(
"PointRobot-v0",
entry_point="envs.meta.toy_navigation.point_robot:PointEnv",
kwargs={"max_episode_steps": 60, "n_tasks": 2},
)
register(
"PointRobotSparse-v0",
entry_point="envs.meta.toy_navigation.point_robot:SparsePointEnv",
kwargs={"max_episode_steps": 60, "n_tasks": 2, "goal_radius": 0.2},
)
register(
"Wind-v0",
entry_point="envs.meta.toy_navigation.wind:WindEnv",
)
register(
"HalfCheetahVel-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.half_cheetah_vel:HalfCheetahVelEnv",
"max_episode_steps": 200,
},
max_episode_steps=200,
)
## on-policy variBAD benchmark
register(
"AntDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.ant_dir:AntDirEnv",
"max_episode_steps": 200,
"forward_backward": True,
"n_tasks": None,
},
max_episode_steps=200,
)
register(
"CheetahDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.half_cheetah_dir:HalfCheetahDirEnv",
"max_episode_steps": 200,
"n_tasks": None,
},
max_episode_steps=200,
)
register(
"HumanoidDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.humanoid_dir:HumanoidDirEnv",
"max_episode_steps": 200,
"n_tasks": None,
},
max_episode_steps=200,
)
|
[
"gym.envs.registration.register"
] |
[((77, 221), 'gym.envs.registration.register', 'register', (['"""PointRobot-v0"""'], {'entry_point': '"""envs.meta.toy_navigation.point_robot:PointEnv"""', 'kwargs': "{'max_episode_steps': 60, 'n_tasks': 2}"}), "('PointRobot-v0', entry_point=\n 'envs.meta.toy_navigation.point_robot:PointEnv', kwargs={\n 'max_episode_steps': 60, 'n_tasks': 2})\n", (85, 221), False, 'from gym.envs.registration import register\n'), ((228, 404), 'gym.envs.registration.register', 'register', (['"""PointRobotSparse-v0"""'], {'entry_point': '"""envs.meta.toy_navigation.point_robot:SparsePointEnv"""', 'kwargs': "{'max_episode_steps': 60, 'n_tasks': 2, 'goal_radius': 0.2}"}), "('PointRobotSparse-v0', entry_point=\n 'envs.meta.toy_navigation.point_robot:SparsePointEnv', kwargs={\n 'max_episode_steps': 60, 'n_tasks': 2, 'goal_radius': 0.2})\n", (236, 404), False, 'from gym.envs.registration import register\n'), ((411, 483), 'gym.envs.registration.register', 'register', (['"""Wind-v0"""'], {'entry_point': '"""envs.meta.toy_navigation.wind:WindEnv"""'}), "('Wind-v0', entry_point='envs.meta.toy_navigation.wind:WindEnv')\n", (419, 483), False, 'from gym.envs.registration import register\n'), ((496, 715), 'gym.envs.registration.register', 'register', (['"""HalfCheetahVel-v0"""'], {'entry_point': '"""envs.meta.wrappers:mujoco_wrapper"""', 'kwargs': "{'entry_point': 'envs.meta.mujoco.half_cheetah_vel:HalfCheetahVelEnv',\n 'max_episode_steps': 200}", 'max_episode_steps': '(200)'}), "('HalfCheetahVel-v0', entry_point=\n 'envs.meta.wrappers:mujoco_wrapper', kwargs={'entry_point':\n 'envs.meta.mujoco.half_cheetah_vel:HalfCheetahVelEnv',\n 'max_episode_steps': 200}, max_episode_steps=200)\n", (504, 715), False, 'from gym.envs.registration import register\n'), ((778, 1014), 'gym.envs.registration.register', 'register', (['"""AntDir-v0"""'], {'entry_point': '"""envs.meta.wrappers:mujoco_wrapper"""', 'kwargs': "{'entry_point': 'envs.meta.mujoco.ant_dir:AntDirEnv', 'max_episode_steps': \n 200, 'forward_backward': True, 'n_tasks': None}", 'max_episode_steps': '(200)'}), "('AntDir-v0', entry_point='envs.meta.wrappers:mujoco_wrapper',\n kwargs={'entry_point': 'envs.meta.mujoco.ant_dir:AntDirEnv',\n 'max_episode_steps': 200, 'forward_backward': True, 'n_tasks': None},\n max_episode_steps=200)\n", (786, 1014), False, 'from gym.envs.registration import register\n'), ((1062, 1293), 'gym.envs.registration.register', 'register', (['"""CheetahDir-v0"""'], {'entry_point': '"""envs.meta.wrappers:mujoco_wrapper"""', 'kwargs': "{'entry_point': 'envs.meta.mujoco.half_cheetah_dir:HalfCheetahDirEnv',\n 'max_episode_steps': 200, 'n_tasks': None}", 'max_episode_steps': '(200)'}), "('CheetahDir-v0', entry_point='envs.meta.wrappers:mujoco_wrapper',\n kwargs={'entry_point':\n 'envs.meta.mujoco.half_cheetah_dir:HalfCheetahDirEnv',\n 'max_episode_steps': 200, 'n_tasks': None}, max_episode_steps=200)\n", (1070, 1293), False, 'from gym.envs.registration import register\n'), ((1333, 1554), 'gym.envs.registration.register', 'register', (['"""HumanoidDir-v0"""'], {'entry_point': '"""envs.meta.wrappers:mujoco_wrapper"""', 'kwargs': "{'entry_point': 'envs.meta.mujoco.humanoid_dir:HumanoidDirEnv',\n 'max_episode_steps': 200, 'n_tasks': None}", 'max_episode_steps': '(200)'}), "('HumanoidDir-v0', entry_point='envs.meta.wrappers:mujoco_wrapper',\n kwargs={'entry_point': 'envs.meta.mujoco.humanoid_dir:HumanoidDirEnv',\n 'max_episode_steps': 200, 'n_tasks': None}, max_episode_steps=200)\n", (1341, 1554), False, 'from gym.envs.registration import register\n')]
|
#!/usr/bin/evn python
import sqlite3
from flask import Flask, jsonify, g
app = Flask(__name__)
DATABASE = 'union-bridge'
def query_db(query, args=(), one=False):
cur=g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def connect_db():
return sqlite3.connect(DATABASE)
@app.before_request
def before_request():
g.db = connect_db()
@app.after_request
def after_request(response):
g.db.close()
return response
@app.route("/")
def index():
test = {
"version": "0.0.1",
"name": "historical-api",
"tables": ("buildings", "facts", "sources", "subjects"),
}
return jsonify(test)
@app.route("/buildings")
def buildings():
values = []
for building in query_db('select * from buildings'):
values.push(building)
return(jsonify(values))
@app.route("/facts")
def facts():
values = []
for fact in query_db('select * from facts'):
values.push(fact)
return(jsonify(values))
@app.route("/sources")
def sources():
values = []
for source in query_db('select * from sources'):
values.push(source)
return(jsonify(values))
@app.route("/subjects")
def subjects():
values = []
for subject in query_db('select * from subjects'):
values.push(subject)
return(jsonify(values))
@app.teardown_appcontext
def close_connection(exeption):
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == "__main__":
app.run(debug=True)
|
[
"flask.g.db.execute",
"flask.Flask",
"flask.jsonify",
"flask.g.db.close",
"sqlite3.connect"
] |
[((82, 97), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (87, 97), False, 'from flask import Flask, jsonify, g\n'), ((175, 200), 'flask.g.db.execute', 'g.db.execute', (['query', 'args'], {}), '(query, args)\n', (187, 200), False, 'from flask import Flask, jsonify, g\n'), ((398, 423), 'sqlite3.connect', 'sqlite3.connect', (['DATABASE'], {}), '(DATABASE)\n', (413, 423), False, 'import sqlite3\n'), ((544, 556), 'flask.g.db.close', 'g.db.close', ([], {}), '()\n', (554, 556), False, 'from flask import Flask, jsonify, g\n'), ((784, 797), 'flask.jsonify', 'jsonify', (['test'], {}), '(test)\n', (791, 797), False, 'from flask import Flask, jsonify, g\n'), ((955, 970), 'flask.jsonify', 'jsonify', (['values'], {}), '(values)\n', (962, 970), False, 'from flask import Flask, jsonify, g\n'), ((1109, 1124), 'flask.jsonify', 'jsonify', (['values'], {}), '(values)\n', (1116, 1124), False, 'from flask import Flask, jsonify, g\n'), ((1273, 1288), 'flask.jsonify', 'jsonify', (['values'], {}), '(values)\n', (1280, 1288), False, 'from flask import Flask, jsonify, g\n'), ((1442, 1457), 'flask.jsonify', 'jsonify', (['values'], {}), '(values)\n', (1449, 1457), False, 'from flask import Flask, jsonify, g\n')]
|
from punkweb_boards.conf import settings as BOARD_SETTINGS
from punkweb_boards.models import Report
def settings(request):
return {
"BOARD_SETTINGS": {
"BOARD_NAME": BOARD_SETTINGS.BOARD_NAME,
"BOARD_THEME": BOARD_SETTINGS.BOARD_THEME,
"SHOUTBOX_ENABLED": BOARD_SETTINGS.SHOUTBOX_ENABLED,
"SIGNATURES_ENABLED": BOARD_SETTINGS.SIGNATURES_ENABLED,
"USER_BIRTHDAY_MESSAGE": BOARD_SETTINGS.USER_BIRTHDAY_MESSAGE,
}
}
def base_context(request):
ctx = {}
if request.user.is_authenticated and not request.user.profile.is_banned:
ctx.update({"notifications": request.user.notifications.all()[:5]})
ctx.update({"unread_conversations": request.user.unread_conversations.count()})
ctx.update(
{
"unread_notifications": request.user.notifications.filter(
read=False
).count()
}
)
if request.user.is_staff:
unresolved_reports = Report.objects.filter(resolved=False).count()
ctx.update({"unresolved_reports": unresolved_reports})
return ctx
|
[
"punkweb_boards.models.Report.objects.filter"
] |
[((1039, 1076), 'punkweb_boards.models.Report.objects.filter', 'Report.objects.filter', ([], {'resolved': '(False)'}), '(resolved=False)\n', (1060, 1076), False, 'from punkweb_boards.models import Report\n')]
|
import os
import hou
def main(arguments):
file = arguments["file"].replace(os.sep, '/')
if(arguments["force"] == 0):
hou.hipFile.load(file, suppress_save_prompt=True)
else:
hou.hipFile.save(file_name=None)
hou.hipFile.load(file, suppress_save_prompt=False)
# workspace_path = file.split('/scenes')[0]
# wipcache_path = os.path.split(file.replace('02_shot/3d/scenes', '03_WIP_CACHE_FX'))[0]
# wipcache_path = wipcache_path.replace('01_asset_3d/3d/scenes', '03_WIP_CACHE_FX')
# pubcache_path = os.path.split(file.replace('02_shot/3d/scenes', '04_PUBLISH_CACHE_FX'))[0]
# pubcache_path = pubcache_path.replace('01_asset_3d/3d/scenes', '04_PUBLISH_CACHE_FX')
# hou.putenv('JOB', workspace_path)
# hou.putenv('WIPCACHE', wipcache_path)
# hou.putenv('PUBCACHE', pubcache_path)
|
[
"hou.hipFile.load",
"hou.hipFile.save"
] |
[((135, 184), 'hou.hipFile.load', 'hou.hipFile.load', (['file'], {'suppress_save_prompt': '(True)'}), '(file, suppress_save_prompt=True)\n', (151, 184), False, 'import hou\n'), ((203, 235), 'hou.hipFile.save', 'hou.hipFile.save', ([], {'file_name': 'None'}), '(file_name=None)\n', (219, 235), False, 'import hou\n'), ((244, 294), 'hou.hipFile.load', 'hou.hipFile.load', (['file'], {'suppress_save_prompt': '(False)'}), '(file, suppress_save_prompt=False)\n', (260, 294), False, 'import hou\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# pylint: disable=too-few-public-methods
"""Driloader Command Line Interface
Using Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import argparse
import sys
from driloader.browser.exceptions import BrowserDetectionError
from driloader.factories.browser_factory import BrowserFactory
class OutputType:
"""Output Type
Enum Class to store the possible output types.
Types:
INFO: Any information non-error related.
ERROR: Any error message.
"""
INFO = 'INFO'
ERROR = 'ERROR'
class CliError(Exception):
""" CliError """
def __init__(self, message, cause):
"""Init method
Sets superclass arguments up.
Sets the cause of exception up.
"""
super().__init__(message)
self.cause = cause
# def __str__(self):
# return 'Error: {}.\nCause: {}'.format(self.args[0], self.cause)
class DriloaderCommands:
"""A facade to BrowserDetection"""
@staticmethod
def get_google_chrome_version():
""" Returns Google Chrome version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
CliError: Case something goes wrong when getting the browser version.
"""
try:
return BrowserFactory('CHROME').browser.installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Google Chrome version', str(err)) from err
@staticmethod
def get_firefox_version():
""" Returns Firefox version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
CliError: Case something goes wrong when getting the browser version.
"""
try:
return BrowserFactory('FIREFOX').browser.\
installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Firefox version', str(err)) from err
@staticmethod
def get_internet_explorer_version():
""" Returns Internet Explorer version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
CliError: Case something goes wrong when getting the browser version.
"""
try:
return BrowserFactory('IE').browser.installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Internet Explorer version',
str(err)) from err
def get_all_browsers_versions(self):
""" Returns all browser version.
Args:
self
Returns:
Returns an string with the browser version. Like:
Internet Explorer: 11
Firefox: 45
Google Chrome: 58
Raises:
None
"""
result_message = 'Firefox: {}\nGoogle Chrome: ' \
'{}\nInternet Explorer: {}\n'
try:
ff_version = str(self.get_firefox_version())
except CliError as error:
ff_version = str(error)
try:
chrome_version = str(self.get_google_chrome_version())
except CliError as error:
chrome_version = str(error)
try:
ie_version = str(self.get_internet_explorer_version())
except CliError as error:
ie_version = str(error)
return result_message.format(ff_version, chrome_version, ie_version)
def parse_args():
""" Parse Arguments
Parse arguments from stdin.
Args:
Returns:
A string argument from stdin.
Raises:
None
"""
parser = argparse.ArgumentParser(prog="driloader")
action = parser.add_mutually_exclusive_group(required=True)
action.add_argument('--firefox', '-f',
help='get Firefox version.',
action='store_true')
action.add_argument('--chrome', '-c',
help='get Google Chrome version.',
action='store_true')
action.add_argument('--internet-explorer', '-i',
help='get Internet Explorer version.',
action='store_true')
action.add_argument('--all',
help='look for browser an get their versions.',
action='store_true')
args = parser.parse_args()
for key, value in args.__dict__.items():
if value is True:
return key
return None
def display_output(message, output_type=OutputType.INFO):
""" Display Output
Displays an output message to the correct file descriptor (STDIN or STDOUT) and exits
the script based on the type sent as parameter.
If output_type == OutputType.INFO sends the message to STDIN and exits with code 0.
If output_type == OutputType.ERROR sends the message to STDERR and exits with code 1.
Args:
message: The message to be displayed.
output_type: A type in OutputType class
Returns:
None
Raises:
None
"""
if output_type == OutputType.INFO:
std_descriptor = sys.stdout
exit_code = 0
else:
std_descriptor = sys.stderr
exit_code = 1
message = str(message)
if 'Cause' in message:
message = message.replace('Cause', '\tCause')
print(message, file=std_descriptor)
sys.exit(exit_code)
def main():
""" Main Function
Responsible for:
- call the parse_args() function and get the parameter sent from stdin.
- instantiate the DriloaderCommands class and call its methods based
on the argparser input.
Args:
Returns:
None
Raises:
None
"""
option = parse_args()
commands = DriloaderCommands()
options = {
'chrome': commands.get_google_chrome_version,
'firefox': commands.get_firefox_version,
'internet_explorer': commands.get_internet_explorer_version,
'all': commands.get_all_browsers_versions
}
message = ''
try:
result = options[option]()
message = result
except CliError as cli_error:
display_output(str(cli_error), OutputType.ERROR)
display_output(message, OutputType.INFO)
if __name__ == '__main__':
main()
|
[
"driloader.factories.browser_factory.BrowserFactory",
"argparse.ArgumentParser",
"sys.exit"
] |
[((3882, 3923), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""driloader"""'}), "(prog='driloader')\n", (3905, 3923), False, 'import argparse\n'), ((5618, 5637), 'sys.exit', 'sys.exit', (['exit_code'], {}), '(exit_code)\n', (5626, 5637), False, 'import sys\n'), ((1429, 1453), 'driloader.factories.browser_factory.BrowserFactory', 'BrowserFactory', (['"""CHROME"""'], {}), "('CHROME')\n", (1443, 1453), False, 'from driloader.factories.browser_factory import BrowserFactory\n'), ((1954, 1979), 'driloader.factories.browser_factory.BrowserFactory', 'BrowserFactory', (['"""FIREFOX"""'], {}), "('FIREFOX')\n", (1968, 1979), False, 'from driloader.factories.browser_factory import BrowserFactory\n'), ((2512, 2532), 'driloader.factories.browser_factory.BrowserFactory', 'BrowserFactory', (['"""IE"""'], {}), "('IE')\n", (2526, 2532), False, 'from driloader.factories.browser_factory import BrowserFactory\n')]
|
# -*- coding: utf-8 -*-
import flask
import os
import sys
import ast
import json
import argparse
app = flask.Flask(__name__)
filename = ''
@app.route('/latency_metrics')
def get_latency_percentiles():
""" Retrieves the last saved latency hdr histogram percentiles
and the average latency
Args:
-
Returns:
dict: A JSON object containing the metrics
"""
status = 200
return flask.Response(get_stats_json(),
status=status,
mimetype='application/json')
def get_stats_json():
try:
f = open(filename, 'r')
line = f.readline()
percentiles_dict = {}
while line:
percentile = line.split(' ')[0]
latency = float(line.split(' ')[1])
percentiles_dict[percentile] = latency
line = f.readline()
js = json.dumps(percentiles_dict, indent=2)
f.close()
return js
except Exception as e:
print("An exception occurred")
print(str(e))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Give configuration options')
parser.add_argument('--filename', metavar='filename', type=str,
help='the filename from which will retrieve the metrics')
parser.add_argument('--port', metavar='port', type=int, default=5000,
help='Server port (default 5000)')
args = parser.parse_args()
filename = args.filename
app.run(host='0.0.0.0', port=args.port, debug=True)
|
[
"flask.Flask",
"argparse.ArgumentParser",
"json.dumps"
] |
[((104, 125), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (115, 125), False, 'import flask\n'), ((1105, 1170), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Give configuration options"""'}), "(description='Give configuration options')\n", (1128, 1170), False, 'import argparse\n'), ((898, 936), 'json.dumps', 'json.dumps', (['percentiles_dict'], {'indent': '(2)'}), '(percentiles_dict, indent=2)\n', (908, 936), False, 'import json\n')]
|
import numpy as np
import unittest
from numpy.testing import assert_array_less
from GPyOpt.core.errors import InvalidConfigError
from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design
class TestInitialDesign(unittest.TestCase):
def setUp(self):
self.space = [
{'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1},
{'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)},
{'name': 'var_3', 'type': 'categorical', 'domain': (0,1,2)}
]
self.design_space = Design_space(self.space)
self.bandit_variable = {'name': 'stations', 'type': 'bandit', 'domain': np.array([[1, 1], [2, 2], [3, 3], [4, 4]])}
def assert_samples_against_space(self, samples):
lower_bound_var1 = self.design_space.name_to_variable['var_1'].domain[0]
upper_bound_var1 = self.design_space.name_to_variable['var_1'].domain[1]
self.assertTrue((samples[:,0] >= lower_bound_var1).all())
self.assertTrue((samples[:,0] <= upper_bound_var1).all())
var2_values = self.design_space.name_to_variable['var_2'].domain
self.assertTrue(np.in1d(samples[:,1], var2_values).all())
var3_values = self.design_space.name_to_variable['var_3'].domain
self.assertTrue(np.in1d(samples[:,2], var3_values).all())
def test_grid_design(self):
init_points_count = 3
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
init_points_count = 1000
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_grid_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 1)
init_points_count = 100
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 3**4)
def test_random_design(self):
init_points_count = 10
samples = initial_design('random', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_random_design_with_constraints(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
samples = initial_design('random', self.design_space, initial_points_count)
self.assert_samples_against_space(samples)
self.assertTrue((samples[:,0]**2 - 1 < 0).all())
def test_random_design_with_bandit_only(self):
space = [self.bandit_variable]
self.design_space = Design_space(space)
initial_points_count = 3
samples = initial_design('random', self.design_space, initial_points_count)
self.assertEqual(len(samples), initial_points_count)
def test_nonrandom_designs_with_constrains(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
with self.assertRaises(InvalidConfigError):
initial_design('grid', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('latin', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('sobol', self.design_space, initial_points_count)
def test_latin_design(self):
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_latin_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_sobol_design(self):
init_points_count = 10
samples = initial_design('sobol', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
|
[
"GPyOpt.core.task.space.Design_space",
"numpy.array",
"GPyOpt.experiment_design.initial_design",
"numpy.in1d"
] |
[((592, 616), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (604, 616), False, 'from GPyOpt.core.task.space import Design_space\n'), ((1451, 1511), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (1465, 1511), False, 'from GPyOpt.experiment_design import initial_design\n'), ((1673, 1733), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (1687, 1733), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2158, 2182), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (2170, 2182), False, 'from GPyOpt.core.task.space import Design_space\n'), ((2233, 2293), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (2247, 2293), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2387, 2447), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'init_points_count'], {}), "('grid', self.design_space, init_points_count)\n", (2401, 2447), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2577, 2639), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'init_points_count'], {}), "('random', self.design_space, init_points_count)\n", (2591, 2639), False, 'from GPyOpt.experiment_design import initial_design\n'), ((2904, 2953), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {'constraints': 'constraints'}), '(self.space, constraints=constraints)\n', (2916, 2953), False, 'from GPyOpt.core.task.space import Design_space\n'), ((3007, 3072), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'initial_points_count'], {}), "('random', self.design_space, initial_points_count)\n", (3021, 3072), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3301, 3320), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['space'], {}), '(space)\n', (3313, 3320), False, 'from GPyOpt.core.task.space import Design_space\n'), ((3373, 3438), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""random"""', 'self.design_space', 'initial_points_count'], {}), "('random', self.design_space, initial_points_count)\n", (3387, 3438), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3659, 3708), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {'constraints': 'constraints'}), '(self.space, constraints=constraints)\n', (3671, 3708), False, 'from GPyOpt.core.task.space import Design_space\n'), ((4215, 4276), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'init_points_count'], {}), "('latin', self.design_space, init_points_count)\n", (4229, 4276), False, 'from GPyOpt.experiment_design import initial_design\n'), ((4702, 4726), 'GPyOpt.core.task.space.Design_space', 'Design_space', (['self.space'], {}), '(self.space)\n', (4714, 4726), False, 'from GPyOpt.core.task.space import Design_space\n'), ((4777, 4838), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'init_points_count'], {}), "('latin', self.design_space, init_points_count)\n", (4791, 4838), False, 'from GPyOpt.experiment_design import initial_design\n'), ((5031, 5092), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""sobol"""', 'self.design_space', 'init_points_count'], {}), "('sobol', self.design_space, init_points_count)\n", (5045, 5092), False, 'from GPyOpt.experiment_design import initial_design\n'), ((698, 740), 'numpy.array', 'np.array', (['[[1, 1], [2, 2], [3, 3], [4, 4]]'], {}), '([[1, 1], [2, 2], [3, 3], [4, 4]])\n', (706, 740), True, 'import numpy as np\n'), ((3808, 3871), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""grid"""', 'self.design_space', 'initial_points_count'], {}), "('grid', self.design_space, initial_points_count)\n", (3822, 3871), False, 'from GPyOpt.experiment_design import initial_design\n'), ((3937, 4001), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""latin"""', 'self.design_space', 'initial_points_count'], {}), "('latin', self.design_space, initial_points_count)\n", (3951, 4001), False, 'from GPyOpt.experiment_design import initial_design\n'), ((4067, 4131), 'GPyOpt.experiment_design.initial_design', 'initial_design', (['"""sobol"""', 'self.design_space', 'initial_points_count'], {}), "('sobol', self.design_space, initial_points_count)\n", (4081, 4131), False, 'from GPyOpt.experiment_design import initial_design\n'), ((1188, 1223), 'numpy.in1d', 'np.in1d', (['samples[:, 1]', 'var2_values'], {}), '(samples[:, 1], var2_values)\n', (1195, 1223), True, 'import numpy as np\n'), ((1328, 1363), 'numpy.in1d', 'np.in1d', (['samples[:, 2]', 'var3_values'], {}), '(samples[:, 2], var3_values)\n', (1335, 1363), True, 'import numpy as np\n')]
|
import os
from datetime import datetime, timedelta
#
# Airflow root directory
#
PROJECT_ROOT = os.path.dirname(
os.path.dirname(
os.path.dirname(__file__)
)
)
#
# Dates
#
# yesterday at beginning of day
yesterday_start = datetime.now() - timedelta(days=1)
yesterday_start = yesterday_start.replace(hour=0, minute=0, second=0, microsecond=0)
yesterday_start = yesterday_start.isoformat() + 'Z'
# yesterday at end of day
yesterday_end = datetime.now() - timedelta(days=1)
yesterday_end = yesterday_end.replace(hour=23, minute=59, second=59, microsecond=999999)
yesterday_end = yesterday_end.isoformat() + 'Z'
|
[
"os.path.dirname",
"datetime.datetime.now",
"datetime.timedelta"
] |
[((239, 253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (251, 253), False, 'from datetime import datetime, timedelta\n'), ((256, 273), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (265, 273), False, 'from datetime import datetime, timedelta\n'), ((453, 467), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (465, 467), False, 'from datetime import datetime, timedelta\n'), ((470, 487), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (479, 487), False, 'from datetime import datetime, timedelta\n'), ((142, 167), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (157, 167), False, 'import os\n')]
|
import time
import argparse
import numpy as np
import json
import os
import sys
# from matplotlib import pyplot
from torch.utils.data import DataLoader
from preprocessing import Constants
from util import construct_data_from_json
from dgl_treelstm.KNN import KNN
from dgl_treelstm.nn_models import *
from dgl_treelstm.metric import *
from preprocessing import Vocab
from preprocessing import varTree
from dgl_treelstm.dgl_dataset import dgl_dataset
from check_time import process_data
from train import pad_feature_batcher, batcher
from preprocessing.Vector_Dataset import Vector_Dataset
from preprocessing.Tree_Dataset import Tree_Dataset
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve
import warnings
from dataset_filename_seperation import get_dataset_seperation
warnings.filterwarnings('ignore')
# simulation for different models
class Simulation:
def __init__(self, model, time_selection="adjust", threshold=200):
self.model = model
# this threshold can be adaptive, which is updated in the simulation to find a better border for timeout
self.threshold = threshold
# save data for adaptive threshold
self.time_record = {"timeout":[], "solvable":[]}
# this is the actual time setting, which is fixed for comparision the ground truth
self.time_out_setting = 200
self.time_selection = time_selection
if isinstance(self.model, TreeLSTM):
self.model_type = "TreeLSTM"
self.preprocess = Tree_Dataset.generate_feature_dataset
elif isinstance(self.model, KNN):
self.model_type = "KNN"
self.preprocess = Vector_Dataset.generate_feature_dataset
elif isinstance(self.model, LSTM):
self.model_type = "LSTM"
self.preprocess = Vector_Dataset.generate_feature_dataset
def load_model(self, input):
raise NotImplementedError
if self.model_type == "KNN":
dataset = th.load("data/gnucore/fv2/gnucore_train")
x = [i.feature for i in dataset]
y = [1 if i.gettime("adjust") > 300 else 0 for i in dataset]
self.model.fit(x, y)
elif self.model_type == "LSTM":
model = th.load("checkpoints/gnucore/pad_feature_l_z.pkl")["model"]
self.model.load_state_dict(model)
elif self.model_type == "TreeLSTM":
model = th.load("checkpoints/g_tree_feature_t_z_r_200.pkl")["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
raise NotImplementedError
# feature = self.preprocess(script)
if isinstance(data, varTree):
dataloader = dgl_dataset([data], None)
iterator = iter(dataloader)
data = next(iterator)
feature = data.logic_tree
solve_time = data.gettime("original")
elif self.model_type == "LSTM":
dataloader = DataLoader(dataset=[data], batch_size=1, collate_fn=pad_feature_batcher('cpu', 'original'),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = rnn_utils.pack_padded_sequence(data.feature, data.data_len, enforce_sorted=False,
batch_first=True)
solve_time = data.label
else:
feature, solve_time = data.logic_tree, data.gettime("original")
return feature, solve_time
def predict(self, feature, truth):
raise NotImplementedError
if self.model_type == "KNN":
predict_result = self.model.incremental_predict(feature, truth)
skip = predict_result
elif self.model_type == "LSTM":
self.model.eval()
with th.no_grad():
predict_result = self.model(feature)
skip = predict_result > self.threshold
else:
self.model.eval()
with th.no_grad():
h = th.zeros((1, 150))
c = th.zeros((1, 150))
predict_result = self.model(feature, h, c)
skip = predict_result > self.threshold
return predict_result, skip
def modify_threshold(self, result, truth):
if self.model_type == "KNN":
return
if result < self.threshold and truth > self.time_out_setting:
self.time_record["timeout"].append(result)
elif result < self.threshold and truth < self.time_out_setting:
self.time_record["solvable"].append(result)
if result < self.threshold and truth > self.time_out_setting:
self.dynamic_threshold()
print("decrease threshold to ", str(self.threshold))
return
def dynamic_threshold(self):
timeout_list = np.array(self.time_record["timeout"])
solvable_list = self.time_record["solvable"]
try:
solvable_limit = max(np.percentile(solvable_list, 95), np.mean(solvable_list), 60)
suitable_timeout = list(filter(lambda x: x > solvable_limit, timeout_list))
if len(suitable_timeout) == 0:
suitable_timeout = [solvable_limit]
suitable_min_timeout = min(suitable_timeout)
suitable_min_timeout = min(suitable_min_timeout, self.threshold)
if isinstance(suitable_min_timeout, th.Tensor):
suitable_min_timeout = suitable_min_timeout.item()
max_solvable = max(filter(lambda x:x <= suitable_min_timeout, self.time_record["solvable"]))
if isinstance(max_solvable, th.Tensor):
max_solvable = max_solvable.item()
self.threshold = max(suitable_min_timeout - 1, (suitable_min_timeout + max_solvable) / 2,
self.threshold - 50, 60)
except (IndexError,ValueError):
pass
class KNN_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(KNN_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "KNN"
self.preprocess = Vector_Dataset.generate_feature_dataset
self.separate_test = False
def load_model(self, input):
# dataset = th.load(input)
dataset = construct_data_from_json(input)
# test_filename = ["echo", "ginstall", "expr", "tail", "seq", "split", "test", "yes", "chgrp", "date", "expand",
# "head", "nohup", "printf", "sha1sum", "stat", "timeout", "uniq", "nice", "pr"]
# test_filename = ["expand"]
# dataset = list(filter(lambda x:x.filename not in test_filename, dataset))
x = [i.feature for i in dataset]
if "smt-comp" in input:
fn = [x.filename.split("_")[0] for x in dataset]
else:
fn = [i.filename for i in dataset]
y = [1 if i.gettime(self.time_selection) > self.time_out_setting else 0 for i in dataset]
self.model.fit(x, y)
self.model.filename = np.array(fn)
def script_to_feature(self, data):
if not self.separate_test:
if ".smt2" in data.filename:
fn = data.filename.split("_")[0]
else:
fn = data.filename
self.model.remove_test(fn)
self.separate_test = True
feature, solve_time = data.feature, data.gettime(self.time_selection)
return feature, solve_time
def predict(self, feature, truth):
predict_result = self.model.incremental_predict(feature, truth)
skip = predict_result
return predict_result, skip
class LSTM_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(LSTM_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "LSTM"
self.preprocess = Vector_Dataset.generate_feature_dataset
def load_model(self, input):
model = th.load(input, map_location='cpu')["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
dataloader = DataLoader(dataset=[data], batch_size=1, collate_fn=pad_feature_batcher('cpu', self.time_selection),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = rnn_utils.pack_padded_sequence(data.feature, data.data_len, enforce_sorted=False,
batch_first=True)
solve_time = data.label.item()
return feature, solve_time
def predict(self, feature, truth):
self.model.eval()
with th.no_grad():
predict_result = self.model(feature)
skip = predict_result > self.threshold
return predict_result, skip
class TreeLSTM_Simulation(Simulation):
def __init__(self, model, time_selection="adjust", threshold=200):
super(TreeLSTM_Simulation, self).__init__(model, time_selection, threshold)
self.model_type = "TreeLSTM"
self.preprocess = Tree_Dataset.generate_feature_dataset
def load_model(self, input):
model = th.load(input, map_location='cpu')["model"]
# model = th.load("checkpoints/g_tree+feature_t_z_r_200.pkl")["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
smt_vocab_file = './data/gnucore/smt.vocab'
smt_vocab = Vocab(filename=smt_vocab_file,
data=[Constants.UNK_WORD])
data = dgl_dataset([data], None, vocab=smt_vocab, time_selection=self.time_selection, time_threshold=self.threshold)
dataloader = DataLoader(dataset=data, batch_size=1, collate_fn=batcher("cpu"),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = data.graph
solve_time = data.label[0].item()
return data, solve_time
def predict(self, feature, truth):
self.model.eval()
n = feature.graph.number_of_nodes()
with th.no_grad():
h = th.zeros((n, 150))
c = th.zeros((n, 150))
predict_result = self.model(feature, h, c)
skip = predict_result[0] > self.threshold
return predict_result[0], skip
# result saving structure
class Evalution:
def __init__(self, pred=np.array([]), truth=np.array([]), time_out_setting=200):
self.pred = self.get_numpy(pred)
self.truth = self.get_numpy(truth)
self.classify_result = np.array([])
self.time_out_setting = time_out_setting
def get_numpy(self, data):
if isinstance(data, th.Tensor):
data = data.cpu().numpy()
else:
data = data
return data
def add(self, pred, truth, classify_result):
self.pred = np.append(self.pred, self.get_numpy(pred))
self.truth = np.append(self.truth, self.get_numpy(truth))
self.classify_result = np.append(self.classify_result, self.get_numpy(classify_result))
def score(self):
truth = [1 if x > self.time_out_setting else 0 for x in self.truth]
acc = accuracy_score(truth, self.classify_result)
pre = precision_score(truth, self.classify_result)
rec = recall_score(truth, self.classify_result)
f1 = f1_score(truth, self.classify_result)
return acc, pre, rec, f1
# time calculation
class Time_Section:
def __init__(self):
self.original_time = 0
self.predict_time = 0
# overall time for simulation comparision(without solving phase 1 which manually added)
self.final_time = 0
self.preprocessing_time = 0
def update(self, predict_result, solve_time):
self.original_time += solve_time
# for the first solving phase t1=1s
self.final_time += 1
# skip if predicted timeout
if not predict_result:
self.final_time += solve_time
def add_prediction_time(self, predict_used_time, preprocessing_time):
self.preprocessing_time = preprocessing_time
self.predict_time = predict_used_time
self.final_time = self.final_time + predict_used_time + preprocessing_time
# load the test data, script to feature just like the training, we do not saving the result because the program number
# we also want to include the processing time into final time
def load_data(model, input):
dataset = None
if model == "Tree-LSTM":
dataset = Tree_Dataset(treeforassert=True, feature_number_limit=100)
elif model == "lstm":
dataset = Vector_Dataset(feature_number_limit=50)
elif model == "KNN":
dataset = Vector_Dataset(feature_number_limit=2)
else:
dataset = Tree_Dataset(feature_number_limit=100)
if "smt-comp" in input:
test_filename = input.split("/")[-1]
input = "/".join(input.split("/")[:-1])
dataset.fs_list = dataset.generate_feature_dataset(input, fileprefix=test_filename)
if len(dataset.fs_list) == 0:
print("smt-comp file are not separated with filename, but please use the similar structure, more information in simulation_smt-comp.md")
# test_filename1 = [x.filename for x in dataset.fs_list]
# test_file = list(filter(lambda x:x.split("_")[0] == test_filename, test_filename1))
# dataset.fs_list = dataset.split_with_filename(test_file)[1]
input = input + "/" + test_filename
else:
if "klee" in input:
# the klee processing is time-consuming because of the SMT scripts structure, so we saved the result for next time
# for other dataset we extract feature every time it simulates.
data_input = "data/klee/" + input.split("/")[-1] + model_name
try:
if model == "KNN":
dataset = construct_data_from_json(data_input)
else:
dataset = th.load(data_input)
except (TypeError,FileNotFoundError):
dataset.generate_feature_dataset(input)
if model != "KNN":
th.save(dataset, data_input)
else:
dataset.generate_feature_dataset(input)
return dataset.fs_list, input
# mainly for cross dataset prediction for adaptive KNN model, rely on my model naming pattern
def identify_dataset(input, dataset):
for i in ["busybox", "smt-comp", "klee"]:
if i in input:
return i
if "g_" in input or "gnucore/" in input:
return "gnucore"
if "b_" in input:
return "busybox"
if "s_" in input:
return "smt-comp"
if "k_" in input:
return "klee"
return "gnucore"
# our baseline result, not usable without result from PCC
def make_PCC_output(input, output_result):
if os.path.exists(input):
with open(input, "r") as f:
data = json.load(f)
serial_result = sorted(data["result"], key=lambda x:(len(x[0]), x[0]))
else:
serial_result = []
for i in range(1,4):
with open(input[:-5] + "_" + str(i) + ".json", "r") as f:
data = json.load(f)
serial_result.extend(sorted(data["result"], key=lambda x: (len(x[0]), x[0])))
od = serial_result
for i in ["arch", "chgrp", "csplit", "dirname", "fmt", "id", "md5sum", "mv", "pinky", "readlink", "seq",
"sleep", "tac", "tsort", "uptime", "base64", "chmod", "cut", "du", "fold", "join", "mkdir",
"nice", "pr", "rm", "setuidgid", "sort", "tail", "tty", "users", "basename", "chroot", "date", "expand", "ginstall",
"link", "mkfifo", "nl", "printenv", "rmdir", "sha1sum", "split", "test", "uname", "vdir",
"cat", "comm", "df", "expr", "head", "ln", "mknod", "od", "printf", "runcon", "shred", "stat", "touch", "unexpand", "wc",
"chcon", "cp", "dir", "factor", "hostname", "ls", "mktemp", "pathchk", "ptx", "shuf", "su",
"tr", "unlink", "who", "ifconfig", "rpm", "Sage2", "klogd", "mcm", "lfsr"]:
serial_result = list(filter(lambda x: x[0].startswith(i), od))
if len(serial_result) == 0:
continue
print(i)
truth = [x[2] for x in serial_result]
if isinstance(truth[0], list):
truth = list(map(lambda x:0 if x[0] else 300, truth))
pred = [x[1] for x in serial_result]
dt_simulation = Simulation(None)
dt_simulation.model_type = "DNN"
if isinstance(pred[0], int):
classify_result = pred
else:
threshold_list = []
for i in range(len(truth)):
dt_simulation.modify_threshold(pred[i], truth[i])
threshold_list.append(dt_simulation.threshold)
classify_result = [1.0 if pred[i] > threshold_list[i] else 0.0 for i in range(len(pred))]
# classify_result = [1.0 if x > data["time_limit_setting"] else 0.0 for x in pred]
original_time = sum(truth)
pred_truth_tuple = list(
zip(range(len(pred)), pred, truth, classify_result))
pred_truth_diff_tuple = list(filter(lambda a: a[3] != (a[2] > data["time_limit_setting"]), pred_truth_tuple))
pred_truth_tuple = list(filter(lambda a: a[3] != 0, pred_truth_tuple))
final_time = original_time - sum([x[2] for x in pred_truth_tuple])
truth = [1 if x > data["time_limit_setting"] else 0 for x in truth]
acc = accuracy_score(truth, classify_result)
pre = precision_score(truth, classify_result)
rec = recall_score(truth, classify_result)
f1 = f1_score(truth, classify_result)
print_output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "pred_truth_diff_tuple": pred_truth_diff_tuple,
"original_time": original_time,
"total_time": final_time, "input": input, "pos_num":sum(truth), "tp": sum(truth)*rec,
"fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
print(print_output)
output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "predicted_result": pred,
"acutal_solving_time": truth, "original_time": original_time, "total_time": final_time,
"metrics": {"acc": acc, "pre": pre, "rec": rec, "f1": f1, "pos_num":sum(truth), "tp": sum(truth)*rec,
"fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec},
"time_out_setting": data["time_limit_setting"],
"model": "PCC", "input": input}
output = json.dumps(output, indent=4)
# print(print_output)
print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# fpr, tpr, thresholds = roc_curve(truth, pred)
# pyplot.plot(fpr, tpr, lw=1, label="lstm")
# # print(fpr, tpr, thresholds)
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
if output_result:
try:
outpur_path = "_".join(["gnucore", input.split("/")[-1], "DNN"]) + ".json"
with open("simulation_result/" + outpur_path, "w")as f:
f.write(output)
except:
with open("simulation_result/output.json", "w")as f:
f.write(output)
# output the result for a single program
# to do: not support for adaptive threshold for regression simulation
def make_output(dsn1, dsn2, input, simulation, result, time_section, output_result=True, plot_picture=True):
pred_truth_tuple = list(zip(range(len(result.pred)), result.pred.tolist(), result.truth.tolist(), result.classify_result))
pred_truth_tuple = list(filter(lambda a:a[3] != (a[2] > simulation.time_out_setting), pred_truth_tuple))
truth = [1 if x > simulation.time_out_setting else 0 for x in result.truth]
acc = accuracy_score(truth, result.classify_result)
pre = precision_score(truth, result.classify_result)
rec = recall_score(truth, result.classify_result)
f1 = f1_score(truth, result.classify_result)
confusion_matrix = np.zeros((2, 2))
for t, p in zip(truth, result.classify_result):
confusion_matrix[t][int(p)] += 1
# print_output = {"train_dataset": dsn1, "test_dataset": dsn2, "pred_truth_diff_tuple": pred_truth_tuple,
# "original_time": time_section.original_time,
# "predict_time":time_section.predict_time + time_section.preprocessing_time,
# "total_time": time_section.final_time, "input":input, "pos_num":sum(truth), "tp": sum(truth)*rec,
# "fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
print_output = {"timeout_query_num":sum(truth), "true-positive number": confusion_matrix[1][1],
"false-negative number": confusion_matrix[1][0], "false-positive number": confusion_matrix[0][1]}
output = {"train_dataset": dsn1, "test_dataset": dsn2, "predicted_result": result.pred.tolist(),
"acutal_solving_time": result.truth.tolist(), "original_time": time_section.original_time, "predict_time":
time_section.predict_time + time_section.preprocessing_time, "total_time": time_section.final_time,
"metrics":{"acc": acc, "pre": pre, "rec": rec, "f1": f1}, "time_out_setting": simulation.time_out_setting,
"model":simulation.model_type, "input":input, "pos_num":sum(truth), "tp": confusion_matrix[1][1],
"fn": confusion_matrix[1][0], "fp": confusion_matrix[0][1]}
if not len(result.truth):
return
output = json.dumps(output, indent=4)
print("train dataset:" + dsn1)
# print("test dataset:" + dsn2)
print("test program:" + input)
print("prediction truth difference tuple(index, predicted result, truth, classification result):")
print(pred_truth_tuple)
print("original solving time:" + str(int(time_section.original_time)) + "s")
print("prediction time:" + str(int(time_section.predict_time + time_section.preprocessing_time)) + "s")
print("solving time with the predictor:" + str(int(time_section.final_time)) + "s")
print(print_output)
print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# if simulation.model_type != 'KNN':
# fpr, tpr, thresholds = roc_curve(result.truth > simulation.time_out_setting, result.pred)
# pyplot.plot(fpr, tpr, lw=1, label=simulation.model_type)
# # print(fpr, tpr, thresholds)
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
if output_result:
try:
if args.model_name == "KNN":
identify = ""
elif args.classification:
identify = "_c"
elif args.adapt:
identify = "_m"
else:
identify = "_r"
outpur_path = "_".join([dsn1, input.split("/")[-1], simulation.model_type]) + identify + ".json"
with open("simulation_result/" + outpur_path, "w")as f:
f.write(output)
except:
with open("simulation_result/output.json", "w")as f:
f.write(output)
# automatic partition selection since we use cross validation to generate three piece of result for a model
# used for the hardcoded switch
def choose_input(dataset, input, load_path):
fn = get_dataset_seperation(dataset)
f1, f2, f3 = fn[0], fn[1], fn[2]
input = input.split("/")[-1]
if dataset == "smt-comp":
input = input.split("_")[0]
if os.path.exists(load_path):
return load_path
if input in f1:
load_path = ".".join([load_path.split(".")[0] + "_0", load_path.split(".")[1]])
elif input in f2:
load_path = ".".join([load_path.split(".")[0] + "_1", load_path.split(".")[1]])
elif input in f3:
load_path = ".".join([load_path.split(".")[0] + "_2", load_path.split(".")[1]])
else:
load_path = ""
return load_path
# simulate the solving in real order, in the simulation, the predicted timeout solving would be skipped,
# the time different is taken as the time saved.
# the simulation may not reflect the real situation since wrongly skip path means the change of path selection, but if
# you give it a low priority, then these paths are just deferred, you may execute more paths in the same time budget.
def simulation_for_single_program(test_directory, args):
s = time.time()
input_index = args.input_index
load_path = args.load_file
# some setting process since all simulation use one entry
if not args.regression:
regression = False
else:
input_list[int(input_index)] = input_list[int(input_index)].replace("_r_", "_c_")
regression = True
if model_name == "KNN":
knn = KNN()
simulation = KNN_Simulation(knn, time_selection=args.time_selection)
if not input_index:
input_index = 8
elif model_name == "lstm":
lstm = LSTM(150, regression, False)
simulation = LSTM_Simulation(lstm, time_selection=args.time_selection)
if not input_index:
input_index = 0
else:
tree_lstm = TreeLSTM(133, 150, 150, 1, 0.5, regression, False, cell_type='childsum', pretrained_emb=None)
simulation = TreeLSTM_Simulation(tree_lstm, time_selection=args.time_selection)
if not input_index:
input_index = 2
# setting timeout threshold
# for original time, we collect the data with timeout with 100s, larger than it would be useless
simulation.time_out_setting = args.threshold
if test_directory == None:
test_directory = input_list[int(input_index)]
serial_data, test_input = load_data(model_name, test_directory)
time_section = Time_Section()
result = Evalution(time_out_setting=args.threshold)
# for cross project, identify dataset name
dsn1 = identify_dataset(input_list[int(input_index)], None)
dsn2 = identify_dataset(test_input, serial_data)
# load the model for different approach
if load_path == None:
load_path = input_list[int(input_index)]
if model_name != "KNN":
load_path = choose_input(dsn1, test_input, load_path)
simulation.load_model(load_path)
s1 = time.time()
aindex = 0
# simulation system, but not actual solving since the solving time is consuming, and time may be different
for data in serial_data:
data_index = len(result.truth)
feature, solve_time = simulation.script_to_feature(data)
predict_result, skip = simulation.predict(feature, 1 if solve_time > simulation.time_out_setting else 0)
if len(result.pred) % 500 == 0:
print(len(result.pred))
if model_name != "KNN" and regression and args.adapt:
pass
simulation.modify_threshold(predict_result, solve_time)
if model_name != "KNN" and not regression:
pred = th.argmax(F.log_softmax(predict_result), 1)
skip = pred == 1
predict_result = 1 if skip else 0
time_section.update(skip, solve_time)
result.add(predict_result, solve_time, skip)
aindex += 1
e = time.time()
time_section.add_prediction_time(e - s1, s1 - s)
make_output(dsn1, dsn2, test_directory, simulation, result, time_section, True, True)
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default="KNN", help="model type, allow 'lstm', 'tree-lstm', 'KNN'")
parser.add_argument('--test_directory', default=None, help="the script saving directory for test program")
parser.add_argument('--load_file', default=None, help="the path to model for evaluation")
parser.add_argument('--input_index', type=int, default=8, help="short-way for switch evaluation model,"
"hardcoded, not recommanded to change for use")
parser.add_argument('--time_selection', default='original', help="the time label you want to use, allow "
"'original', 'adjust', the 'adjust' stand for 'z3' by now, modify when you experiment with other solver")
parser.add_argument('--regression', action='store_true', help="used for time prediction(regression),"
"not use for timeout constraint classification(classification)")
parser.add_argument('--adapt', action='store_true', help="an adaptive time threshold for neural network "
"models used for regression, because the predicted timeout threshold varies for different programs")
parser.add_argument('--threshold', type=int, default=200, help="the timeout threshold for solving")
parser.add_argument('--batch-size', type=int, default=64, help="some lstm setting in case you change the model")
parser.add_argument('--x-size', type=int, default=300)
parser.add_argument('--h-size', type=int, default=150)
parser.add_argument('--epochs', type=int, default=40)
parser.add_argument('--num_classes', type=float, default=2)
args = parser.parse_args()
print()
print("Simulation start:")
print(args)
return args
if __name__ == '__main__':
args = parse_arg()
model_name = args.model_name
input_index = args.input_index
# hardcoded short-way for switch evaluation model
input_list = ["checkpoints/simulation/g_serial_pad_feature_l_z_r_200.pkl",#0
"checkpoints/simulation/g_serial_tree_feature_t_z_r_200.pkl",#1
"checkpoints/simulation/g_tree+feature_t_z_r_200.pkl",#2
"checkpoints/simulation/b_serial_pad_feature_l_z_r_200.pkl",#3
"checkpoints/simulation/b_serial_tree_feature_t_z_r_200.pkl",#4
"checkpoints/simulation/b_tree+feature_t_z_r_200.pkl",#5
"checkpoints/simulation/s_serial_pad_feature_l_z_r_200.pkl",#6
"checkpoints/simulation/s_tree_feature_t_z_r_200.pkl",#7
"data/gnucore/fv2_serial/train",#8
"data/busybox/fv2_serial/train",#9
"data/smt-comp/fv2_serial/train",#10
"data/klee/fv2_serial/train",#11
"checkpoints/simulation/k_serial_pad_feature_l_z_r_200.pkl",#12
"checkpoints/simulation/k_serial_tree_feature_l_z_r_200.pkl"]#13
if args.load_file == None and (args.input_index > 13 or args.input_index < 0):
print("these paths are hardcoded shortway for specific directory name")
print(input_list)
exit(0)
# test for all programs in a dataset, the home directory is "data/gnucore/single_test"
# test_input_list = []
# for root, dir, files in os.walk("data/gnucore/single_test"):
# if not root.endswith("single_test"):
# test_input_list.append(root)
# for i in test_input_list:
# input = i
# simulation_for_single_program(test_directory, input_index)
if args.test_directory:
test_directory = args.test_directory
else:
test_directory = "data/example/arch"
# some test
# test_directory = "data/smt-comp/QF_BV/Sage"
# test_directory = "data/klee/arch-43200/solver-queries.smt2"
simulation_for_single_program(test_directory, args)
# make_PCC_output("data/PCC_result/mcm_c.json", False)
# regression simulation, not remember much, different time threshold
# input = "checkpoints/smt-comp/serial_pad_feature_evaluation_c.pkl"
# if os.path.exists(input):
# serial_result = th.load(input)["result"]
# else:
# serial_result = []
# for i in range(1, 4):
# a = th.load(input[:-4] + "_" + str(i) + ".pkl")["result"]
# serial_result.extend(a)
# result = serial_result
# pred = np.array(list(map(lambda x:x[0], result)))
# truth = np.array(list(map(lambda x:x[1], result)))
# for a in [40,50,60,100,150,200,250]:
# if truth.dtype == "int64":
# t, p = truth, pred
# else:
# t, p = truth > a, pred > a
# print("threshold", a)
# acc = accuracy_score(t, p)
# pre = precision_score(t, p)
# rec = recall_score(t, p)
# f1 = f1_score(t, p)
# print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
# if truth.dtype == "int64":
# break
# try:
# fpr, tpr, thresholds = precision_recall_curve(truth > a, pred)
# pyplot.plot(tpr, fpr, lw=1, label="lstm")
# # print(fpr)
# # print(tpr)
# # print(thresholds)
# i = np.searchsorted(thresholds, a)
# print(fpr[i], tpr[i], thresholds[i])
# pyplot.xlim([0.00, 1.0])
# pyplot.ylim([0.00, 1.0])
# pyplot.xlabel("False Positive Rate")
# pyplot.ylabel("True Positive Rate")
# pyplot.title("ROC")
# pyplot.legend(loc="lower right")
# pyplot.savefig(r"./ROC.png")
# pyplot.show()
# except (IndexError, ValueError):
# pass
|
[
"argparse.ArgumentParser",
"sklearn.metrics.accuracy_score",
"json.dumps",
"sklearn.metrics.f1_score",
"numpy.mean",
"preprocessing.Tree_Dataset.Tree_Dataset",
"preprocessing.Vector_Dataset.Vector_Dataset",
"os.path.exists",
"train.batcher",
"util.construct_data_from_json",
"preprocessing.Vocab",
"sklearn.metrics.recall_score",
"numpy.percentile",
"json.load",
"dgl_treelstm.dgl_dataset.dgl_dataset",
"warnings.filterwarnings",
"numpy.zeros",
"time.time",
"dgl_treelstm.KNN.KNN",
"numpy.array",
"sklearn.metrics.precision_score",
"train.pad_feature_batcher",
"dataset_filename_seperation.get_dataset_seperation"
] |
[((858, 891), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (881, 891), False, 'import warnings\n'), ((14925, 14946), 'os.path.exists', 'os.path.exists', (['input'], {}), '(input)\n', (14939, 14946), False, 'import os\n'), ((20244, 20289), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20258, 20289), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20300, 20346), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20315, 20346), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20357, 20400), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20369, 20400), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20410, 20449), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'result.classify_result'], {}), '(truth, result.classify_result)\n', (20418, 20449), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((20473, 20489), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (20481, 20489), True, 'import numpy as np\n'), ((21996, 22024), 'json.dumps', 'json.dumps', (['output'], {'indent': '(4)'}), '(output, indent=4)\n', (22006, 22024), False, 'import json\n'), ((24024, 24055), 'dataset_filename_seperation.get_dataset_seperation', 'get_dataset_seperation', (['dataset'], {}), '(dataset)\n', (24046, 24055), False, 'from dataset_filename_seperation import get_dataset_seperation\n'), ((24199, 24224), 'os.path.exists', 'os.path.exists', (['load_path'], {}), '(load_path)\n', (24213, 24224), False, 'import os\n'), ((25090, 25101), 'time.time', 'time.time', ([], {}), '()\n', (25099, 25101), False, 'import time\n'), ((26914, 26925), 'time.time', 'time.time', ([], {}), '()\n', (26923, 26925), False, 'import time\n'), ((27837, 27848), 'time.time', 'time.time', ([], {}), '()\n', (27846, 27848), False, 'import time\n'), ((28023, 28048), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28046, 28048), False, 'import argparse\n'), ((4911, 4948), 'numpy.array', 'np.array', (["self.time_record['timeout']"], {}), "(self.time_record['timeout'])\n", (4919, 4948), True, 'import numpy as np\n'), ((6384, 6415), 'util.construct_data_from_json', 'construct_data_from_json', (['input'], {}), '(input)\n', (6408, 6415), False, 'from util import construct_data_from_json\n'), ((7116, 7128), 'numpy.array', 'np.array', (['fn'], {}), '(fn)\n', (7124, 7128), True, 'import numpy as np\n'), ((9527, 9584), 'preprocessing.Vocab', 'Vocab', ([], {'filename': 'smt_vocab_file', 'data': '[Constants.UNK_WORD]'}), '(filename=smt_vocab_file, data=[Constants.UNK_WORD])\n', (9532, 9584), False, 'from preprocessing import Vocab\n'), ((9626, 9740), 'dgl_treelstm.dgl_dataset.dgl_dataset', 'dgl_dataset', (['[data]', 'None'], {'vocab': 'smt_vocab', 'time_selection': 'self.time_selection', 'time_threshold': 'self.threshold'}), '([data], None, vocab=smt_vocab, time_selection=self.\n time_selection, time_threshold=self.threshold)\n', (9637, 9740), False, 'from dgl_treelstm.dgl_dataset import dgl_dataset\n'), ((10477, 10489), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10485, 10489), True, 'import numpy as np\n'), ((10497, 10509), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10505, 10509), True, 'import numpy as np\n'), ((10649, 10661), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10657, 10661), True, 'import numpy as np\n'), ((11266, 11309), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11280, 11309), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11324, 11368), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11339, 11368), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11383, 11424), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11395, 11424), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((11438, 11475), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'self.classify_result'], {}), '(truth, self.classify_result)\n', (11446, 11475), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((12602, 12660), 'preprocessing.Tree_Dataset.Tree_Dataset', 'Tree_Dataset', ([], {'treeforassert': '(True)', 'feature_number_limit': '(100)'}), '(treeforassert=True, feature_number_limit=100)\n', (12614, 12660), False, 'from preprocessing.Tree_Dataset import Tree_Dataset\n'), ((17558, 17596), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17572, 17596), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17611, 17650), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17626, 17650), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17665, 17701), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17677, 17701), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((17715, 17747), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'classify_result'], {}), '(truth, classify_result)\n', (17723, 17747), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve\n'), ((18710, 18738), 'json.dumps', 'json.dumps', (['output'], {'indent': '(4)'}), '(output, indent=4)\n', (18720, 18738), False, 'import json\n'), ((25453, 25458), 'dgl_treelstm.KNN.KNN', 'KNN', ([], {}), '()\n', (25456, 25458), False, 'from dgl_treelstm.KNN import KNN\n'), ((2753, 2778), 'dgl_treelstm.dgl_dataset.dgl_dataset', 'dgl_dataset', (['[data]', 'None'], {}), '([data], None)\n', (2764, 2778), False, 'from dgl_treelstm.dgl_dataset import dgl_dataset\n'), ((12705, 12744), 'preprocessing.Vector_Dataset.Vector_Dataset', 'Vector_Dataset', ([], {'feature_number_limit': '(50)'}), '(feature_number_limit=50)\n', (12719, 12744), False, 'from preprocessing.Vector_Dataset import Vector_Dataset\n'), ((15003, 15015), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15012, 15015), False, 'import json\n'), ((5048, 5080), 'numpy.percentile', 'np.percentile', (['solvable_list', '(95)'], {}), '(solvable_list, 95)\n', (5061, 5080), True, 'import numpy as np\n'), ((5082, 5104), 'numpy.mean', 'np.mean', (['solvable_list'], {}), '(solvable_list)\n', (5089, 5104), True, 'import numpy as np\n'), ((8250, 8297), 'train.pad_feature_batcher', 'pad_feature_batcher', (['"""cpu"""', 'self.time_selection'], {}), "('cpu', self.time_selection)\n", (8269, 8297), False, 'from train import pad_feature_batcher, batcher\n'), ((9807, 9821), 'train.batcher', 'batcher', (['"""cpu"""'], {}), "('cpu')\n", (9814, 9821), False, 'from train import pad_feature_batcher, batcher\n'), ((12788, 12826), 'preprocessing.Vector_Dataset.Vector_Dataset', 'Vector_Dataset', ([], {'feature_number_limit': '(2)'}), '(feature_number_limit=2)\n', (12802, 12826), False, 'from preprocessing.Vector_Dataset import Vector_Dataset\n'), ((12855, 12893), 'preprocessing.Tree_Dataset.Tree_Dataset', 'Tree_Dataset', ([], {'feature_number_limit': '(100)'}), '(feature_number_limit=100)\n', (12867, 12893), False, 'from preprocessing.Tree_Dataset import Tree_Dataset\n'), ((15254, 15266), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15263, 15266), False, 'import json\n'), ((13964, 14000), 'util.construct_data_from_json', 'construct_data_from_json', (['data_input'], {}), '(data_input)\n', (13988, 14000), False, 'from util import construct_data_from_json\n'), ((3058, 3096), 'train.pad_feature_batcher', 'pad_feature_batcher', (['"""cpu"""', '"""original"""'], {}), "('cpu', 'original')\n", (3077, 3096), False, 'from train import pad_feature_batcher, batcher\n')]
|
from setuptools import setup
setup(
name="integrity",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
packages=["integrity"],
entry_points={"console_scripts": ["integrity = integrity.__main__:main"]},
)
|
[
"setuptools.setup"
] |
[((30, 220), 'setuptools.setup', 'setup', ([], {'name': '"""integrity"""', 'version': '"""0.1.0"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['integrity']", 'entry_points': "{'console_scripts': ['integrity = integrity.__main__:main']}"}), "(name='integrity', version='0.1.0', author='<NAME>', author_email=\n '<EMAIL>', packages=['integrity'], entry_points={'console_scripts': [\n 'integrity = integrity.__main__:main']})\n", (35, 220), False, 'from setuptools import setup\n')]
|
from SingleLog.log import Logger
from . import data_type
from . import i18n
from . import connect_core
from . import screens
from . import exceptions
from . import command
from . import _api_util
def get_bottom_post_list(api, board):
api._goto_board(board, end=True)
logger = Logger('get_bottom_post_list', Logger.INFO)
last_screen = api.connect_core.get_screen_queue()[-1]
bottom_screen = [line for line in last_screen.split('\n') if '★' in line[:8]]
bottom_length = len(bottom_screen)
# bottom_screen = '\n'.join(bottom_screen)
# print(bottom_screen)
if bottom_length == 0:
logger.info(i18n.catch_bottom_post_success)
return list()
cmd_list = list()
cmd_list.append(command.query_post)
cmd = ''.join(cmd_list)
target_list = [
connect_core.TargetUnit(
i18n.catch_post_success,
screens.Target.QueryPost,
break_detect=True,
refresh=False,
log_level=Logger.DEBUG),
connect_core.TargetUnit(
i18n.post_deleted,
screens.Target.InBoard,
break_detect=True,
log_level=Logger.DEBUG),
connect_core.TargetUnit(
i18n.no_such_board,
screens.Target.MainMenu_Exiting,
exceptions_=exceptions.NoSuchBoard(api.config, board)
),
]
result = list()
for _ in range(0, bottom_length):
api.connect_core.send(cmd, target_list)
last_screen = api.connect_core.get_screen_queue()[-1]
lock_post, post_author, post_title, post_aid, post_web, post_money, list_date, push_number, post_index = \
_api_util.parse_query_post(
api,
last_screen)
current_post = api.get_post(board, aid=post_aid, query=True)
# print(current_post.aid)
# print(current_post.title)
# print('==========================')
result.append(current_post)
cmd_list = list()
cmd_list.append(command.enter)
cmd_list.append(command.up)
cmd_list.append(command.query_post)
cmd = ''.join(cmd_list)
logger.info(i18n.catch_bottom_post_success)
return list(reversed(result))
|
[
"SingleLog.log.Logger"
] |
[((288, 331), 'SingleLog.log.Logger', 'Logger', (['"""get_bottom_post_list"""', 'Logger.INFO'], {}), "('get_bottom_post_list', Logger.INFO)\n", (294, 331), False, 'from SingleLog.log import Logger\n')]
|
"""
Set of bot commands designed for Maths Challenges.
"""
from io import BytesIO
import aiohttp
import dateutil.parser
import httpx
from discord import Colour, Embed, File
from discord.ext import tasks
from discord.ext.commands import Bot, Cog, Context, command
from html2markdown import convert
from cdbot.constants import Maths as constants
async def get_challenges(
client: httpx.AsyncClient, page_index: int = 0, page_size: int = 999
):
"""Get challenges, given the relevant parameters."""
return (
await client.post(
constants.Challenges.URL,
headers=dict(accessToken=constants.Challenges.TOKEN),
json={
"pageIndex": page_index,
"pageSize": page_size,
"orderBy": [{"desc": "answerDate"}],
"where": [
{"field": "sys.versionStatus", "equalTo": "published"},
{"field": "sys.contentTypeId", "in": ["mathsQuiz"]},
],
"fields": ["entryTitle", "category", "sys", "description", "answer"],
},
)
).json()["items"]
async def get_challenge(number: int) -> dict:
async with httpx.AsyncClient() as client:
challenge, *_ = await get_challenges(client, page_index=number - 1, page_size=1)
question = (
await client.post(
constants.Challenges.URL,
headers=dict(accessToken=constants.Challenges.TOKEN),
json={
"pageIndex": 0,
"pageSize": 1,
"where": [
{"field": "sys.slug", "equalTo": challenge["sys"]["slug"]},
{"field": "sys.versionStatus", "equalTo": "published"},
],
},
)
).json()["items"][0]["question"]
asset = question[1]["value"]["asset"]["sys"] if len(question) > 1 else None
return {
"title": challenge["entryTitle"],
"published": dateutil.parser.isoparse(
challenge["sys"]["version"]["created"]
).strftime("%d/%m/%Y"),
"category": challenge["category"][0]["entryTitle"],
"challenge": convert(question[0]["value"]).replace(" ", "")[:-1],
"image": (
(
"https://www.kingsmathsschool.com"
"".join(
asset["uri"].rpartition("/")[:2] + (asset["properties"]["filename"],)
)
)
if asset
else ""
),
"description": challenge["description"],
"slug": challenge["sys"]["slug"],
}
class Maths(Cog):
"""Maths-related commands."""
def __init__(self, bot: Bot):
self.bot = bot
self.update_challenge.start()
@tasks.loop(minutes=1)
async def update_challenge(self):
"""Check the Kings site for the latest challenges."""
print("Updating maths challenges...")
latest_challenge = float("inf")
latest_challenge = int(
self.channel.topic.split("Nerds, the lot of you | Challenge ")[1].split(
" "
)[0][:-1]
)
async with httpx.AsyncClient() as client:
challenges = await get_challenges(client)
for number, challenge in enumerate(challenges[::-1], 1):
title = challenge["entryTitle"]
if number > latest_challenge:
await self.challenge(self.channel, len(challenges) - number + 1)
await self.channel.edit(topic=constants.Challenges.TOPIC.format(title))
print("Maths challenges successfully updated.")
@update_challenge.before_loop
async def wait_until_ready(self):
"""Wait for bot to become ready."""
await self.bot.wait_until_ready()
self.channel = self.bot.get_channel(constants.Challenges.CHANNEL)
@Cog.listener()
async def on_message(self, message):
"""Check if the message contains inline LaTeX."""
# Cap the number processed in a single message to 3 for now, to reduce spam.
for expression in constants.LATEX_RE.findall(message.content)[:3]:
await self.latex(message.channel, expression)
@command()
async def challenge(self, ctx: Context, number: int = 1):
"""Show the provided challenge number."""
challenge = await get_challenge(number)
description = challenge["challenge"]
if len(description) > 2048:
description = description[:2045] + "..."
embed = Embed(
title=challenge["title"],
colour=Colour(0xE5E242),
url=f"https://www.kingsmathsschool.com/weekly-maths-challenge/{challenge['slug']}",
description=description,
)
embed.set_image(url=challenge["image"])
embed.set_thumbnail(
url="https://pbs.twimg.com/profile_images/502115424121528320/hTQzj_-R.png"
)
embed.set_author(name="King's Maths School")
embed.set_footer(
text=f"Challenge Released: {challenge['published']} | Category: {challenge['category']}"
)
return await ctx.send(embed=embed)
@command()
async def latex(self, ctx: Context, expression: str):
"""Render a LaTeX expression."""
channel = ctx.channel.id if type(ctx) is Context else ctx.id
if channel in constants.BLOCKED_CHANNELS:
return await ctx.send(
"\N{NO ENTRY SIGN} You cannot use this command in this channel!", delete_after=10
)
options = {
"auth": {"user": "guest", "password": "<PASSWORD>"},
"latex": expression,
"resolution": 900,
"color": "969696",
}
async with aiohttp.ClientSession() as session:
async with session.post(
"http://latex2png.com/api/convert", json=options
) as response:
result = await response.json()
if result.get('url'):
async with session.get("http://latex2png.com" + result["url"]) as response:
content = await response.content.read()
else:
return
await ctx.send(file=File(BytesIO(content), filename="result.png"))
def setup(bot):
"""
Required boilerplate for adding functionality of cog to bot.
"""
bot.add_cog(Maths(bot))
|
[
"io.BytesIO",
"discord.ext.commands.command",
"html2markdown.convert",
"cdbot.constants.Maths.LATEX_RE.findall",
"cdbot.constants.Maths.Challenges.TOPIC.format",
"discord.ext.commands.Cog.listener",
"httpx.AsyncClient",
"aiohttp.ClientSession",
"discord.ext.tasks.loop",
"discord.Colour"
] |
[((2807, 2828), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(1)'}), '(minutes=1)\n', (2817, 2828), False, 'from discord.ext import tasks\n'), ((3903, 3917), 'discord.ext.commands.Cog.listener', 'Cog.listener', ([], {}), '()\n', (3915, 3917), False, 'from discord.ext.commands import Bot, Cog, Context, command\n'), ((4241, 4250), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (4248, 4250), False, 'from discord.ext.commands import Bot, Cog, Context, command\n'), ((5200, 5209), 'discord.ext.commands.command', 'command', ([], {}), '()\n', (5207, 5209), False, 'from discord.ext.commands import Bot, Cog, Context, command\n'), ((1194, 1213), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (1211, 1213), False, 'import httpx\n'), ((3203, 3222), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (3220, 3222), False, 'import httpx\n'), ((4128, 4171), 'cdbot.constants.Maths.LATEX_RE.findall', 'constants.LATEX_RE.findall', (['message.content'], {}), '(message.content)\n', (4154, 4171), True, 'from cdbot.constants import Maths as constants\n'), ((5786, 5809), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5807, 5809), False, 'import aiohttp\n'), ((4625, 4641), 'discord.Colour', 'Colour', (['(15065666)'], {}), '(15065666)\n', (4631, 4641), False, 'from discord import Colour, Embed, File\n'), ((2213, 2242), 'html2markdown.convert', 'convert', (["question[0]['value']"], {}), "(question[0]['value'])\n", (2220, 2242), False, 'from html2markdown import convert\n'), ((6258, 6274), 'io.BytesIO', 'BytesIO', (['content'], {}), '(content)\n', (6265, 6274), False, 'from io import BytesIO\n'), ((3566, 3606), 'cdbot.constants.Maths.Challenges.TOPIC.format', 'constants.Challenges.TOPIC.format', (['title'], {}), '(title)\n', (3599, 3606), True, 'from cdbot.constants import Maths as constants\n')]
|
# Generated by Django 2.1.3 on 2019-03-09 13:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('learn', '0012_challenge_has_indent'),
]
operations = [
migrations.RemoveField(
model_name='challenge',
name='has_indent',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((227, 292), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""challenge"""', 'name': '"""has_indent"""'}), "(model_name='challenge', name='has_indent')\n", (249, 292), False, 'from django.db import migrations\n')]
|
import re
def split_phone_numbers(s):
return re.split(r'[ -]', s)
for i in range(int(input())):
match = split_phone_numbers(input())
print('CountryCode=' + match[0] + ',LocalAreaCode=' + match[1] + ',Number=' + match[2])
|
[
"re.split"
] |
[((50, 69), 're.split', 're.split', (['"""[ -]"""', 's'], {}), "('[ -]', s)\n", (58, 69), False, 'import re\n')]
|
from sqlalchemy.schema import FetchedValue
from app.api.utils.models_mixins import AuditMixin, Base
from app.extensions import db
class GovernmentAgencyType(AuditMixin, Base):
__tablename__ = 'government_agency_type'
government_agency_type_code = db.Column(db.String, primary_key=True)
description = db.Column(db.String, nullable=False)
active_ind = db.Column(db.Boolean, nullable=False, server_default=FetchedValue())
def __repr__(self):
return f'<{self.__class__.__name__} {self.government_agency_type_code}>'
@classmethod
def get_all(cls):
return cls.query.all()
|
[
"app.extensions.db.Column",
"sqlalchemy.schema.FetchedValue"
] |
[((258, 296), 'app.extensions.db.Column', 'db.Column', (['db.String'], {'primary_key': '(True)'}), '(db.String, primary_key=True)\n', (267, 296), False, 'from app.extensions import db\n'), ((315, 351), 'app.extensions.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (324, 351), False, 'from app.extensions import db\n'), ((422, 436), 'sqlalchemy.schema.FetchedValue', 'FetchedValue', ([], {}), '()\n', (434, 436), False, 'from sqlalchemy.schema import FetchedValue\n')]
|
"""This file is the main module which contains the app.
"""
from app import create_app, db
from app.auth.auth_cli import getToken
from decouple import config
from flask.cli import AppGroup
import click
import config as configs
# Figure out which config we want based on the `ENV` env variable, default to local
from app.utils.backfill import backfill
env_config = config("ENV", cast=str, default="localpsql")
config_dict = {
'production': configs.Production,
'localpsql': configs.LocalPSQLConfig,
'develop': configs.Develop,
'testing': configs.Testing,
}
app = create_app(config_dict[env_config]())
# for production, require a real SECRET_KEY to be set
if env_config == 'production':
assert app.config['SECRET_KEY'] != "12345", "You must set a secure SECRET_KEY"
# register a custom command to get authentication tokens
auth_cli = AppGroup('auth')
@auth_cli.command("getToken")
@click.argument('name')
def getToken_cli(name):
click.echo(getToken(name))
app.cli.add_command(auth_cli)
@app.cli.command()
def deploy():
"""Run deployment tasks"""
# e.g. this _used_ to be where a database migration would run via `upgrade()`
pass
utils_cli = AppGroup('utils')
@utils_cli.command("backfill")
@click.argument('input_file')
def backfill_cli(input_file):
backfill(input_file)
app.cli.add_command(utils_cli)
|
[
"click.argument",
"app.auth.auth_cli.getToken",
"decouple.config",
"flask.cli.AppGroup",
"app.utils.backfill.backfill"
] |
[((367, 411), 'decouple.config', 'config', (['"""ENV"""'], {'cast': 'str', 'default': '"""localpsql"""'}), "('ENV', cast=str, default='localpsql')\n", (373, 411), False, 'from decouple import config\n'), ((857, 873), 'flask.cli.AppGroup', 'AppGroup', (['"""auth"""'], {}), "('auth')\n", (865, 873), False, 'from flask.cli import AppGroup\n'), ((905, 927), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (919, 927), False, 'import click\n'), ((1185, 1202), 'flask.cli.AppGroup', 'AppGroup', (['"""utils"""'], {}), "('utils')\n", (1193, 1202), False, 'from flask.cli import AppGroup\n'), ((1235, 1263), 'click.argument', 'click.argument', (['"""input_file"""'], {}), "('input_file')\n", (1249, 1263), False, 'import click\n'), ((1298, 1318), 'app.utils.backfill.backfill', 'backfill', (['input_file'], {}), '(input_file)\n', (1306, 1318), False, 'from app.utils.backfill import backfill\n'), ((967, 981), 'app.auth.auth_cli.getToken', 'getToken', (['name'], {}), '(name)\n', (975, 981), False, 'from app.auth.auth_cli import getToken\n')]
|
import json
from pathlib import Path
from pbpstats.data_loader.abs_data_loader import check_file_directory
from pbpstats.data_loader.stats_nba.file_loader import StatsNbaFileLoader
class StatsNbaShotsFileLoader(StatsNbaFileLoader):
"""
A ``StatsNbaShotsFileLoader`` object should be instantiated and passed into ``StatsNbaShotsLoader`` when loading data from file
:param str file_directory:
Directory in which data should be loaded from.
The specific file location will be `stats_home_shots_<game_id>.json`
and `stats_away_shots_<game_id>.json` in the `/game_details` subdirectory.
"""
def __init__(self, file_directory):
self.file_directory = file_directory
@check_file_directory
def load_data(self, game_id):
self.game_id = game_id
self.home_file_path = (
f"{self.file_directory }/game_details/stats_home_shots_{self.game_id}.json"
)
self.away_file_path = (
f"{self.file_directory }/game_details/stats_away_shots_{self.game_id}.json"
)
home_data_file = Path(self.home_file_path)
if not home_data_file.is_file():
raise Exception(f"{self.home_file_path} does not exist")
with open(self.home_file_path) as json_data:
self.home_source_data = json.load(json_data)
away_data_file = Path(self.away_file_path)
if not away_data_file.is_file():
raise Exception(f"{self.away_file_path} does not exist")
with open(self.away_file_path) as json_data:
self.away_source_data = json.load(json_data)
return self.home_source_data, self.away_source_data
|
[
"pathlib.Path",
"json.load"
] |
[((1094, 1119), 'pathlib.Path', 'Path', (['self.home_file_path'], {}), '(self.home_file_path)\n', (1098, 1119), False, 'from pathlib import Path\n'), ((1366, 1391), 'pathlib.Path', 'Path', (['self.away_file_path'], {}), '(self.away_file_path)\n', (1370, 1391), False, 'from pathlib import Path\n'), ((1319, 1339), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (1328, 1339), False, 'import json\n'), ((1591, 1611), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (1600, 1611), False, 'import json\n')]
|
import numpy as np
import pandas as pd
students = 250
nr_to_label = {0: 'bike', 1: 'car', 2: 'bus 40', 3: 'bus 240'}
label_to_nr = {v: k for k, v in nr_to_label.items()}
def choice(income, distance, lazy):
"""
Generate a choice based on the params
"""
if income < 500:
if distance < 8 and distance * lazy * lazy < 120:
return label_to_nr['bike']
elif income > 350:
return label_to_nr['bus 40']
else:
return label_to_nr['bus 240']
if lazy < 3:
return label_to_nr['bus 40']
return label_to_nr['car']
# generate some random numbers
idc = np.array([np.round(np.random.normal(300, 200, size=students).clip(min=0)),
np.random.poisson(8, size=students),
np.random.randint(1, 10, size=students)]).T
# get their favourite mode of transport
idct = np.hstack((idc, np.array([[choice(*row) for row in idc]]).T))
# add some randomness by shuffling some labels
replace = np.where(np.random.random(size=students) < 0.15)[0]
idct[replace, 3] = np.random.randint(0, 4, size=replace.size)
# store result
df = pd.DataFrame(idct, columns=['income', 'distance', 'lazy', 'transport'])
df['transport'] = df['transport'].map(nr_to_label)
df.to_csv('transport.csv', sep=';', encoding='utf-8')
|
[
"pandas.DataFrame",
"numpy.random.randint",
"numpy.random.random",
"numpy.random.poisson",
"numpy.random.normal"
] |
[((1058, 1100), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': 'replace.size'}), '(0, 4, size=replace.size)\n', (1075, 1100), True, 'import numpy as np\n'), ((1122, 1193), 'pandas.DataFrame', 'pd.DataFrame', (['idct'], {'columns': "['income', 'distance', 'lazy', 'transport']"}), "(idct, columns=['income', 'distance', 'lazy', 'transport'])\n", (1134, 1193), True, 'import pandas as pd\n'), ((722, 757), 'numpy.random.poisson', 'np.random.poisson', (['(8)'], {'size': 'students'}), '(8, size=students)\n', (739, 757), True, 'import numpy as np\n'), ((775, 814), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': 'students'}), '(1, 10, size=students)\n', (792, 814), True, 'import numpy as np\n'), ((996, 1027), 'numpy.random.random', 'np.random.random', ([], {'size': 'students'}), '(size=students)\n', (1012, 1027), True, 'import numpy as np\n'), ((650, 691), 'numpy.random.normal', 'np.random.normal', (['(300)', '(200)'], {'size': 'students'}), '(300, 200, size=students)\n', (666, 691), True, 'import numpy as np\n')]
|
# Generated by Django 3.1.7 on 2021-04-22 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20210414_1640'),
]
operations = [
migrations.AddField(
model_name='document',
name='password',
field=models.CharField(blank=True, max_length=16),
),
migrations.AlterField(
model_name='document',
name='file_url',
field=models.CharField(blank=True, max_length=255),
),
]
|
[
"django.db.models.CharField"
] |
[((350, 393), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(16)'}), '(blank=True, max_length=16)\n', (366, 393), False, 'from django.db import migrations, models\n'), ((524, 568), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)'}), '(blank=True, max_length=255)\n', (540, 568), False, 'from django.db import migrations, models\n')]
|
from __future__ import print_function
import os
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient.http import MediaFileUpload
import pandas as pd
import sys
def upload_files():
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
try:
os.remove("fileIds.csv")
print('Arquivo fileIds removido com sucesso!')
except:
print("Oops!", sys.exc_info()[0], "occurred.")
print('Arquivo fileIds não encontrado!')
SCOPES = 'PATH/drive.file'
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets(
'client_secret.json', scope=SCOPES)
creds = tools.run_flow(flow, store,flags) \
if flags else tools.run(flow,store)
DRIVE = build('drive', 'v3', http = creds.authorize(Http()))
FILES = (
('YOUR_FILE.csv','(application/vnd.ms-excel)'),
('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
)
folder_id = 'YOUR FOLDER ID'
filesIds = []
for filename, mimeType in FILES:
metadata = {'name': filename,
'parents': [folder_id],
'resumable':True,}
if mimeType:
metadata['mimeType'] = mimeType
res = DRIVE.files().create(body=metadata, media_body=filename).execute()
filesIds.append(res.get('id'))
if res:
print('Upload "%s" (%s): ' %(filename,res['mimeType']))
print('File ID: %s' %(res.get('id')))
df = pd.DataFrame(filesIds)
df.to_csv('fileIds.csv', sep=';', index=False, header=None)
|
[
"oauth2client.file.Storage",
"pandas.DataFrame",
"os.remove",
"oauth2client.tools.run",
"httplib2.Http",
"argparse.ArgumentParser",
"oauth2client.client.flow_from_clientsecrets",
"oauth2client.tools.run_flow",
"sys.exc_info"
] |
[((695, 723), 'oauth2client.file.Storage', 'file.Storage', (['"""storage.json"""'], {}), "('storage.json')\n", (707, 723), False, 'from oauth2client import file, client, tools\n'), ((2008, 2030), 'pandas.DataFrame', 'pd.DataFrame', (['filesIds'], {}), '(filesIds)\n', (2020, 2030), True, 'import pandas as pd\n'), ((445, 469), 'os.remove', 'os.remove', (['"""fileIds.csv"""'], {}), "('fileIds.csv')\n", (454, 469), False, 'import os\n'), ((801, 867), 'oauth2client.client.flow_from_clientsecrets', 'client.flow_from_clientsecrets', (['"""client_secret.json"""'], {'scope': 'SCOPES'}), "('client_secret.json', scope=SCOPES)\n", (831, 867), False, 'from oauth2client import file, client, tools\n'), ((899, 933), 'oauth2client.tools.run_flow', 'tools.run_flow', (['flow', 'store', 'flags'], {}), '(flow, store, flags)\n', (913, 933), False, 'from oauth2client import file, client, tools\n'), ((966, 988), 'oauth2client.tools.run', 'tools.run', (['flow', 'store'], {}), '(flow, store)\n', (975, 988), False, 'from oauth2client import file, client, tools\n'), ((313, 363), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[tools.argparser]'}), '(parents=[tools.argparser])\n', (336, 363), False, 'import argparse\n'), ((1045, 1051), 'httplib2.Http', 'Http', ([], {}), '()\n', (1049, 1051), False, 'from httplib2 import Http\n'), ((563, 577), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (575, 577), False, 'import sys\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from importlib import import_module
import json
import os
import pkgutil
import yaml
from azure.cli.core.application import APPLICATION, Configuration
from azure.cli.core.commands import _update_command_definitions, BLACKLISTED_MODS
from azure.cli.core.help_files import helps
from azure.cli.core.commands.arm import add_id_parameters
import azclishell.configuration as config
class LoadFreshTable(object):
"""
this class generates and dumps the fresh command table into a file
as well as installs all the modules
"""
def __init__(self):
self.command_table = None
def install_modules(self):
installed_command_modules = []
for cmd in self.command_table:
try:
self.command_table[cmd].load_arguments()
except (ImportError, ValueError):
pass
mods_ns_pkg = import_module('azure.cli.command_modules')
for _, modname, _ in pkgutil.iter_modules(mods_ns_pkg.__path__):
if modname not in BLACKLISTED_MODS:
installed_command_modules.append(modname)
for mod in installed_command_modules:
try:
mod = import_module('azure.cli.command_modules.' + mod)
mod.load_params(mod)
mod.load_commands()
except Exception: # pylint: disable=broad-except
print("Error loading: {}".format(mod))
_update_command_definitions(self.command_table)
def load_help_files(self, data):
""" loads all the extra information from help files """
for cmd in helps:
diction_help = yaml.load(helps[cmd])
# extra descriptions
if "short-summary" in diction_help:
if cmd in data:
data[cmd]['help'] = diction_help["short-summary"]
else:
data[cmd] = {
'help': diction_help["short-summary"],
'parameters': {}
}
if callable(data[cmd]['help']):
data[cmd]['help'] = data[cmd]['help']()
# if there is extra help for this command but it's not reflected in the command table
if cmd not in data:
print("Command: {} not in Command Table".format(cmd))
continue
# extra parameters
if "parameters" in diction_help:
for param in diction_help["parameters"]:
if param["name"].split()[0] not in data[cmd]['parameters']:
options = {
'name': [],
'required': '',
'help': ''
}
data[cmd]['parameters'] = {
param["name"].split()[0]: options
}
if "short-summary" in param:
data[cmd]['parameters'][param["name"].split()[0]]['help']\
= param["short-summary"]
# extra examples
if "examples" in diction_help:
examples = []
for example in diction_help["examples"]:
examples.append([example['name'], example['text']])
data[cmd]['examples'] = examples
def dump_command_table(self):
""" dumps the command table """
self.command_table = APPLICATION.configuration.get_command_table()
command_file = config.CONFIGURATION.get_help_files()
self.install_modules()
add_id_parameters(self.command_table)
data = {}
for cmd in self.command_table:
com_descrip = {} # commands to their descriptions, examples, and parameter info
param_descrip = {} # parameters to their aliases, required, and descriptions
try:
command_description = self.command_table[cmd].description
if callable(command_description):
command_description = command_description()
com_descrip['help'] = command_description
com_descrip['examples'] = ""
# checking all the parameters for a single command
for key in self.command_table[cmd].arguments:
required = ""
help_desc = ""
if self.command_table[cmd].arguments[key].type.settings.get('required'):
required = "[REQUIRED]"
if self.command_table[cmd].arguments[key].type.settings.get('help'):
help_desc = self.command_table[cmd].arguments[key].type.settings.get('help')
# checking aliasing
name_options = []
for name in self.command_table[cmd].arguments[key].options_list:
name_options.append(name)
options = {
'name': name_options,
'required': required,
'help': help_desc
}
# the key is the first alias option
param_descrip[self.command_table[cmd].arguments[key].options_list[0]] = options
com_descrip['parameters'] = param_descrip
data[cmd] = com_descrip
except (ImportError, ValueError):
pass
self.load_help_files(data)
# dump into the cache file
with open(os.path.join(get_cache_dir(), command_file), 'w') as help_file:
json.dump(data, help_file)
def get_cache_dir():
""" gets the location of the cache """
azure_folder = config.get_config_dir()
cache_path = os.path.join(azure_folder, 'cache')
if not os.path.exists(azure_folder):
os.makedirs(azure_folder)
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
FRESH_TABLE = LoadFreshTable()
|
[
"azure.cli.core.commands.arm.add_id_parameters",
"azure.cli.core.commands._update_command_definitions",
"yaml.load",
"json.dump",
"os.makedirs",
"importlib.import_module",
"azure.cli.core.application.APPLICATION.configuration.get_command_table",
"os.path.exists",
"pkgutil.iter_modules",
"azclishell.configuration.get_config_dir",
"azclishell.configuration.CONFIGURATION.get_help_files",
"os.path.join"
] |
[((6118, 6141), 'azclishell.configuration.get_config_dir', 'config.get_config_dir', ([], {}), '()\n', (6139, 6141), True, 'import azclishell.configuration as config\n'), ((6159, 6194), 'os.path.join', 'os.path.join', (['azure_folder', '"""cache"""'], {}), "(azure_folder, 'cache')\n", (6171, 6194), False, 'import os\n'), ((1330, 1372), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['mods_ns_pkg.__path__'], {}), '(mods_ns_pkg.__path__)\n', (1350, 1372), False, 'import pkgutil\n'), ((1815, 1862), 'azure.cli.core.commands._update_command_definitions', '_update_command_definitions', (['self.command_table'], {}), '(self.command_table)\n', (1842, 1862), False, 'from azure.cli.core.commands import _update_command_definitions, BLACKLISTED_MODS\n'), ((3848, 3893), 'azure.cli.core.application.APPLICATION.configuration.get_command_table', 'APPLICATION.configuration.get_command_table', ([], {}), '()\n', (3891, 3893), False, 'from azure.cli.core.application import APPLICATION, Configuration\n'), ((3917, 3954), 'azclishell.configuration.CONFIGURATION.get_help_files', 'config.CONFIGURATION.get_help_files', ([], {}), '()\n', (3952, 3954), True, 'import azclishell.configuration as config\n'), ((3995, 4032), 'azure.cli.core.commands.arm.add_id_parameters', 'add_id_parameters', (['self.command_table'], {}), '(self.command_table)\n', (4012, 4032), False, 'from azure.cli.core.commands.arm import add_id_parameters\n'), ((6206, 6234), 'os.path.exists', 'os.path.exists', (['azure_folder'], {}), '(azure_folder)\n', (6220, 6234), False, 'import os\n'), ((6244, 6269), 'os.makedirs', 'os.makedirs', (['azure_folder'], {}), '(azure_folder)\n', (6255, 6269), False, 'import os\n'), ((6281, 6307), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (6295, 6307), False, 'import os\n'), ((6317, 6340), 'os.makedirs', 'os.makedirs', (['cache_path'], {}), '(cache_path)\n', (6328, 6340), False, 'import os\n'), ((1258, 1300), 'importlib.import_module', 'import_module', (['"""azure.cli.command_modules"""'], {}), "('azure.cli.command_modules')\n", (1271, 1300), False, 'from importlib import import_module\n'), ((2018, 2039), 'yaml.load', 'yaml.load', (['helps[cmd]'], {}), '(helps[cmd])\n', (2027, 2039), False, 'import yaml\n'), ((6006, 6032), 'json.dump', 'json.dump', (['data', 'help_file'], {}), '(data, help_file)\n', (6015, 6032), False, 'import json\n'), ((1566, 1615), 'importlib.import_module', 'import_module', (["('azure.cli.command_modules.' + mod)"], {}), "('azure.cli.command_modules.' + mod)\n", (1579, 1615), False, 'from importlib import import_module\n')]
|
# test function gradient
def limetr_gradient():
import numpy as np
from limetr.__init__ import LimeTr
ok = True
# setup test problem
# -------------------------------------------------------------------------
model = LimeTr.testProblem(use_trimming=True,
use_constraints=True,
use_regularizer=True,
use_uprior=True,
use_gprior=True,
know_obs_std=False,
share_obs_std=True)
tol = 1e-6
# test the gradient
# -------------------------------------------------------------------------
x = np.random.randn(model.k)
x[model.idx_gamma] = 0.1
x[model.idx_delta] = 0.1
tr_grad = model.gradient(x, use_ad=True)
my_grad = model.gradient(x)
err = np.linalg.norm(tr_grad - my_grad)
ok = ok and err < tol
if not ok:
print('err', err)
print('tr_grad', tr_grad)
print('my_grad', my_grad)
return ok
|
[
"limetr.__init__.LimeTr.testProblem",
"numpy.linalg.norm",
"numpy.random.randn"
] |
[((244, 408), 'limetr.__init__.LimeTr.testProblem', 'LimeTr.testProblem', ([], {'use_trimming': '(True)', 'use_constraints': '(True)', 'use_regularizer': '(True)', 'use_uprior': '(True)', 'use_gprior': '(True)', 'know_obs_std': '(False)', 'share_obs_std': '(True)'}), '(use_trimming=True, use_constraints=True, use_regularizer\n =True, use_uprior=True, use_gprior=True, know_obs_std=False,\n share_obs_std=True)\n', (262, 408), False, 'from limetr.__init__ import LimeTr\n'), ((715, 739), 'numpy.random.randn', 'np.random.randn', (['model.k'], {}), '(model.k)\n', (730, 739), True, 'import numpy as np\n'), ((887, 920), 'numpy.linalg.norm', 'np.linalg.norm', (['(tr_grad - my_grad)'], {}), '(tr_grad - my_grad)\n', (901, 920), True, 'import numpy as np\n')]
|
from common.vec_env.vec_logger import VecLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
GAMMA = 0.99
TAU = 1.00
N_STEPS = 5
CLIP_GRAD = 50
COEF_VALUE = 0.5
COEF_ENTROPY = 0.01
def train(args, venv, model, path, device):
N = args.num_processes
net = model(venv.observation_space.shape[0], venv.action_space.n).to(device)
net.train()
optimizer = optim.Adam(net.parameters(), lr=args.lr, amsgrad=args.amsgrad)
vlogger = VecLogger(N=N, path=path)
vlogger.add_model(net)
state = venv.reset()
state_v = torch.from_numpy(state).float().to(device)
hx = torch.zeros(N, 512).to(device)
cx = torch.zeros(N, 512).to(device)
t = 0
while t < args.num_timesteps:
# Reset gradients
loss_value_v = torch.zeros(1, 1).to(device)
loss_policy_v = torch.zeros(1, 1).to(device)
loss_entropy_v = torch.zeros(1, 1).to(device)
gae_v = torch.zeros(N, 1).to(device)
hx.detach_()
cx.detach_()
reward_vs = []
done_vs = []
value_vs = []
log_prob_action_vs = []
entropy_vs = []
for step in range(N_STEPS):
# Perform action according to policy
value_v, logit_v, (hx, cx) = net(state_v, (hx, cx))
prob_v = F.softmax(logit_v, dim=1)
action_v = prob_v.multinomial(num_samples=1)
action = action_v.data.cpu().numpy()
log_prob_v = F.log_softmax(logit_v, dim=1)
log_prob_action_v = log_prob_v.gather(1, action_v)
entropy_v = -(log_prob_v * prob_v).sum(dim=1, keepdim=True)
# Receive reward and new state
state, reward, done, info = venv.step(action)
t += N
reward = np.expand_dims(reward, axis=1)
done = np.expand_dims(done, axis=1)
info = np.expand_dims(info, axis=1)
vlogger.log(t, reward, info)
state_v = torch.from_numpy(state).float().to(device)
reward_v = torch.from_numpy(reward).float().to(device)
done_v = torch.from_numpy(done.astype('int')).float().to(device)
reward_vs.append(reward_v)
done_vs.append(done_v)
value_vs.append(value_v)
log_prob_action_vs.append(log_prob_action_v)
entropy_vs.append(entropy_v)
# Reset the LSTM state if done
hx = (1 - done_v) * hx
cx = (1 - done_v) * cx
# R
R_v = (1 - done_v) * net(state_v, (hx, cx))[0]
value_vs.append(R_v)
for i in reversed(range(len(reward_vs))):
R_v = (1 - done_vs[i]) * GAMMA * R_v + reward_vs[i]
# Accumulate gradients
adv_v = R_v.detach() - value_vs[i]
# Generalized Advantage Estimataion
delta_t = reward_vs[i] + (1 - done_vs[i]) * GAMMA * value_vs[i + 1] - value_vs[i]
gae_v = gae_v * (1 - done_vs[i]) * GAMMA * TAU + delta_t
loss_value_v += (0.5 * adv_v.pow(2)).sum()
loss_policy_v -= (log_prob_action_vs[i] * gae_v.detach()).sum() # cautious: detach()
loss_entropy_v -= (entropy_vs[i]).sum()
net.zero_grad()
loss_v = COEF_VALUE * loss_value_v + loss_policy_v + COEF_ENTROPY * loss_entropy_v
loss_v.backward()
nn.utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
optimizer.step()
venv.close()
|
[
"numpy.expand_dims",
"torch.nn.functional.softmax",
"common.vec_env.vec_logger.VecLogger",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.from_numpy"
] |
[((521, 546), 'common.vec_env.vec_logger.VecLogger', 'VecLogger', ([], {'N': 'N', 'path': 'path'}), '(N=N, path=path)\n', (530, 546), False, 'from common.vec_env.vec_logger import VecLogger\n'), ((666, 685), 'torch.zeros', 'torch.zeros', (['N', '(512)'], {}), '(N, 512)\n', (677, 685), False, 'import torch\n'), ((706, 725), 'torch.zeros', 'torch.zeros', (['N', '(512)'], {}), '(N, 512)\n', (717, 725), False, 'import torch\n'), ((1350, 1375), 'torch.nn.functional.softmax', 'F.softmax', (['logit_v'], {'dim': '(1)'}), '(logit_v, dim=1)\n', (1359, 1375), True, 'import torch.nn.functional as F\n'), ((1508, 1537), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit_v'], {'dim': '(1)'}), '(logit_v, dim=1)\n', (1521, 1537), True, 'import torch.nn.functional as F\n'), ((1816, 1846), 'numpy.expand_dims', 'np.expand_dims', (['reward'], {'axis': '(1)'}), '(reward, axis=1)\n', (1830, 1846), True, 'import numpy as np\n'), ((1866, 1894), 'numpy.expand_dims', 'np.expand_dims', (['done'], {'axis': '(1)'}), '(done, axis=1)\n', (1880, 1894), True, 'import numpy as np\n'), ((1914, 1942), 'numpy.expand_dims', 'np.expand_dims', (['info'], {'axis': '(1)'}), '(info, axis=1)\n', (1928, 1942), True, 'import numpy as np\n'), ((832, 849), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (843, 849), False, 'import torch\n'), ((885, 902), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (896, 902), False, 'import torch\n'), ((939, 956), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (950, 956), False, 'import torch\n'), ((984, 1001), 'torch.zeros', 'torch.zeros', (['N', '(1)'], {}), '(N, 1)\n', (995, 1001), False, 'import torch\n'), ((614, 637), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (630, 637), False, 'import torch\n'), ((2007, 2030), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2023, 2030), False, 'import torch\n'), ((2073, 2097), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (2089, 2097), False, 'import torch\n')]
|
# Copyright (c) 2020 University of Illinois
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
#
# cache.py
#
# Create and populate a cache class
from xml.etree import ElementTree
from xml.dom import minidom
class Cache:
def __init__(self, component_id, component_name, \
stat_dict, config_dict, sim_dict, ruby=False):
self.name = "cache"
self.id = "cache"
self.parameters = \
{
"config" : \
["0,1,2,3,4,5,6,7","Cache Capacity, Block Width, Associativity,"\
" Bank, Throughput w.r.t. core clock, Latency w.r.t. core clock," \
" Output Width, Cache Policy: 0 no write or write-though with" \
" non-write allocate; 1 write-back with write-allocate"],
"buffer_sizes" : \
["0,1,2,3","Cache controller buffer sizes: miss_buffer_size" \
"(MSHR), fill_buffer_size, prefetch_buffer_size, wb_buffer_size"],
"clockrate" : \
["1000","Clock rate in MHz"],
"vdd" : \
["1.2","Voltage"],
"power_gating_vcc" : \
["-1","-1 means default power gating"],
"ports" : \
["1,1,1","Number of R, W, RW ports"],
"device_type" : \
["0","0: HP, 1: LP"]
}
self.stats = \
{
"duty_cycle" : \
["1.0",""],
"read_accesses" : \
["0", "Cache Read Accesses Total"],
"read_misses" : \
["0", "Cache Read Req Misses Total"],
"write_accesses" : \
["0", "Cache Write Accesses Total"],
"write_misses" : \
["0", "Cache Write Req Misses Total"],
"conflicts" : \
["0", "Cache Replacements"]
}
self.name = component_name
self.id = component_id
# Init the Cache Parameters and Stats:
if ruby:
self.parameters["config"][0]= \
",".join([config_dict["size"],config_dict["tags.block_size"], \
config_dict["assoc"],"1","1",config_dict["response_latency"], \
config_dict["tags.block_size"],"0"])
self.parameters["buffer_sizes"][0]= \
",".join([config_dict["mshrs"],config_dict["mshrs"], \
config_dict["mshrs"],config_dict["mshrs"]])
self.parameters["clockrate"][0]= \
str((1.0e-6/float(config_dict["clock"]))*1.0e12)
self.parameters["vdd"][0]= \
str(float(sim_dict["voltage"]))
self.stats["read_accesses"][0]= \
str(int(stat_dict["ReadExReq_accesses::total"][1]) \
+int(stat_dict["ReadCleanReq_accesses::total"][1]) \
+int(stat_dict["ReadSharedReq_accesses::total"][1]))
self.stats["read_misses"][0]= \
str(int(stat_dict["ReadCleanReq_misses::total"][1]) \
+int(stat_dict["ReadExReq_misses::total"][1]))
self.stats["write_accesses"][0]= \
str(int(stat_dict["WritebackDirty_accesses::total"][1]) \
+int(stat_dict["WritebackClean_accesses::total"][1]))
self.stats["write_misses"][0]= \
str(int(stat_dict["WritebackClean_accesses::total"][1]) \
+int(stat_dict["WritebackClean_accesses::total"][1]) \
-int(stat_dict["WritebackDirty_hits::total"][1]) \
-int(stat_dict["WritebackDirty_hits::total"][1]))
self.stats["conflicts"][0]= \
str(int(stat_dict["replacements"][1]))
else:
self.parameters["config"][0]= \
",".join([config_dict["size"],config_dict["tags.block_size"], \
config_dict["assoc"],"1","1",config_dict["response_latency"], \
config_dict["tags.block_size"],"0"])
self.parameters["buffer_sizes"][0]= \
",".join([config_dict["mshrs"],config_dict["mshrs"], \
config_dict["mshrs"],config_dict["mshrs"]])
self.parameters["clockrate"][0]= \
str((1.0e-6/float(config_dict["clock"]))*1.0e12)
self.parameters["vdd"][0]= \
str(float(sim_dict["voltage"]))
self.stats["read_accesses"][0]= \
str(int(stat_dict["ReadExReq_accesses::total"][1]) \
+int(stat_dict["ReadCleanReq_accesses::total"][1]) \
+int(stat_dict["ReadSharedReq_accesses::total"][1]))
self.stats["read_misses"][0]= \
str(int(stat_dict["ReadCleanReq_misses::total"][1]) \
+int(stat_dict["ReadExReq_misses::total"][1]))
self.stats["write_accesses"][0]= \
str(int(stat_dict["WritebackDirty_accesses::total"][1]) \
+int(stat_dict["WritebackClean_accesses::total"][1]))
self.stats["write_misses"][0]= \
str(int(stat_dict["WritebackClean_accesses::total"][1]) \
+int(stat_dict["WritebackClean_accesses::total"][1]) \
-int(stat_dict["WritebackDirty_hits::total"][1]) \
-int(stat_dict["WritebackDirty_hits::total"][1]))
self.stats["conflicts"][0]= \
str(int(stat_dict["replacements"][1]))
def xml(self):
""" Build an XML Tree from the parameters, stats, and subcomponents """
top = ElementTree.Element('component', id=self.id, name=self.name)
for key in sorted(self.parameters):
top.append(ElementTree.Comment( \
", ".join(['param', key, self.parameters[key][1]])))
top.append(ElementTree.Element( \
'param', name=key, value=self.parameters[key][0]))
for key in sorted(self.stats):
top.append(ElementTree.Comment( \
", ".join(['stat', key, self.stats[key][1]])))
top.append(ElementTree.Element( \
'stat', name=key, value=self.stats[key][0]))
return top
class ICache:
def __init__(self, component_id, component_name, \
stat_dict, config_dict, sim_dict):
self.name = "icache"
self.id = "icache"
self.parameters = \
{
"icache_config" : \
["0,1,2,3,4,5,6,7","Cache Capacity, Block Width,"
"Associativity, Bank, Throughput w.r.t. core clock, Latency"
"w.r.t. core clock, Output Width, Cache Policy: 0 no write or"
"write-though with non-write allocate; 1 write-back with"
"write-allocate"],
"buffer_sizes" : \
["0,1,2,3","Cache controller buffer sizes:"
"miss_buffer_size(MSHR), fill_buffer_size,"
"prefetch_buffer_size, wb_buffer_size"]
}
self.stats = \
{
"read_accesses" : ["0", "Cache Read Accesses Total"],
"read_misses" : ["0", "Cache Read Req Misses Total"],
"conflicts" : ["0", "Cache Replacements"]
}
self.name = component_name
self.id = component_id
# Init the Cache Parameters and Stats:
self.parameters["icache_config"][0]= \
",".join([config_dict["size"],config_dict["tags.block_size"], \
config_dict["assoc"],"1","1",config_dict["response_latency"], \
config_dict["tags.block_size"],"0"])
self.parameters["buffer_sizes"][0]= \
",".join([config_dict["mshrs"],config_dict["mshrs"], \
config_dict["mshrs"],config_dict["mshrs"]])
self.stats["read_accesses"][0]= \
str(int(stat_dict["ReadReq_accesses::total"][1]))
self.stats["read_misses"][0]= \
str(int(stat_dict["ReadReq_misses::total"][1]))
self.stats["conflicts"][0]=str(int(stat_dict["replacements"][1]))
def xml(self):
""" Build an XML Tree from the parameters, stats, and
subcomponents """
top = ElementTree.Element('component', id=self.id, name=self.name)
for key in sorted(self.parameters):
top.append(ElementTree.Comment( \
", ".join(['param', key, self.parameters[key][1]])))
top.append(ElementTree.Element( \
'param', name=key, value=self.parameters[key][0]))
for key in sorted(self.stats):
top.append(ElementTree.Comment( \
", ".join(['stat', key, self.stats[key][1]])))
top.append(ElementTree.Element( \
'stat', name=key, value=self.stats[key][0]))
return top
class DCache:
def __init__(self, component_id, component_name, \
stat_dict, config_dict, sim_dict):
self.name = "dcache"
self.id = "dcache"
self.parameters = \
{
"dcache_config" :
["0,1,2,3,4,5,6,7","Cache Capacity, Block Width,"
"Associativity, Bank, Throughput w.r.t. core clock, Latency"
"w.r.t. core clock, Output Width, Cache Policy: 0 no write or"
"write-though with non-write allocate; 1 write-back with"
"write-allocate"],
"buffer_sizes" :
["0,1,2,3","Cache controller buffer sizes:"
"miss_buffer_size(MSHR), fill_buffer_size,"
"prefetch_buffer_size, wb_buffer_size"]
}
self.stats = \
{
"read_accesses" : ["0", "Cache Read Accesses Total"],
"read_misses" : ["0", "Cache Read Req Misses Total"],
"write_accesses" : ["0", "Cache Write Accesses Total"],
"write_misses" : ["0", "Cache Write Req Misses Total"],
"conflicts" : ["0", "Cache Replacements"]
}
self.name = component_name
self.id = component_id
# Init the Cache Parameters and Stats:
self.parameters["dcache_config"][0]= \
",".join([config_dict["size"],config_dict["tags.block_size"], \
config_dict["assoc"],"1","1",config_dict["response_latency"], \
config_dict["tags.block_size"],"0"])
self.parameters["buffer_sizes"][0]= \
",".join([config_dict["mshrs"],config_dict["mshrs"], \
config_dict["mshrs"],config_dict["mshrs"]])
self.stats["read_accesses"][0]= \
str(int(stat_dict["ReadReq_accesses::total"][1]))
self.stats["read_misses"][0]= \
str(int(stat_dict["ReadReq_misses::total"][1]))
self.stats["write_accesses"][0]= \
str(int(stat_dict["WriteReq_accesses::total"][1]))
self.stats["write_misses"][0]= \
str(int(stat_dict["WriteReq_misses::total"][1]))
self.stats["conflicts"][0]= \
str(int(stat_dict["replacements"][1]))
def xml(self):
""" Build an XML Tree from the parameters, stats, and
subcomponents """
top = ElementTree.Element('component', id=self.id, name=self.name)
for key in sorted(self.parameters):
top.append(ElementTree.Comment( \
", ".join(['param', key, self.parameters[key][1]])))
top.append(ElementTree.Element( \
'param', name=key, value=self.parameters[key][0]))
for key in sorted(self.stats):
top.append(ElementTree.Comment( \
", ".join(['stat', key, self.stats[key][1]])))
top.append(ElementTree.Element( \
'stat', name=key, value=self.stats[key][0]))
return top
|
[
"xml.etree.ElementTree.Element"
] |
[((6252, 6312), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""component"""'], {'id': 'self.id', 'name': 'self.name'}), "('component', id=self.id, name=self.name)\n", (6271, 6312), False, 'from xml.etree import ElementTree\n'), ((8546, 8606), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""component"""'], {'id': 'self.id', 'name': 'self.name'}), "('component', id=self.id, name=self.name)\n", (8565, 8606), False, 'from xml.etree import ElementTree\n'), ((11155, 11215), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""component"""'], {'id': 'self.id', 'name': 'self.name'}), "('component', id=self.id, name=self.name)\n", (11174, 11215), False, 'from xml.etree import ElementTree\n'), ((6471, 6540), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""param"""'], {'name': 'key', 'value': 'self.parameters[key][0]'}), "('param', name=key, value=self.parameters[key][0])\n", (6490, 6540), False, 'from xml.etree import ElementTree\n'), ((6700, 6763), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""stat"""'], {'name': 'key', 'value': 'self.stats[key][0]'}), "('stat', name=key, value=self.stats[key][0])\n", (6719, 6763), False, 'from xml.etree import ElementTree\n'), ((8765, 8834), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""param"""'], {'name': 'key', 'value': 'self.parameters[key][0]'}), "('param', name=key, value=self.parameters[key][0])\n", (8784, 8834), False, 'from xml.etree import ElementTree\n'), ((8994, 9057), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""stat"""'], {'name': 'key', 'value': 'self.stats[key][0]'}), "('stat', name=key, value=self.stats[key][0])\n", (9013, 9057), False, 'from xml.etree import ElementTree\n'), ((11374, 11443), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""param"""'], {'name': 'key', 'value': 'self.parameters[key][0]'}), "('param', name=key, value=self.parameters[key][0])\n", (11393, 11443), False, 'from xml.etree import ElementTree\n'), ((11603, 11666), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""stat"""'], {'name': 'key', 'value': 'self.stats[key][0]'}), "('stat', name=key, value=self.stats[key][0])\n", (11622, 11666), False, 'from xml.etree import ElementTree\n')]
|
import math
from heapq import heappop, heappush
import pickle
from itertools import islice
k=5
threshhold = 10
doc_length = {}
champion_list = {}
scores = {}
heap_flag = 1
champion_flag = 1
index_elimination_flag = 1
# set into a list
def convert(set):
return sorted(set)
#sort
def sort(list):
return sorted(list.items(), key=lambda x: x[1], reverse=True)
#power
def power(b , p):
return math.pow(b ,p)
#logarithm
def logarithm(number , b):
return math.log(number , b)
def one_word(query):
docs_res = {}
if query in inverted_index.keys():
docs = inverted_index[query]
for d in docs :
docs_res[d] = URLs[d]
return d , docs_res
else :
return 'Sorry! The Query Does Not Found'
def multi_words(tokens):
relation = {}
for token in tokens.split():
if token in inverted_index.keys():
for i in inverted_index[token]:
relation[i] += 1
relation = sorted(relation.items(), key=lambda x: x[1], reverse=True)
index = list(relation.keys())[0]
return index,URLs[index]
# def one_word(query):
# docs_res = {}
# if query in inverted_index.keys():
# docs = inverted_index[query]
# for d in docs :
# docs_res[d] = URLs[d]
# return d , docs_res
# else :
# return 'Sorry! The Query Does Not Found'
# def multi_words(tokens):
# token_relevance = {}
# for token in tokens.split():
# if((index_elimination_flag and calculate_idf(token)<threshhold) or (not index_elimination_flag)):
# if (token in inverted_index.keys()):
# for i in inverted_index[token]:
# token_relevance[i] += 1
# token_relevance = sort(token_relevance)
# d = list(token_relevance.keys())[0]
# return d,URLs[d]
def C_list(tokens):
for t in tokens:
champion_list[t] = {}
if t in inverted_index.values():
for id in inverted_index[t].keys():
champion_list[t][id] = inverted_index[t][id] / calculate_doc_length(id)
champion_list[t] = sort(champion_list[t])
return champion_list
def calculate_doc_length(id):
l = 0
for token in contents[id].split():
# print(inverted_index[token])
# print('\n\n\n')
if token in inverted_index.keys():
for id , num in enumerate(inverted_index[token]):
l += power(num , 2)
l= power(l , 1.0/2)
return l
def calculate_tf(token , id):
w=0
tf = token_frequency_inDoc[token][id]
if (tf > 0):
w = 1 + logarithm(tf , 10)
return w
def calculate_idf(token):
N = len(contents)
df = len(inverted_index[token])
idf = logarithm((N / df) , 10)
return idf
def tf_idf(token, id):
return calculate_tf(token, id) * calculate_idf(token)
def cosine_similarity(query, id):
result = 0
for token in query.split():
if token in contents[id].split():
result += tf_idf(token, id) * inverted_index[token][id]
result /= doc_length[id]
return result
def calculate_score(query , champion_list):
for token in query.split():
# print('helllllllllllooooooooooooo')
# print('token is : ' , token)
if (champion_flag == 1) :
if ((token in champion_list.keys()) and (not champion_list[token])):
# print('+++++++',champion_list[token])
for j in champion_list[token]:
print(j)
# print('helllllllllllooooooooooooo')
id = j[0]
scores[id] = 0
if id in scores.keys():
scores[id] += cosine_similarity(token,id)
else :
for j in inverted_index[token]:
id = j[0]
scores[id] = 0
if id in scores.keys():
scores[id] += cosine_similarity(token,id)
return scores
def bubble_sort(my_list):
for mx in range(len(my_list)-1, -1, -1):
swapped = False
for i in range(mx):
if my_list[i][1] < my_list[i+1][1]:
my_list[i], my_list[i+1] = my_list[i+1], my_list[i]
swapped = True
if not swapped:
break
return my_list
def k_scores(query , champion_list):
scores = calculate_score(query , champion_list)
# print(scores)
best_scores = {}
if (heap_flag == 1):
heap = []
# print(scores)
for id in scores.keys():
heappush(heap, (-scores[id], id))
# print(id)
for i in range(k):
max, id = heappop(heap)
best_scores[id] = -max
else :
best_scores = bubble_sort(scores)
return list(islice(best_scores, k))
return best_scores
def main():
global inverted_index , contents , URLs , token_frequency_inDoc , champion_list
with open("inverted_index.txt", "rb+") as fp1: # Unpickling
inverted_index = pickle.load(fp1)
with open("contents.txt", "rb+") as fp2:
contents = pickle.load(fp2)
# print(contents[1])
# print(len(contents))
# print('\n\n\n')
with open("URLS.txt", "rb+") as fp3:
URLs = pickle.load(fp3)
with open("token_frequency_inDoc.txt", "rb+") as fp4:
token_frequency_inDoc = pickle.load(fp4)
with open("tokens.txt", "rb+") as fp5:
tokens = pickle.load(fp5)
champion_list = C_list(tokens)
# print('champion list : ' , champion_list)
number = input("1:Single Query\n2:Multi Query\n")
query = input("Please Enter Your Query: ")
if (int(number)==1):
print(one_word(query))
elif(int(number)==2):
print(multi_words(query))
else :
print('invalid input')
# else :
# print('invalid input')
if __name__ == "__main__":
main()
|
[
"heapq.heappush",
"math.pow",
"heapq.heappop",
"pickle.load",
"itertools.islice",
"math.log"
] |
[((409, 423), 'math.pow', 'math.pow', (['b', 'p'], {}), '(b, p)\n', (417, 423), False, 'import math\n'), ((476, 495), 'math.log', 'math.log', (['number', 'b'], {}), '(number, b)\n', (484, 495), False, 'import math\n'), ((5155, 5171), 'pickle.load', 'pickle.load', (['fp1'], {}), '(fp1)\n', (5166, 5171), False, 'import pickle\n'), ((5238, 5254), 'pickle.load', 'pickle.load', (['fp2'], {}), '(fp2)\n', (5249, 5254), False, 'import pickle\n'), ((5390, 5406), 'pickle.load', 'pickle.load', (['fp3'], {}), '(fp3)\n', (5401, 5406), False, 'import pickle\n'), ((5498, 5514), 'pickle.load', 'pickle.load', (['fp4'], {}), '(fp4)\n', (5509, 5514), False, 'import pickle\n'), ((5581, 5597), 'pickle.load', 'pickle.load', (['fp5'], {}), '(fp5)\n', (5592, 5597), False, 'import pickle\n'), ((4674, 4707), 'heapq.heappush', 'heappush', (['heap', '(-scores[id], id)'], {}), '(heap, (-scores[id], id))\n', (4682, 4707), False, 'from heapq import heappop, heappush\n'), ((4783, 4796), 'heapq.heappop', 'heappop', (['heap'], {}), '(heap)\n', (4790, 4796), False, 'from heapq import heappop, heappush\n'), ((4906, 4928), 'itertools.islice', 'islice', (['best_scores', 'k'], {}), '(best_scores, k)\n', (4912, 4928), False, 'from itertools import islice\n')]
|
import ctypes
import itertools
import windows
import windows.hooks
from windows.generated_def.winstructs import *
class Ressource(object):
def __init__(self, filename, lpName, lpType):
self.filename = filename
self.lpName = lpName
self.lpType = lpType
self.driver_data = None
self.loaded_ressource = None
def match(self, hModule, lpName, lpType):
x = not hModule and self.lpName == lpName and self.lpType == lpType
return x
def get_driver_data(self):
if self.driver_data is not None:
return self.driver_data
self.driver_data = open(self.filename, 'rb').read()
return self.driver_data
def load_resource(self):
driver_data = self.get_driver_data()
char_p = ctypes.c_char_p(driver_data)
real_addr = ctypes.cast(char_p, ctypes.c_void_p).value
return real_addr
def resource_len(self):
return len(self.get_driver_data())
resource_list = []
HRSRC_dict = {}
HRSRC_attibution = itertools.count(0x42424242)
@windows.hooks.Callback(PVOID, PVOID, PVOID, PVOID)
def FindResourceWHook(hModule, lpName, lpType, real_function):
for res in resource_list:
if res.match(hModule, lpName, lpType):
HRSRC = next(HRSRC_attibution)
HRSRC_dict[HRSRC] = res
return HRSRC
return real_function()
@windows.hooks.SizeofResourceCallback
def SizeofResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].resource_len()
return real_function()
@windows.hooks.LoadResourceCallback
def LoadResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].load_resource()
return real_function()
@windows.hooks.LockResourceCallback
def LockResourceHook(hResData, real_function):
x = real_function()
return x
|
[
"ctypes.c_char_p",
"ctypes.cast",
"windows.hooks.Callback",
"itertools.count"
] |
[((1029, 1056), 'itertools.count', 'itertools.count', (['(1111638594)'], {}), '(1111638594)\n', (1044, 1056), False, 'import itertools\n'), ((1060, 1110), 'windows.hooks.Callback', 'windows.hooks.Callback', (['PVOID', 'PVOID', 'PVOID', 'PVOID'], {}), '(PVOID, PVOID, PVOID, PVOID)\n', (1082, 1110), False, 'import windows\n'), ((785, 813), 'ctypes.c_char_p', 'ctypes.c_char_p', (['driver_data'], {}), '(driver_data)\n', (800, 813), False, 'import ctypes\n'), ((834, 870), 'ctypes.cast', 'ctypes.cast', (['char_p', 'ctypes.c_void_p'], {}), '(char_p, ctypes.c_void_p)\n', (845, 870), False, 'import ctypes\n')]
|
import torch
from torch import nn
from kornia import augmentation as K
from kornia import filters as F
from torchvision import transforms
from .augmenter import RandomAugmentation
from .randaugment import RandAugmentNS
# for type hint
from typing import List, Tuple, Union, Callable
from torch import Tensor
from torch.nn import Module
from PIL.Image import Image as PILImage
DatasetStatType = List[float]
ImageSizeType = Tuple[int, int]
PaddingInputType = Union[float, Tuple[float, float], Tuple[float, float, float, float]]
ImageType = Union[Tensor, PILImage]
def get_augmenter(augmenter_type: str,
image_size: ImageSizeType,
dataset_mean: DatasetStatType,
dataset_std: DatasetStatType,
padding: PaddingInputType = 1. / 8.,
pad_if_needed: bool = False,
subset_size: int = 2) -> Union[Module, Callable]:
"""
Args:
augmenter_type: augmenter type
image_size: (height, width) image size
dataset_mean: dataset mean value in CHW
dataset_std: dataset standard deviation in CHW
padding: percent of image size to pad on each border of the image. If a sequence of length 4 is provided,
it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is
used to pad left/right, top/bottom borders, respectively.
pad_if_needed: bool flag for RandomCrop "pad_if_needed" option
subset_size: number of augmentations used in subset
Returns: nn.Module for Kornia augmentation or Callable for torchvision transform
"""
if not isinstance(padding, tuple):
assert isinstance(padding, float)
padding = (padding, padding, padding, padding)
assert len(padding) == 2 or len(padding) == 4
if len(padding) == 2:
# padding of length 2 is used to pad left/right, top/bottom borders, respectively
# padding of length 4 is used to pad left, top, right, bottom borders respectively
padding = (padding[0], padding[1], padding[0], padding[1])
# image_size is of shape (h,w); padding values is [left, top, right, bottom] borders
padding = (
int(image_size[1] * padding[0]),
int(image_size[0] * padding[1]),
int(image_size[1] * padding[2]),
int(image_size[0] * padding[3])
)
augmenter_type = augmenter_type.strip().lower()
if augmenter_type == "simple":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "fixed":
return nn.Sequential(
K.RandomHorizontalFlip(p=0.5),
# K.RandomVerticalFlip(p=0.2),
K.RandomResizedCrop(size=image_size, scale=(0.8, 1.0), ratio=(1., 1.)),
RandomAugmentation(
p=0.5,
augmentation=F.GaussianBlur2d(
kernel_size=(3, 3),
sigma=(1.5, 1.5),
border_type='constant'
)
),
K.ColorJitter(contrast=(0.75, 1.5)),
# additive Gaussian noise
K.RandomErasing(p=0.1),
# Multiply
K.RandomAffine(
degrees=(-25., 25.),
translate=(0.2, 0.2),
scale=(0.8, 1.2),
shear=(-8., 8.)
),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type in ["validation", "test"]:
return nn.Sequential(
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "randaugment":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
RandAugmentNS(n=subset_size, m=10),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
else:
raise NotImplementedError(f"\"{augmenter_type}\" is not a supported augmenter type")
__all__ = [
# modules
# classes
# functions
"get_augmenter",
]
|
[
"kornia.augmentation.RandomResizedCrop",
"kornia.augmentation.ColorJitter",
"kornia.filters.GaussianBlur2d",
"kornia.augmentation.RandomCrop",
"kornia.augmentation.RandomErasing",
"kornia.augmentation.RandomAffine",
"kornia.augmentation.RandomHorizontalFlip",
"torch.tensor"
] |
[((2529, 2632), 'kornia.augmentation.RandomCrop', 'K.RandomCrop', ([], {'size': 'image_size', 'padding': 'padding', 'pad_if_needed': 'pad_if_needed', 'padding_mode': '"""reflect"""'}), "(size=image_size, padding=padding, pad_if_needed=pad_if_needed,\n padding_mode='reflect')\n", (2541, 2632), True, 'from kornia import augmentation as K\n'), ((2667, 2696), 'kornia.augmentation.RandomHorizontalFlip', 'K.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2689, 2696), True, 'from kornia import augmentation as K\n'), ((2942, 2971), 'kornia.augmentation.RandomHorizontalFlip', 'K.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2964, 2971), True, 'from kornia import augmentation as K\n'), ((3028, 3100), 'kornia.augmentation.RandomResizedCrop', 'K.RandomResizedCrop', ([], {'size': 'image_size', 'scale': '(0.8, 1.0)', 'ratio': '(1.0, 1.0)'}), '(size=image_size, scale=(0.8, 1.0), ratio=(1.0, 1.0))\n', (3047, 3100), True, 'from kornia import augmentation as K\n'), ((3368, 3403), 'kornia.augmentation.ColorJitter', 'K.ColorJitter', ([], {'contrast': '(0.75, 1.5)'}), '(contrast=(0.75, 1.5))\n', (3381, 3403), True, 'from kornia import augmentation as K\n'), ((3455, 3477), 'kornia.augmentation.RandomErasing', 'K.RandomErasing', ([], {'p': '(0.1)'}), '(p=0.1)\n', (3470, 3477), True, 'from kornia import augmentation as K\n'), ((3514, 3615), 'kornia.augmentation.RandomAffine', 'K.RandomAffine', ([], {'degrees': '(-25.0, 25.0)', 'translate': '(0.2, 0.2)', 'scale': '(0.8, 1.2)', 'shear': '(-8.0, 8.0)'}), '(degrees=(-25.0, 25.0), translate=(0.2, 0.2), scale=(0.8, 1.2\n ), shear=(-8.0, 8.0))\n', (3528, 3615), True, 'from kornia import augmentation as K\n'), ((2727, 2774), 'torch.tensor', 'torch.tensor', (['dataset_mean'], {'dtype': 'torch.float32'}), '(dataset_mean, dtype=torch.float32)\n', (2739, 2774), False, 'import torch\n'), ((2804, 2850), 'torch.tensor', 'torch.tensor', (['dataset_std'], {'dtype': 'torch.float32'}), '(dataset_std, dtype=torch.float32)\n', (2816, 2850), False, 'import torch\n'), ((3184, 3262), 'kornia.filters.GaussianBlur2d', 'F.GaussianBlur2d', ([], {'kernel_size': '(3, 3)', 'sigma': '(1.5, 1.5)', 'border_type': '"""constant"""'}), "(kernel_size=(3, 3), sigma=(1.5, 1.5), border_type='constant')\n", (3200, 3262), True, 'from kornia import filters as F\n'), ((3715, 3762), 'torch.tensor', 'torch.tensor', (['dataset_mean'], {'dtype': 'torch.float32'}), '(dataset_mean, dtype=torch.float32)\n', (3727, 3762), False, 'import torch\n'), ((3792, 3838), 'torch.tensor', 'torch.tensor', (['dataset_std'], {'dtype': 'torch.float32'}), '(dataset_std, dtype=torch.float32)\n', (3804, 3838), False, 'import torch\n'), ((4183, 4286), 'kornia.augmentation.RandomCrop', 'K.RandomCrop', ([], {'size': 'image_size', 'padding': 'padding', 'pad_if_needed': 'pad_if_needed', 'padding_mode': '"""reflect"""'}), "(size=image_size, padding=padding, pad_if_needed=pad_if_needed,\n padding_mode='reflect')\n", (4195, 4286), True, 'from kornia import augmentation as K\n'), ((4321, 4350), 'kornia.augmentation.RandomHorizontalFlip', 'K.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4343, 4350), True, 'from kornia import augmentation as K\n'), ((3962, 4009), 'torch.tensor', 'torch.tensor', (['dataset_mean'], {'dtype': 'torch.float32'}), '(dataset_mean, dtype=torch.float32)\n', (3974, 4009), False, 'import torch\n'), ((4039, 4085), 'torch.tensor', 'torch.tensor', (['dataset_std'], {'dtype': 'torch.float32'}), '(dataset_std, dtype=torch.float32)\n', (4051, 4085), False, 'import torch\n'), ((4429, 4476), 'torch.tensor', 'torch.tensor', (['dataset_mean'], {'dtype': 'torch.float32'}), '(dataset_mean, dtype=torch.float32)\n', (4441, 4476), False, 'import torch\n'), ((4506, 4552), 'torch.tensor', 'torch.tensor', (['dataset_std'], {'dtype': 'torch.float32'}), '(dataset_std, dtype=torch.float32)\n', (4518, 4552), False, 'import torch\n')]
|
# code-checked
# server-checked
import os
# NOTE! NOTE! NOTE! make sure you run this code inside the kitti_raw directory (/root/data/kitti_raw)
kitti_depth_path = "/root/data/kitti_depth"
rgb_depth_path = "/root/data/kitti_rgb"
train_dirs = os.listdir(kitti_depth_path + "/train") # (contains "2011_09_26_drive_0001_sync" and so on)
val_dirs = os.listdir(kitti_depth_path + "/val")
# Create "train" and "val" dir for RGB
rgb_train_dir = os.path.join(rgb_depth_path, "train")
rgb_val_dir = os.path.join(rgb_depth_path, "val")
os.system("mkdir %s" % rgb_train_dir)
os.system("mkdir %s" % rgb_val_dir)
print ("num train dirs: %d" % len(train_dirs))
print ("num val dirs: %d" % len(val_dirs))
# Training set
for step, dir_name in enumerate(train_dirs):
print("########################### Training set #########################################")
print("step %d/%d" % (step+1, len(train_dirs)))
print(dir_name)
dir_name_no_sync = dir_name.split("_sync")[0] # (dir_name_no_sync == "2011_09_26_drive_0001")
# download the zip file:
os.system("wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip" % (dir_name_no_sync, dir_name))
# unzip:
os.system("unzip %s.zip" % dir_name)
# move to rgb dir
zip_dir = dir_name.split('_drive')[0]
os.system("mv %s %s" % (os.path.join(zip_dir, dir_name), rgb_train_dir))
# Validation set
for step, dir_name in enumerate(val_dirs):
print("########################### Training set #########################################")
print("step %d/%d" % (step+1, len(val_dirs)))
print(dir_name)
dir_name_no_sync = dir_name.split("_sync")[0] # (dir_name_no_sync == "2011_09_26_drive_0001")
# download the zip file:
os.system("wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip" % (dir_name_no_sync, dir_name))
# unzip:
os.system("unzip %s.zip" % dir_name)
# move to rgb dir
zip_dir = dir_name.split('_drive')[0]
os.system("mv %s %s" % (os.path.join(zip_dir, dir_name), rgb_val_dir))
|
[
"os.system",
"os.path.join",
"os.listdir"
] |
[((244, 283), 'os.listdir', 'os.listdir', (["(kitti_depth_path + '/train')"], {}), "(kitti_depth_path + '/train')\n", (254, 283), False, 'import os\n'), ((347, 384), 'os.listdir', 'os.listdir', (["(kitti_depth_path + '/val')"], {}), "(kitti_depth_path + '/val')\n", (357, 384), False, 'import os\n'), ((441, 478), 'os.path.join', 'os.path.join', (['rgb_depth_path', '"""train"""'], {}), "(rgb_depth_path, 'train')\n", (453, 478), False, 'import os\n'), ((493, 528), 'os.path.join', 'os.path.join', (['rgb_depth_path', '"""val"""'], {}), "(rgb_depth_path, 'val')\n", (505, 528), False, 'import os\n'), ((529, 566), 'os.system', 'os.system', (["('mkdir %s' % rgb_train_dir)"], {}), "('mkdir %s' % rgb_train_dir)\n", (538, 566), False, 'import os\n'), ((567, 602), 'os.system', 'os.system', (["('mkdir %s' % rgb_val_dir)"], {}), "('mkdir %s' % rgb_val_dir)\n", (576, 602), False, 'import os\n'), ((1055, 1179), 'os.system', 'os.system', (["('wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip' %\n (dir_name_no_sync, dir_name))"], {}), "(\n 'wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip' %\n (dir_name_no_sync, dir_name))\n", (1064, 1179), False, 'import os\n'), ((1189, 1225), 'os.system', 'os.system', (["('unzip %s.zip' % dir_name)"], {}), "('unzip %s.zip' % dir_name)\n", (1198, 1225), False, 'import os\n'), ((1728, 1852), 'os.system', 'os.system', (["('wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip' %\n (dir_name_no_sync, dir_name))"], {}), "(\n 'wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip' %\n (dir_name_no_sync, dir_name))\n", (1737, 1852), False, 'import os\n'), ((1862, 1898), 'os.system', 'os.system', (["('unzip %s.zip' % dir_name)"], {}), "('unzip %s.zip' % dir_name)\n", (1871, 1898), False, 'import os\n'), ((1319, 1350), 'os.path.join', 'os.path.join', (['zip_dir', 'dir_name'], {}), '(zip_dir, dir_name)\n', (1331, 1350), False, 'import os\n'), ((1992, 2023), 'os.path.join', 'os.path.join', (['zip_dir', 'dir_name'], {}), '(zip_dir, dir_name)\n', (2004, 2023), False, 'import os\n')]
|
import warnings
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List
import xarray as xr
from _echopype_version import version as ECHOPYPE_VERSION
from ..core import SONAR_MODELS
from ..qc import coerce_increasing_time, exist_reversed_time
from .echodata import EchoData
def union_attrs(datasets: List[xr.Dataset]) -> Dict[str, Any]:
"""
Merges attrs from a list of datasets.
Prioritizes keys from later datsets.
"""
total_attrs = dict()
for ds in datasets:
total_attrs.update(ds.attrs)
return total_attrs
def assemble_combined_provenance(input_paths):
return xr.Dataset(
data_vars={
"src_filenames": ("file", input_paths),
},
attrs={
"conversion_software_name": "echopype",
"conversion_software_version": ECHOPYPE_VERSION,
"conversion_time": datetime.utcnow().isoformat(timespec="seconds")
+ "Z", # use UTC time
},
)
def combine_echodata(echodatas: List[EchoData], combine_attrs="override") -> EchoData:
"""
Combines multiple `EchoData` objects into a single `EchoData` object.
Parameters
----------
echodatas: List[EchoData]
The list of `EchoData` objects to be combined.
combine_attrs: { "override", "drop", "identical", "no_conflicts", "overwrite_conflicts" }
String indicating how to combine attrs of the `EchoData` objects being merged.
This parameter matches the identically named xarray parameter
(see https://xarray.pydata.org/en/latest/generated/xarray.combine_nested.html)
with the exception of the "overwrite_conflicts" value.
* "override": Default. skip comparing and copy attrs from the first `EchoData`
object to the result.
* "drop": empty attrs on returned `EchoData` object.
* "identical": all attrs must be the same on every object.
* "no_conflicts": attrs from all objects are combined,
any that have the same name must also have the same value.
* "overwrite_conflicts": attrs from all `EchoData` objects are combined,
attrs with conflicting keys will be overwritten by later `EchoData` objects.
Returns
-------
EchoData
An `EchoData` object with all of the data from the input `EchoData` objects combined.
Raises
------
ValueError
If `echodatas` contains `EchoData` objects with different or `None` `sonar_model` values
(i.e., all `EchoData` objects must have the same non-None `sonar_model` value).
ValueError
If EchoData objects have conflicting source file names.
Warns
-----
UserWarning
If the `sonar_model` of the input `EchoData` objects is `"EK60"` and any `EchoData` objects
have non-monotonically increasing `ping_time`, `location_time` or `mru_time` values,
the corresponding values in the output `EchoData` object will be increased starting at the
timestamp where the reversal occurs such that all values in the output are monotonically
increasing. Additionally, the original `ping_time`, `location_time` or `mru_time` values
will be stored in the `Provenance` group, although this behavior may change in future
versions.
Warnings
--------
Changes in parameters between `EchoData` objects are not currently checked;
however, they may raise an error in future versions.
Notes
-----
* `EchoData` objects are combined by combining their groups individually.
* Attributes from all groups before the combination will be stored in the provenance group,
although this behavior may change in future versions.
* The `source_file` and `converted_raw_path` attributes will be copied from the first
`EchoData` object in the given list, but this may change in future versions.
Examples
--------
>>> ed1 = echopype.open_converted("file1.nc")
>>> ed2 = echopype.open_converted("file2.zarr")
>>> combined = echopype.combine_echodata([ed1, ed2])
"""
result = EchoData()
if len(echodatas) == 0:
return result
result.source_file = echodatas[0].source_file
result.converted_raw_path = echodatas[0].converted_raw_path
sonar_model = None
for echodata in echodatas:
if echodata.sonar_model is None:
raise ValueError(
"all EchoData objects must have non-None sonar_model values"
)
elif sonar_model is None:
sonar_model = echodata.sonar_model
elif echodata.sonar_model != sonar_model:
raise ValueError(
"all EchoData objects must have the same sonar_model value"
)
# ping time before reversal correction
old_ping_time = None
# ping time after reversal correction
new_ping_time = None
# location time before reversal correction
old_location_time = None
# location time after reversal correction
new_location_time = None
# mru time before reversal correction
old_mru_time = None
# mru time after reversal correction
new_mru_time = None
# all attributes before combination
# { group1: [echodata1 attrs, echodata2 attrs, ...], ... }
old_attrs: Dict[str, List[Dict[str, Any]]] = dict()
for group in EchoData.group_map:
group_datasets = [
getattr(echodata, group)
for echodata in echodatas
if getattr(echodata, group) is not None
]
if group in ("top", "sonar"):
combined_group = getattr(echodatas[0], group)
elif group == "provenance":
combined_group = assemble_combined_provenance(
[
echodata.source_file
if echodata.source_file is not None
else echodata.converted_raw_path
for echodata in echodatas
]
)
else:
if len(group_datasets) == 0:
setattr(result, group, None)
continue
concat_dim = SONAR_MODELS[sonar_model]["concat_dims"].get(
group, SONAR_MODELS[sonar_model]["concat_dims"]["default"]
)
concat_data_vars = SONAR_MODELS[sonar_model]["concat_data_vars"].get(
group, SONAR_MODELS[sonar_model]["concat_data_vars"]["default"]
)
combined_group = xr.combine_nested(
group_datasets,
[concat_dim],
data_vars=concat_data_vars,
coords="minimal",
combine_attrs="drop"
if combine_attrs == "overwrite_conflicts"
else combine_attrs,
)
if combine_attrs == "overwrite_conflicts":
combined_group.attrs.update(union_attrs(group_datasets))
if group == "beam":
if sonar_model == "EK80":
combined_group["transceiver_software_version"] = combined_group[
"transceiver_software_version"
].astype("<U10")
combined_group["channel_id"] = combined_group["channel_id"].astype(
"<U50"
)
elif sonar_model == "EK60":
combined_group["gpt_software_version"] = combined_group[
"gpt_software_version"
].astype("<U10")
combined_group["channel_id"] = combined_group["channel_id"].astype(
"<U50"
)
if sonar_model in ("EK60", "EK80"):
if "ping_time" in combined_group and exist_reversed_time(
combined_group, "ping_time"
):
if old_ping_time is None:
warnings.warn(
f"{sonar_model} ping_time reversal detected; the ping times will be corrected" # noqa
" (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
)
old_ping_time = combined_group["ping_time"]
coerce_increasing_time(combined_group, time_name="ping_time")
new_ping_time = combined_group["ping_time"]
else:
combined_group["ping_time"] = new_ping_time
if "location_time" in combined_group and exist_reversed_time(
combined_group, "location_time"
):
if group != "nmea":
if old_location_time is None:
warnings.warn(
f"{sonar_model} location_time reversal detected; the location times will be corrected" # noqa
" (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
)
old_location_time = combined_group["location_time"]
coerce_increasing_time(
combined_group, time_name="location_time"
)
new_location_time = combined_group["location_time"]
else:
combined_group["location_time"] = new_location_time
if sonar_model == "EK80":
if "mru_time" in combined_group and exist_reversed_time(
combined_group, "mru_time"
):
if old_mru_time is None:
warnings.warn(
f"{sonar_model} mru_time reversal detected; the mru times will be corrected" # noqa
" (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
)
old_mru_time = combined_group["mru_time"]
coerce_increasing_time(combined_group, time_name="mru_time")
new_mru_time = combined_group["mru_time"]
else:
combined_group["mru_time"] = new_mru_time
if len(group_datasets) > 1:
old_attrs[group] = [group_dataset.attrs for group_dataset in group_datasets]
if combined_group is not None:
# xarray inserts this dimension when concating along multiple dimensions
combined_group = combined_group.drop_dims("concat_dim", errors="ignore")
setattr(result, group, combined_group)
# save ping time before reversal correction
if old_ping_time is not None:
result.provenance["old_ping_time"] = old_ping_time
result.provenance.attrs["reversed_ping_times"] = 1
# save location time before reversal correction
if old_location_time is not None:
result.provenance["old_location_time"] = old_location_time
result.provenance.attrs["reversed_ping_times"] = 1
# save mru time before reversal correction
if old_mru_time is not None:
result.provenance["old_mru_time"] = old_mru_time
result.provenance.attrs["reversed_ping_times"] = 1
# TODO: possible parameter to disable original attributes and original ping_time storage
# in provenance group?
# save attrs from before combination
for group in old_attrs:
all_group_attrs = set()
for group_attrs in old_attrs[group]:
for attr in group_attrs:
all_group_attrs.add(attr)
echodata_filenames = []
for ed in echodatas:
if ed.source_file is not None:
filepath = ed.source_file
elif ed.converted_raw_path is not None:
filepath = ed.converted_raw_path
else:
# unreachable
raise ValueError("EchoData object does not have a file path")
filename = Path(filepath).name
if filename in echodata_filenames:
raise ValueError("EchoData objects have conflicting filenames")
echodata_filenames.append(filename)
attrs = xr.DataArray(
[
[group_attrs.get(attr) for attr in all_group_attrs]
for group_attrs in old_attrs[group]
],
coords={
"echodata_filename": echodata_filenames,
f"{group}_attr_key": list(all_group_attrs),
},
dims=["echodata_filename", f"{group}_attr_key"],
)
result.provenance = result.provenance.assign({f"{group}_attrs": attrs})
# Add back sonar model
result.sonar_model = sonar_model
return result
|
[
"datetime.datetime.utcnow",
"pathlib.Path",
"warnings.warn",
"xarray.combine_nested"
] |
[((6455, 6639), 'xarray.combine_nested', 'xr.combine_nested', (['group_datasets', '[concat_dim]'], {'data_vars': 'concat_data_vars', 'coords': '"""minimal"""', 'combine_attrs': "('drop' if combine_attrs == 'overwrite_conflicts' else combine_attrs)"}), "(group_datasets, [concat_dim], data_vars=concat_data_vars,\n coords='minimal', combine_attrs='drop' if combine_attrs ==\n 'overwrite_conflicts' else combine_attrs)\n", (6472, 6639), True, 'import xarray as xr\n'), ((11949, 11963), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (11953, 11963), False, 'from pathlib import Path\n'), ((899, 916), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (914, 916), False, 'from datetime import datetime\n'), ((7886, 8049), 'warnings.warn', 'warnings.warn', (['f"""{sonar_model} ping_time reversal detected; the ping times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)"""'], {}), "(\n f'{sonar_model} ping_time reversal detected; the ping times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)'\n )\n", (7899, 8049), False, 'import warnings\n'), ((9659, 9820), 'warnings.warn', 'warnings.warn', (['f"""{sonar_model} mru_time reversal detected; the mru times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)"""'], {}), "(\n f'{sonar_model} mru_time reversal detected; the mru times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)'\n )\n", (9672, 9820), False, 'import warnings\n'), ((8720, 8891), 'warnings.warn', 'warnings.warn', (['f"""{sonar_model} location_time reversal detected; the location times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)"""'], {}), "(\n f'{sonar_model} location_time reversal detected; the location times will be corrected (see https://github.com/OSOceanAcoustics/echopype/pull/297)'\n )\n", (8733, 8891), False, 'import warnings\n')]
|
from dataclasses import dataclass, field
from typing import Any, Dict, Iterable, Optional, Tuple, Union
from flask import Flask, Blueprint, request, Response
from flask.views import View
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.routing import Map, MapAdapter, Rule
@dataclass
class RouteMeta:
route_path: str
methods: Iterable[str]
options: Dict = field(default_factory=dict)
route_keywords = {'index', 'get', 'post', 'put', 'patch', 'delete'}
def _lstrip(text: str, chars: str) -> str:
"""
Given a string `text`, remove the leading `chars` if and only if they
appear as the leading characters
"""
if chars and text[:len(chars)] == chars:
text = text[len(chars):]
return text
def route(path: str, methods: Iterable[str] = ('GET',), **options):
def decorator(func):
meta = RouteMeta(route_path=path, methods=methods, options=options)
func.route_meta = [meta, *getattr(func, 'route_meta', [])]
return func
return decorator
class ViewMeta(type):
def __new__(
mcs,
name: str,
bases: Tuple[type, ...],
attrs: Dict[str, Any]
):
url_map = Map()
# For bases, take the attrs that were not overridden and re-add them
# so they get processed
for base in bases:
if base_map := getattr(base, 'url_map', None): # type: Map
for rule in base_map.iter_rules():
if rule.endpoint not in attrs:
attrs[rule.endpoint] = getattr(base, rule.endpoint)
# Iterate over functions in the class
# If the function has the `@route` decorator, then:
# For each `@route` definition:
# Add a rule for that `@route`
for func_name, func in attrs.items():
if meta_list := getattr(func, 'route_meta', None):
for meta in meta_list: # type: RouteMeta
url_map.add(Rule(
meta.route_path,
methods=meta.methods,
endpoint=func_name,
**meta.options
))
# Register specially named routes that don't have `@route`
path = '/' if func_name in {'index', 'post'} else '/<id>'
if func_name in route_keywords and not hasattr(func, 'route_meta'):
url_map.add(Rule(
path,
methods=['get' if func_name == 'index' else func_name],
endpoint=func_name
))
attrs['url_map'] = url_map
return super().__new__(mcs, name, bases, attrs)
class GenericView(View, metaclass=ViewMeta):
route_base: Optional[str] = None
route_prefix: Optional[str] = None
_bp_prefix: Optional[str] = None # Placeholder for prefix on the BP
def dispatch_request(self, **kwargs):
bp_prefix = self._bp_prefix or ''
prefix = self.route_prefix or ''
base = self.route_base or ''
path = _lstrip(request.url_rule.rule, bp_prefix) # strip bp_prefix
path = _lstrip(path, prefix) # strip class prefix
path = _lstrip(path, base) # strip route base
method = request.method.lower()
view_func, _ = self.url_map_adapter.match(path, method)
if func := getattr(self, view_func, None):
self.before_view_func()
rv = func(**kwargs)
if type(rv) != tuple: # Flask view responses can be tuples
rv = (rv,)
return self.after_view_func(*rv)
raise MethodNotAllowed()
def before_view_func(self): pass
def after_view_func(self, response: Response, status: int = 200):
return response, status
@classmethod
def register(cls, app_or_bp: Union[Blueprint, Flask]):
# If the blueprint has a url_prefix, stash it on the class
if isinstance(app_or_bp, Blueprint):
cls._bp_prefix = app_or_bp.url_prefix
prefix = cls.route_prefix or ''
base = cls.route_base or ''
view = cls.as_view(cls.__name__)
cls.url_map_adapter: MapAdapter = cls.url_map.bind('')
_opts = {'rule', 'map', 'endpoint', 'methods', 'is_leaf', 'arguments'}
for rule in cls.url_map.iter_rules():
opts = {
key: val
for key, val in rule.__dict__.items()
if key not in _opts and not key.startswith('_')
}
if 'defaults' in opts and opts['defaults'] is None:
del opts['defaults']
app_or_bp.add_url_rule(
f'{prefix}{base}{rule.rule}',
endpoint=f'{cls.__name__}:{rule.endpoint}',
view_func=view,
methods=rule.methods,
**opts
)
|
[
"flask.request.method.lower",
"werkzeug.routing.Rule",
"dataclasses.field",
"werkzeug.routing.Map",
"werkzeug.exceptions.MethodNotAllowed"
] |
[((385, 412), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (390, 412), False, 'from dataclasses import dataclass, field\n'), ((1189, 1194), 'werkzeug.routing.Map', 'Map', ([], {}), '()\n', (1192, 1194), False, 'from werkzeug.routing import Map, MapAdapter, Rule\n'), ((3247, 3269), 'flask.request.method.lower', 'request.method.lower', ([], {}), '()\n', (3267, 3269), False, 'from flask import Flask, Blueprint, request, Response\n'), ((3614, 3632), 'werkzeug.exceptions.MethodNotAllowed', 'MethodNotAllowed', ([], {}), '()\n', (3630, 3632), False, 'from werkzeug.exceptions import MethodNotAllowed\n'), ((2422, 2513), 'werkzeug.routing.Rule', 'Rule', (['path'], {'methods': "['get' if func_name == 'index' else func_name]", 'endpoint': 'func_name'}), "(path, methods=['get' if func_name == 'index' else func_name], endpoint\n =func_name)\n", (2426, 2513), False, 'from werkzeug.routing import Map, MapAdapter, Rule\n'), ((1973, 2052), 'werkzeug.routing.Rule', 'Rule', (['meta.route_path'], {'methods': 'meta.methods', 'endpoint': 'func_name'}), '(meta.route_path, methods=meta.methods, endpoint=func_name, **meta.options)\n', (1977, 2052), False, 'from werkzeug.routing import Map, MapAdapter, Rule\n')]
|
import mapel
import mapel.voting.elections.mallows as mallows
from PIL import Image, ImageDraw
from math import sqrt
from sys import argv
def getrgb(value, MAX):
x = int(255 * value / MAX)
return (x, x, x)
def getrgb_uniform(value, MAX):
x = int(255 * value)
return (x, x, x)
def getsqrtrgb(value, MAX):
x = int(255 * (value ** 0.33) / (MAX ** 0.33))
return (x, x, x)
def getsqrtrgb_uniform(value, MAX):
x = int(255 * (value ** 0.25))
return (x, x, x)
def matrix2png(argv):
# introduce yourself
if len(argv) < 4:
print("Invocation:")
print(" python3 matrix2png num_candidates model_id reorder [param1]")
print(
" reorder -- election_id of the model_id to try to resemble (e.g., ID, or AN); use org to use original order")
print("")
exit()
# gather arguments
m = int(argv[1])
n = m * m
model = argv[2]
tgt = argv[3]
print("TGT:", tgt)
if len(argv) >= 5:
param = float(argv[4])
else:
param = None
if model != "mallows":
name = "%s_%d_%s.png" % (model, m, tgt)
else:
name = "%s_phi%d_%d_%s.png" % (model, param * 100, m, tgt)
# prepare the experiment/matrix
experiment = mapel.prepare_experiment()
experiment.set_default_num_candidates(m)
experiment.set_default_num_voters(n)
# Compass Matrices
experiment.add_election(election_model="uniformity", election_id="UN", color=(1, 0.5, 0.5),
marker="X")
experiment.add_election(election_model="identity", election_id="ID", color="red", marker="X")
experiment.add_election(election_model="antagonism", election_id="AN", color="black",
marker="o")
experiment.add_election(election_model="stratification", election_id="ST", color="black")
# form the matrix
if model != "mallows":
experiment.add_election(election_model=model, election_id="M")
else:
experiment.add_election(election_model="norm-mallows_matrix", params={"norm-phi": param},
election_id="M")
M = experiment.elections["M"].matrix
# get the mapping to a given election
experiment.compute_distances()
if tgt == "org":
match = list(range(m))
else:
match = experiment.matchings[tgt]["M"]
print(match)
# get reversed matching
rev_match = [0] * m
for i in range(m):
rev_match[match[i]] = i
print(rev_match)
# create the image
img = Image.new("RGB", (m, m), color="black")
draw = ImageDraw.Draw(img)
MAX = 0 # highest value in the matrix
for y in range(m):
for x in range(m):
MAX = max(MAX, M[y][x])
color = lambda v: getsqrtrgb_uniform(v, MAX)
### print columns
print("----")
for x in range(m):
print("%.2f" % x, end=" ")
print()
print("----")
### draw the matrix
for y in range(m):
for x in range(m):
draw.point((x, y), fill=color(M[y][rev_match[x]]))
print("%.2f" % M[y][rev_match[x]], end=" ")
print()
# save the image
img.save(name)
print("MAX value:", MAX)
|
[
"PIL.ImageDraw.Draw",
"PIL.Image.new",
"mapel.prepare_experiment"
] |
[((1256, 1282), 'mapel.prepare_experiment', 'mapel.prepare_experiment', ([], {}), '()\n', (1280, 1282), False, 'import mapel\n'), ((2536, 2575), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(m, m)'], {'color': '"""black"""'}), "('RGB', (m, m), color='black')\n", (2545, 2575), False, 'from PIL import Image, ImageDraw\n'), ((2587, 2606), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2601, 2606), False, 'from PIL import Image, ImageDraw\n')]
|
#!/usr/bin/env python3
#
# Data pipelines for Edge Computing in Python.
#
# Inspired by Google Media pipelines
#
# Dataflow can be within a "process" and then hook in locally
# But can also be via a "bus" or other communication mechanism
#
# Example: Draw detections
#
# Input 1. Picture
# Input 2. Detections [...]
#
# They can come in one single combined data-packet och as a picture that should be "annotated"
# with labels
#
import cv2
import sys
import time
from calculators.image import *
from calculators.mqtt import *
from calculators.hand import *
from calculators.audio import *
from calculators.core import *
from google.protobuf import text_format
import pipeconfig_pb2
import sched
import importlib
import argparse
def _resolve_class(class_name):
"""Return a class instance based on the string representation"""
if class_name in globals():
return globals()[class_name]
class_info = class_name.rsplit('.', 1)
if len(class_info) != 2:
raise PipelineError(f"Could not resolve class name {class_name}")
try:
m = importlib.import_module(class_info[0])
try:
return getattr(m, class_info[1])
except AttributeError:
raise PipelineError(f"Class {class_name} does not exist")
except ImportError:
raise PipelineError(f"Could not find module for class {class_name}")
def _add_stream_input_node(stream_data, name, node):
if name not in stream_data:
stream_data[name] = []
stream_data[name].append((node, node.get_input_index(name)))
def _merge_options(mapoptions):
options = {**mapoptions.doubleOptions, **mapoptions.stringOptions}
return options
class PipelineError(Exception):
"""Exception raised for errors setting up the edge pipeline."""
def __init__(self, message):
super().__init__(message)
class Pipeline:
def __init__(self):
self.scheduler = sched.scheduler(time.time, time.sleep)
self.streaming_data = {}
self.pipeline = []
self.do_exit = False
self.run_pipeline = False
self.run_step = 0
self.elapsed = {}
self.count = {}
def add_node(self, calculator, prefix, options, input_streams, output_streams):
print("calculator", calculator)
node_class = _resolve_class(calculator)
n = node_class("Node:" + prefix + ":" + calculator, self.streaming_data, options=options)
n.set_input_names(input_streams)
n.set_output_names(output_streams)
for name in input_streams:
_add_stream_input_node(self.streaming_data, name, n)
self.pipeline.append(n)
def get_node_by_name(self, name):
return next((n for n in self.pipeline if n.name == name), None)
def get_nodes_by_type(self, node_class):
return [n for n in self.pipeline if isinstance(n, node_class)]
def get_nodes(self):
return list(self.pipeline)
# Setup a pipeline based on a configuration
def setup_pipeline(self, config, options=None, prefix=""):
if options is None:
options = {}
pipe = pipeconfig_pb2.CalculatorGraphConfig()
text_format.Parse(config, pipe)
print("Pipe-config:")
print(pipe)
print("Inputs:", pipe.input_stream)
print("Outputs:", pipe.output_stream)
# Should check if this already exists in the config...
# map_node_options: { key:"video"; value:"rtsp://192.168.1.237:7447/5c8d2bf990085177ff91c7a2_2" }
if "input_video" in pipe.input_stream:
ins = CaptureNode(prefix + "input_video", self.streaming_data, options=options.get('input_video', {}))
ins.set_input_names([])
ins.set_output_names([prefix + "input_video"])
self.pipeline.append(ins)
if "input_audio" in pipe.input_stream:
ins = AudioCaptureNode(prefix + "input_audio", self.streaming_data, options=options.get('input_audio', {}))
ins.set_input_names([])
ins.set_output_names([prefix + "input_audio"])
self.pipeline.append(ins)
if "output_video" in pipe.output_stream:
outs = ShowImage(prefix + "output_video", self.streaming_data)
outs.set_input_names([prefix + "output_video"])
outs.set_output_names([])
_add_stream_input_node(self.streaming_data, prefix + "output_video", outs)
self.pipeline.append(outs)
for nr, node in enumerate(pipe.node, start=1):
node_options = _merge_options(node.map_node_options)
self.add_node(node.calculator, prefix, node_options, list(map(lambda x: prefix + x, node.input_stream)),
list(map(lambda x: prefix + x, node.output_stream)))
for node in self.pipeline:
self.elapsed[node.name] = 0
self.count[node.name] = 0
return self.streaming_data, self.pipeline
def get_node_by_output(self, outputname):
return list(filter(lambda x: outputname in x.output, self.pipeline))
# Running with the main thread - as it make use of CV2s show image.
def run(self):
while not self.do_exit:
if self.run_pipeline or self.run_step > 0:
# Just process all nodes - they will produce output and process the input.
for node in self.pipeline:
t0 = time.time()
# Count elapsed time when processed!
if node.process_node():
t1 = time.time() - t0
self.elapsed[node.name] += t1
self.count[node.name] += 1
time.sleep(0.001)
self.run_step -= 1
else:
# Nothing running at the moment...
time.sleep(1)
# CV2 wait-key
if cv2.waitKey(1) & 0xFF == ord('q'):
return
self.scheduler.run()
cv2.destroyAllWindows()
def step(self):
self.run_step = 1
def start(self):
self.run_pipeline = True
def stop(self):
self.run_pipeline = False
# I always forget if it is quit or exit - so I have both...
def quit(self):
self.do_exit = True
def exit(self):
self.do_exit = True
# Either load a pbtxt file or use the default above
if __name__ == "__main__":
pipeline = Pipeline()
try:
args = sys.argv[1:]
p = argparse.ArgumentParser()
p.add_argument('--input', dest='input_video', default=None, help='video stream input')
p.add_argument('--input_audio', dest='input_audio', default=None, help='audio stream input')
p.add_argument('-n', '--dry-run', dest='dry_run', action='store_true', default=False,
help='test pipeline setup and exit')
p.add_argument('pipeline', nargs=1)
conopts = p.parse_args(args)
except Exception as e:
sys.exit(f"Illegal arguments: {e}")
print(f"Loading pipeline from {conopts.pipeline[0]}")
try:
with open(conopts.pipeline[0], "r") as f:
txt = f.read()
except FileNotFoundError:
sys.exit(f"Could not find the pipeline config file {conopts.pipeline[0]}")
opts = {}
if conopts.input_video:
video = int(conopts.input_video) if conopts.input_video.isnumeric() else conopts.input_video
opts['input_video'] = {'video': video}
if conopts.input_audio:
audio = int(conopts.input_audio) if conopts.input_audio.isnumeric() else conopts.input_audio
opts['input_audio'] = {'audio': audio}
pipeline.setup_pipeline(txt, options=opts)
if not conopts.dry_run:
pipeline.start()
pipeline.run()
|
[
"pipeconfig_pb2.CalculatorGraphConfig",
"argparse.ArgumentParser",
"importlib.import_module",
"cv2.waitKey",
"google.protobuf.text_format.Parse",
"time.sleep",
"sched.scheduler",
"time.time",
"cv2.destroyAllWindows",
"sys.exit"
] |
[((1068, 1106), 'importlib.import_module', 'importlib.import_module', (['class_info[0]'], {}), '(class_info[0])\n', (1091, 1106), False, 'import importlib\n'), ((1912, 1950), 'sched.scheduler', 'sched.scheduler', (['time.time', 'time.sleep'], {}), '(time.time, time.sleep)\n', (1927, 1950), False, 'import sched\n'), ((3106, 3144), 'pipeconfig_pb2.CalculatorGraphConfig', 'pipeconfig_pb2.CalculatorGraphConfig', ([], {}), '()\n', (3142, 3144), False, 'import pipeconfig_pb2\n'), ((3153, 3184), 'google.protobuf.text_format.Parse', 'text_format.Parse', (['config', 'pipe'], {}), '(config, pipe)\n', (3170, 3184), False, 'from google.protobuf import text_format\n'), ((5959, 5982), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5980, 5982), False, 'import cv2\n'), ((6460, 6485), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6483, 6485), False, 'import argparse\n'), ((6952, 6987), 'sys.exit', 'sys.exit', (['f"""Illegal arguments: {e}"""'], {}), "(f'Illegal arguments: {e}')\n", (6960, 6987), False, 'import sys\n'), ((7171, 7245), 'sys.exit', 'sys.exit', (['f"""Could not find the pipeline config file {conopts.pipeline[0]}"""'], {}), "(f'Could not find the pipeline config file {conopts.pipeline[0]}')\n", (7179, 7245), False, 'import sys\n'), ((5666, 5683), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (5676, 5683), False, 'import time\n'), ((5804, 5817), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5814, 5817), False, 'import time\n'), ((5386, 5397), 'time.time', 'time.time', ([], {}), '()\n', (5395, 5397), False, 'import time\n'), ((5860, 5874), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5871, 5874), False, 'import cv2\n'), ((5528, 5539), 'time.time', 'time.time', ([], {}), '()\n', (5537, 5539), False, 'import time\n')]
|
import math, threading, time
from .. import colors
from .. util import deprecated, log
from . import matrix_drawing as md
from . import font
from . layout import MultiLayout
from . geometry import make_matrix_coord_map_multi
from . geometry.matrix import (
make_matrix_coord_map, make_matrix_coord_map_positions)
ROTATION_WARNING = """
Matrix.rotation must be a multiple of 90 degrees but was in fact %s degress.
It was rounded to %s degrees."""
class Matrix(MultiLayout):
CLONE_ATTRS = MultiLayout.CLONE_ATTRS + (
'width', 'height', 'rotation', 'vert_flip', 'y_flip', 'serpentine',
'pixelSize')
def __init__(self, drivers, width=0, height=0,
rotation=0, vert_flip=False, y_flip=False,
serpentine=True,
threadedUpdate=False, brightness=255,
pixelSize=(1, 1), **kwargs):
"""Main class for matricies.
driver -- instance that inherits from DriverBase
width -- X axis size of matrix
height -- Y axis size of matrix
coord_map -- a 2D matrix defining the X,Y to strip index mapping.
Not needed in most cases
rotation -- how to rotate when generating the map.
Not used if coord_map specified
vert_flip - flips the generated map along the Y axis.
This along with rotation can achieve any orientation
"""
self.gen_multi = make_matrix_coord_map_multi
super().__init__(drivers, threadedUpdate, brightness, **kwargs)
rot_mod = rotation % 360
self.rotation = 90 * round(rot_mod / 90)
if self.rotation != rot_mod:
log.warning(ROTATION_WARNING, rotation, self.rotation)
self.width = width or getattr(self.drivers[0], 'width') or 0
self.height = height or getattr(self.drivers[0], 'height') or 0
self.vert_flip = vert_flip
self.y_flip = y_flip
self.serpentine = serpentine
self.pixelSize = pixelSize
pw, ph = self.pixelSize
# If both are 0, try to assume it's a square display.
if not (self.width or self.height):
square = int(math.sqrt(self.numLEDs))
if (square * square) == self.numLEDs:
self.width = self.height = square
else:
raise TypeError('No width or height passed but '
'the number of LEDs is not a perfect square')
if self.width * self.height > self.numLEDs:
raise ValueError(
'width * height cannot exceed total pixel count! %s * %s > %s'
% (self.width, self.height, self.numLEDs))
if not self.coord_map:
if len(self.drivers) == 1:
# TODO: this should really go into documentation
log.debug(
'Auto generating coordinate map. Use make_matrix_coord_map '
'directly if more control needed.')
# was switched to y_flip, but need to keep vert_flip available
y_flip = y_flip or vert_flip
self.coord_map = make_matrix_coord_map(
self.width, self.height,
serpentine=serpentine,
rotation=rotation,
y_flip=vert_flip)
elif self.drivers:
raise TypeError(
'Must provide coord_map if using multiple drivers!')
self.set_pixel_positions(
make_matrix_coord_map_positions(self.coord_map))
# If rotation is 90 or 270 degrees, dimensions need to be swapped so
# they match the matrix rotation.
if rotation in (90, 270):
w = self.width
h = self.height
self.width = h
self.height = w
self.texture = None
self.set = self._setColor
if pw < 0 or pw > self.width or ph < 0 or ph > self.height:
raise ValueError(
'pixelSize must be greater than 0 '
'and not larger than total matrix')
if self.width % pw != 0 or self.height % ph != 0:
raise ValueError(
'pixelSize must evenly divide into matrix dimensions!')
if pw == 1 and ph == 1:
self._set = self.__setNormal
else:
self._set = self.__setScaled
self.width = self.width / pw
self.height = self.height / ph
self.numLEDs = self.width * self.height
self.fonts = font.fonts
@property
def shape(self):
"""Returns ``width, height``"""
return self.width, self.height
def get(self, x, y):
"""
Return the pixel color at position (x, y), or Colors.black if that
position is out-of-bounds.
"""
try:
pixel = self.coord_map[y][x]
return self._get_base(pixel)
except IndexError:
return colors.COLORS.Black
def set(self, x, y, color):
"""Set the pixel color at position x, y."""
# The actual implementation of this method is computed at construction
# time and monkey-patched in from one of self._setTexture,
# self.__setNormal or self.__setScaled
raise NotImplementedError
def get_pixel_positions(self):
return make_matrix_coord_map_positions(self.coord_map)
def loadFont(self, name, height, width, data):
self.fonts[name] = {
'data': data,
'height': height,
'width': width
}
def setTexture(self, tex=None):
if tex is None:
self.texture = tex
self.set = self._setColor
return
if not isinstance(tex, list):
raise ValueError('Texture must be a list!')
if len(tex) != self.height:
raise ValueError(
'Given texture is must be {} high!'.format(self.height))
for r in tex:
if not isinstance(r, list):
raise ValueError('Texture rows must be lists!')
if len(r) != self.width:
raise ValueError(
'Texture rows must be {} wide!'.format(self.width))
self.texture = tex
self.set = self._setTexture
def __setNormal(self, x, y, color):
try:
pixel = self.coord_map[y][x]
self._set_base(pixel, color)
except IndexError:
pass
def __setScaled(self, x, y, color):
sx = x * self.pixelSize[0]
sy = y * self.pixelSize[1]
for xs in range(sx, sx + self.pixelSize[0]):
for ys in range(sy, sy + self.pixelSize[1]):
self.__setNormal(xs, ys, color)
# Set single pixel to Color value
def _setColor(self, x, y, color=None):
try:
self._set(x, y, color or (0, 0, 0))
except IndexError:
pass
def _setTexture(self, x, y, color=None):
if x >= 0 and y >= 0:
try:
self._set(x, y, color or self.texture[y][x])
except IndexError:
pass
def setHSV(self, x, y, hsv):
color = colors.hsv2rgb(hsv)
self._set(x, y, color)
def setRGB(self, x, y, r, g, b):
color = (r, g, b)
self._set(x, y, color)
##########################################################################
# Drawing Functions
# Lovingly borrowed from Adafruit
# https://github.com/adafruit/Adafruit-GFX-Library/blob/master/Adafruit_GFX.cpp
##########################################################################
def drawCircle(self, x0, y0, r, color=None):
"""
Draw a circle in an RGB color, with center x0, y0 and radius r.
"""
md.draw_circle(self.set, x0, y0, r, color)
def fillCircle(self, x0, y0, r, color=None):
"""
Draw a filled circle in an RGB color, with center x0, y0 and radius r.
"""
md.fill_circle(self.set, x0, y0, r, color)
def drawLine(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):
"""
Draw a between x0, y0 and x1, y1 in an RGB color.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.draw_line(self.set, x0, y0, x1, y1, color, colorFunc, aa)
# Bresenham's algorithm
def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
"""
Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.
Will draw beyond matrix bounds.
"""
md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)
# Xiaolin Wu's Line Algorithm
def wu_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
"""
Draw a between x0, y0 and x1, y1 in an RGB color.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.wu_line(self.set, x0, y0, x1, y1, color, colorFunc)
def drawRect(self, x, y, w, h, color=None, aa=False):
"""
Draw rectangle with top-left corner at x,y, width w and height h
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.draw_rect(self.set, x, y, w, h, color, aa)
def fillRect(self, x, y, w, h, color=None, aa=False):
"""
Draw a solid rectangle with top-left corner at (x, y), width w and
height h.
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.fill_rect(self.set, x, y, w, h, color, aa)
def fillScreen(self, color=None):
"""Fill the matrix with the given RGB color"""
md.fill_rect(self.set, 0, 0, self.width, self.height, color)
def drawRoundRect(self, x, y, w, h, r, color=None, aa=False):
"""
Draw a rounded rectangle with top-left corner at (x, y), width w,
height h, and corner radius r
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.draw_round_rect(self.set, x, y, w, h, r, color, aa)
def fillRoundRect(self, x, y, w, h, r, color=None, aa=False):
"""
Draw a rounded rectangle with top-left corner at (x, y), width w,
height h, and corner radius r
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.fill_round_rect(self.set, x, y, w, h, r, color, aa)
def drawTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw triangle with vertices (x0, y0), (x1, y1) and (x2, y2)
:param aa: if True, use Bresenham's algorithm for line drawing;
Otherwise use Xiaolin Wu's algorithm
"""
md.draw_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""
Draw filled triangle with points x0,y0 - x1,y1 - x2,y2
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
if deprecated.allowed(): # pragma: no cover
fillTrangle = fillTriangle
def drawChar(self, x, y, c, color, bg,
aa=False, font=font.default_font, font_scale=1):
"""
Draw a single character c at at (x, y) in an RGB color.
"""
md.draw_char(self.fonts, self.set, self.width, self.height,
x, y, c, color, bg, aa, font, font_scale)
def drawText(self, text, x=0, y=0, color=None,
bg=colors.COLORS.Off, aa=False, font=font.default_font,
font_scale=1):
"""
Draw a line of text starting at (x, y) in an RGB color.
:param colorFunc: a function that takes an integer from x0 to x1 and
returns a color corresponding to that point
:param aa: if True, use Bresenham's algorithm for line drawing;
otherwise use Xiaolin Wu's algorithm
"""
md.draw_text(self.fonts, self.set, text, self.width, self.height,
x, y, color, bg, aa, font, font_scale)
if deprecated.allowed(): # pragma: no cover
LEDMatrix = Matrix
|
[
"math.sqrt"
] |
[((2157, 2180), 'math.sqrt', 'math.sqrt', (['self.numLEDs'], {}), '(self.numLEDs)\n', (2166, 2180), False, 'import math, threading, time\n')]
|
import discord
from discord.ext import commands
from core.classes import Cog_Extension
import requests
import os
data_prefix = {
"0": "天氣描述",
"1": "最高溫度",
"2": "最低溫度",
"3": "體感描述",
"4": "降水機率"
}
data_suffix = {
"0": "",
"1": "度",
"2": "度",
"3": "",
"4": "%"
}
time_range_title = {
"0": "時段一",
"1": "時段二",
"2": "時段三"
}
class WeatherQuery(Cog_Extension):
@commands.group()
async def wea(self, ctx):
pass
@wea.command()
async def query(self, ctx, target_county: str = ''):
response = requests.get(f'https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-C0032-001?Authorization={str(os.environ.get("PhantomTWWeatherApiKey"))}&format=json')
location_weather_data = response.json()["cwbopendata"]["dataset"]["location"]
county_weather_info = str()
if target_county == '':
target_county = ctx.author.roles[1].name
for item in location_weather_data:
if item["locationName"].find(target_county) != -1:
loc_json = item["weatherElement"]
county_weather_info += item["locationName"] + '\n'
for time_range in range(3):
county_weather_info += f'{time_range_title[str(time_range)]}::\n'
for (index, info) in enumerate(loc_json):
county_weather_info += f'{data_prefix[str(index)]}: {info["time"][time_range]["parameter"]["parameterName"]} {data_suffix[str(index)]}\n'
await ctx.send(county_weather_info)
county_weather_info = ''
def setup(bot):
bot.add_cog(WeatherQuery(bot))
|
[
"os.environ.get",
"discord.ext.commands.group"
] |
[((389, 405), 'discord.ext.commands.group', 'commands.group', ([], {}), '()\n', (403, 405), False, 'from discord.ext import commands\n'), ((643, 683), 'os.environ.get', 'os.environ.get', (['"""PhantomTWWeatherApiKey"""'], {}), "('PhantomTWWeatherApiKey')\n", (657, 683), False, 'import os\n')]
|
import fileinput
contents = [x.strip() for x in fileinput.input()]
departure = int(contents[0])
buses = contents[1].split(",")
# dummy big value to start with comparing
closest = 10000000000000000
for bus in buses:
if bus != "x":
bus = int(bus)
next_cycle = ((departure // bus) * bus + bus) - departure
if next_cycle < closest:
closest = next_cycle
bus_no = bus
print(bus_no*closest)
|
[
"fileinput.input"
] |
[((50, 67), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (65, 67), False, 'import fileinput\n')]
|
"""Training and testing the Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.
See the following paper for more information on the Pairwise Differentiable Gradient Descent (PDGD) algorithm.
* Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import copy
import itertools
from six.moves import zip
from tensorflow import dtypes
from ultra.learning_algorithm.base_algorithm import BaseAlgorithm
import ultra.utils as utils
import ultra
class PDGD(BaseAlgorithm):
"""The Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.
This class implements the Pairwise Differentiable Gradient Descent (PDGD) algorithm based on the input layer
feed. See the following paper for more information on the algorithm.
* Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
"""
def __init__(self, data_set, exp_settings, forward_only=False):
"""Create the model.
Args:
data_set: (Raw_data) The dataset used to build the input layer.
exp_settings: (dictionary) The dictionary containing the model settings.
forward_only: Set true to conduct prediction only, false to conduct training.
"""
print('Build Pairwise Differentiable Gradient Descent (PDGD) algorithm.')
self.hparams = ultra.utils.hparams.HParams(
learning_rate=0.05, # Learning rate (\mu).
# Scalar for the probability distribution.
tau=1,
max_gradient_norm=1.0, # Clip gradients to this norm.
# Set strength for L2 regularization.
l2_loss=0.005,
grad_strategy='ada', # Select gradient strategy
)
print(exp_settings['learning_algorithm_hparams'])
self.hparams.parse(exp_settings['learning_algorithm_hparams'])
self.exp_settings = exp_settings
self.model = None
self.max_candidate_num = exp_settings['max_candidate_num']
self.feature_size = data_set.feature_size
self.learning_rate = tf.Variable(
float(self.hparams.learning_rate), trainable=False)
# Feeds for inputs.
self.is_training = tf.placeholder(tf.bool, name="is_train")
self.docid_inputs = [] # a list of top documents
self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
name="letor_features") # the letor features for the documents
self.labels = [] # the labels for the documents (e.g., clicks)
for i in range(self.max_candidate_num):
self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
name="docid_input{0}".format(i)))
self.labels.append(tf.placeholder(tf.float32, shape=[None],
name="label{0}".format(i)))
self.global_step = tf.Variable(0, trainable=False)
self.output = tf.concat(
self.get_ranking_scores(
self.docid_inputs,
is_training=self.is_training,
scope='ranking_model'),
1)
# reshape from [rank_list_size, ?] to [?, rank_list_size]
reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels))
pad_removed_output = self.remove_padding_for_metric_eval(
self.docid_inputs, self.output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = ultra.utils.make_ranking_metric_fn(
metric, topn)(reshaped_labels, pad_removed_output, None)
tf.summary.scalar(
'%s_%d' %
(metric, topn), metric_value, collections=['eval'])
# Build model
if not forward_only:
self.rank_list_size = exp_settings['train_list_cutoff']
self.train_output = self.ranking_model(
self.rank_list_size, scope='ranking_model')
train_labels = self.labels[:self.rank_list_size]
# reshape from [rank_list_size, ?] to [?, rank_list_size]
reshaped_train_labels = tf.transpose(
tf.convert_to_tensor(train_labels))
pad_removed_output = self.remove_padding_for_metric_eval(
self.docid_inputs, self.train_output)
for metric in self.exp_settings['metrics']:
for topn in self.exp_settings['metrics_topn']:
metric_value = ultra.utils.make_ranking_metric_fn(metric, topn)(
reshaped_train_labels, pad_removed_output, None)
tf.summary.scalar(
'%s_%d' %
(metric, topn), metric_value, collections=['train_eval'])
# Build training pair inputs only when it is training
self.positive_docid_inputs = tf.placeholder(
tf.int64, shape=[None], name="positive_docid_input")
self.negative_docid_inputs = tf.placeholder(
tf.int64, shape=[None], name="negative_docid_input")
self.pair_weights = tf.placeholder(
tf.float32, shape=[None], name="pair_weight")
# Build ranking loss
pair_scores = self.get_ranking_scores(
[self.positive_docid_inputs,
self.negative_docid_inputs], is_training=self.is_training, scope='ranking_model'
)
self.loss = tf.reduce_sum(
tf.math.multiply(
#self.pairwise_cross_entropy_loss(pair_scores[0], pair_scores[1]),
tf.reduce_sum(-tf.exp(pair_scores[0]) / (
tf.exp(pair_scores[0]) + tf.exp(pair_scores[1])), 1),
self.pair_weights
)
)
params = tf.trainable_variables()
if self.hparams.l2_loss > 0:
for p in params:
self.loss += self.hparams.l2_loss * tf.nn.l2_loss(p)
# Select optimizer
self.optimizer_func = tf.train.AdagradOptimizer
if self.hparams.grad_strategy == 'sgd':
self.optimizer_func = tf.train.GradientDescentOptimizer
# Gradients and SGD update operation for training the model.
opt = self.optimizer_func(self.hparams.learning_rate)
self.gradients = tf.gradients(self.loss, params)
if self.hparams.max_gradient_norm > 0:
self.clipped_gradients, self.norm = tf.clip_by_global_norm(self.gradients,
self.hparams.max_gradient_norm)
self.updates = opt.apply_gradients(zip(self.clipped_gradients, params),
global_step=self.global_step)
tf.summary.scalar(
'Gradient Norm',
self.norm,
collections=['train'])
else:
self.norm = None
self.updates = opt.apply_gradients(zip(self.gradients, params),
global_step=self.global_step)
tf.summary.scalar(
'Learning Rate',
self.learning_rate,
collections=['train'])
tf.summary.scalar('Loss', self.loss, collections=['train'])
self.train_summary = tf.summary.merge_all(key='train')
self.train_eval_summary = tf.summary.merge_all(key='train_eval')
self.eval_summary = tf.summary.merge_all(key='eval')
self.saver = tf.train.Saver(tf.global_variables())
def step(self, session, input_feed, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: (tf.Session) tensorflow session to use.
input_feed: (dictionary) A dictionary containing all the input feed data.
forward_only: whether to do the backward step (False) or only forward (True).
Returns:
A triple consisting of the loss, outputs (None if we do backward),
and a tf.summary containing related information about the step.
"""
if not forward_only:
# Run the model to get ranking scores
input_feed[self.is_training.name] = False
rank_outputs = session.run(
[self.train_output, self.train_eval_summary], input_feed)
# reduce value to avoid numerical problems
rank_outputs[0] = np.array(rank_outputs[0])
rank_outputs[0] = rank_outputs[0] - \
np.amax(rank_outputs[0], axis=1, keepdims=True)
exp_ranking_scores = np.exp(self.hparams.tau * rank_outputs[0])
# Remove scores for padding documents
letor_features_length = len(input_feed[self.letor_features.name])
for i in range(len(input_feed[self.labels[0].name])):
for j in range(self.rank_list_size):
# not a valid doc
if input_feed[self.docid_inputs[j].name][i] == letor_features_length:
exp_ranking_scores[i][j] = 0.0
# Compute denominator for each position
denominators = np.cumsum(
exp_ranking_scores[:, ::-1], axis=1)[:, ::-1]
sum_log_denominators = np.sum(
np.log(
denominators,
out=np.zeros_like(denominators),
where=denominators > 0),
axis=1)
# Create training pairs based on the ranking scores and the labels
positive_docids, negative_docids, pair_weights = [], [], []
for i in range(len(input_feed[self.labels[0].name])):
# Generate pairs and compute weights
for j in range(self.rank_list_size):
l = self.rank_list_size - 1 - j
# not a valid doc
if input_feed[self.docid_inputs[l].name][i] == letor_features_length:
continue
if input_feed[self.labels[l].name][i] > 0: # a clicked doc
for k in range(l + 2):
# find a negative/unclicked doc
if k < self.rank_list_size and input_feed[self.labels[k]
.name][i] < input_feed[self.labels[l].name][i]:
# not a valid doc
if input_feed[self.docid_inputs[k]
.name][i] == letor_features_length:
continue
positive_docids.append(
input_feed[self.docid_inputs[l].name][i])
negative_docids.append(
input_feed[self.docid_inputs[k].name][i])
flipped_exp_scores = np.copy(
exp_ranking_scores[i])
flipped_exp_scores[k] = exp_ranking_scores[i][l]
flipped_exp_scores[l] = exp_ranking_scores[i][k]
flipped_denominator = np.cumsum(
flipped_exp_scores[::-1])[::-1]
sum_log_flipped_denominator = np.sum(
np.log(
flipped_denominator,
out=np.zeros_like(flipped_denominator),
where=flipped_denominator > 0))
#p_r = np.prod(rank_prob[i][min_i:max_i+1])
#p_rs = np.prod(flipped_rank_prob[min_i:max_i+1])
# weight = p_rs / (p_r + p_rs) = 1 / (1 +
# (d_rs/d_r)) = 1 / (1 + exp(log_drs - log_dr))
weight = 1.0 / \
(1.0 +
np.exp(min(sum_log_flipped_denominator -
sum_log_denominators[i], 20)))
if np.isnan(weight):
print('SOMETHING WRONG!!!!!!!')
print(
'sum_log_denominators[i] is nan: ' + str(np.isnan(sum_log_denominators[i])))
print('sum_log_flipped_denominator is nan ' +
str(np.isnan(sum_log_flipped_denominator)))
pair_weights.append(weight)
input_feed[self.positive_docid_inputs.name] = positive_docids
input_feed[self.negative_docid_inputs.name] = negative_docids
input_feed[self.pair_weights.name] = pair_weights
# Train the model
input_feed[self.is_training.name] = True
train_outputs = session.run([
self.updates, # Update Op that does SGD.
self.loss, # Loss for this batch.
self.train_summary # Summarize statistics.
], input_feed)
summary = utils.merge_TFSummary(
[rank_outputs[-1], train_outputs[-1]], [0.5, 0.5])
# loss, no outputs, summary.
return train_outputs[1], rank_outputs, summary
else:
input_feed[self.is_training.name] = False
output_feed = [
self.eval_summary, # Summarize statistics.
self.output # Model outputs
]
outputs = session.run(output_feed, input_feed)
return None, outputs[1], outputs[0] # loss, outputs, summary.
|
[
"tensorflow.trainable_variables",
"numpy.isnan",
"ultra.utils.hparams.HParams",
"six.moves.zip",
"tensorflow.global_variables",
"tensorflow.Variable",
"numpy.exp",
"tensorflow.clip_by_global_norm",
"numpy.zeros_like",
"numpy.copy",
"tensorflow.placeholder",
"numpy.cumsum",
"tensorflow.exp",
"tensorflow.gradients",
"tensorflow.summary.merge_all",
"tensorflow.summary.scalar",
"ultra.utils.merge_TFSummary",
"tensorflow.convert_to_tensor",
"numpy.amax",
"ultra.utils.make_ranking_metric_fn",
"numpy.array",
"tensorflow.nn.l2_loss"
] |
[((1873, 1991), 'ultra.utils.hparams.HParams', 'ultra.utils.hparams.HParams', ([], {'learning_rate': '(0.05)', 'tau': '(1)', 'max_gradient_norm': '(1.0)', 'l2_loss': '(0.005)', 'grad_strategy': '"""ada"""'}), "(learning_rate=0.05, tau=1, max_gradient_norm=\n 1.0, l2_loss=0.005, grad_strategy='ada')\n", (1900, 1991), False, 'import ultra\n'), ((2757, 2797), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_train"""'}), "(tf.bool, name='is_train')\n", (2771, 2797), True, 'import tensorflow as tf\n'), ((2886, 2973), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.feature_size]', 'name': '"""letor_features"""'}), "(tf.float32, shape=[None, self.feature_size], name=\n 'letor_features')\n", (2900, 2973), True, 'import tensorflow as tf\n'), ((3510, 3541), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3521, 3541), True, 'import tensorflow as tf\n'), ((8084, 8117), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train"""'}), "(key='train')\n", (8104, 8117), True, 'import tensorflow as tf\n'), ((8152, 8190), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train_eval"""'}), "(key='train_eval')\n", (8172, 8190), True, 'import tensorflow as tf\n'), ((8219, 8251), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""eval"""'}), "(key='eval')\n", (8239, 8251), True, 'import tensorflow as tf\n'), ((3855, 3888), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.labels'], {}), '(self.labels)\n', (3875, 3888), True, 'import tensorflow as tf\n'), ((5521, 5588), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""positive_docid_input"""'}), "(tf.int64, shape=[None], name='positive_docid_input')\n", (5535, 5588), True, 'import tensorflow as tf\n'), ((5647, 5714), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""negative_docid_input"""'}), "(tf.int64, shape=[None], name='negative_docid_input')\n", (5661, 5714), True, 'import tensorflow as tf\n'), ((5764, 5824), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""pair_weight"""'}), "(tf.float32, shape=[None], name='pair_weight')\n", (5778, 5824), True, 'import tensorflow as tf\n'), ((6478, 6502), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6500, 6502), True, 'import tensorflow as tf\n'), ((7035, 7066), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'params'], {}), '(self.loss, params)\n', (7047, 7066), True, 'import tensorflow as tf\n'), ((7855, 7932), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning Rate"""', 'self.learning_rate'], {'collections': "['train']"}), "('Learning Rate', self.learning_rate, collections=['train'])\n", (7872, 7932), True, 'import tensorflow as tf\n'), ((7994, 8053), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'self.loss'], {'collections': "['train']"}), "('Loss', self.loss, collections=['train'])\n", (8011, 8053), True, 'import tensorflow as tf\n'), ((8288, 8309), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8307, 8309), True, 'import tensorflow as tf\n'), ((9200, 9225), 'numpy.array', 'np.array', (['rank_outputs[0]'], {}), '(rank_outputs[0])\n', (9208, 9225), True, 'import numpy as np\n'), ((9373, 9415), 'numpy.exp', 'np.exp', (['(self.hparams.tau * rank_outputs[0])'], {}), '(self.hparams.tau * rank_outputs[0])\n', (9379, 9415), True, 'import numpy as np\n'), ((14032, 14104), 'ultra.utils.merge_TFSummary', 'utils.merge_TFSummary', (['[rank_outputs[-1], train_outputs[-1]]', '[0.5, 0.5]'], {}), '([rank_outputs[-1], train_outputs[-1]], [0.5, 0.5])\n', (14053, 14104), True, 'import ultra.utils as utils\n'), ((4271, 4350), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': "['eval']"}), "('%s_%d' % (metric, topn), metric_value, collections=['eval'])\n", (4288, 4350), True, 'import tensorflow as tf\n'), ((4821, 4855), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['train_labels'], {}), '(train_labels)\n', (4841, 4855), True, 'import tensorflow as tf\n'), ((7170, 7240), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.gradients', 'self.hparams.max_gradient_norm'], {}), '(self.gradients, self.hparams.max_gradient_norm)\n', (7192, 7240), True, 'import tensorflow as tf\n'), ((7501, 7569), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Gradient Norm"""', 'self.norm'], {'collections': "['train']"}), "('Gradient Norm', self.norm, collections=['train'])\n", (7518, 7569), True, 'import tensorflow as tf\n'), ((9292, 9339), 'numpy.amax', 'np.amax', (['rank_outputs[0]'], {'axis': '(1)', 'keepdims': '(True)'}), '(rank_outputs[0], axis=1, keepdims=True)\n', (9299, 9339), True, 'import numpy as np\n'), ((9926, 9972), 'numpy.cumsum', 'np.cumsum', (['exp_ranking_scores[:, ::-1]'], {'axis': '(1)'}), '(exp_ranking_scores[:, ::-1], axis=1)\n', (9935, 9972), True, 'import numpy as np\n'), ((4142, 4190), 'ultra.utils.make_ranking_metric_fn', 'ultra.utils.make_ranking_metric_fn', (['metric', 'topn'], {}), '(metric, topn)\n', (4176, 4190), False, 'import ultra\n'), ((5278, 5368), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('%s_%d' % (metric, topn))", 'metric_value'], {'collections': "['train_eval']"}), "('%s_%d' % (metric, topn), metric_value, collections=[\n 'train_eval'])\n", (5295, 5368), True, 'import tensorflow as tf\n'), ((7367, 7402), 'six.moves.zip', 'zip', (['self.clipped_gradients', 'params'], {}), '(self.clipped_gradients, params)\n', (7370, 7402), False, 'from six.moves import zip\n'), ((7733, 7760), 'six.moves.zip', 'zip', (['self.gradients', 'params'], {}), '(self.gradients, params)\n', (7736, 7760), False, 'from six.moves import zip\n'), ((5135, 5183), 'ultra.utils.make_ranking_metric_fn', 'ultra.utils.make_ranking_metric_fn', (['metric', 'topn'], {}), '(metric, topn)\n', (5169, 5183), False, 'import ultra\n'), ((6633, 6649), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['p'], {}), '(p)\n', (6646, 6649), True, 'import tensorflow as tf\n'), ((10124, 10151), 'numpy.zeros_like', 'np.zeros_like', (['denominators'], {}), '(denominators)\n', (10137, 10151), True, 'import numpy as np\n'), ((6282, 6304), 'tensorflow.exp', 'tf.exp', (['pair_scores[0]'], {}), '(pair_scores[0])\n', (6288, 6304), True, 'import tensorflow as tf\n'), ((6333, 6355), 'tensorflow.exp', 'tf.exp', (['pair_scores[0]'], {}), '(pair_scores[0])\n', (6339, 6355), True, 'import tensorflow as tf\n'), ((6358, 6380), 'tensorflow.exp', 'tf.exp', (['pair_scores[1]'], {}), '(pair_scores[1])\n', (6364, 6380), True, 'import tensorflow as tf\n'), ((11713, 11743), 'numpy.copy', 'np.copy', (['exp_ranking_scores[i]'], {}), '(exp_ranking_scores[i])\n', (11720, 11743), True, 'import numpy as np\n'), ((13000, 13016), 'numpy.isnan', 'np.isnan', (['weight'], {}), '(weight)\n', (13008, 13016), True, 'import numpy as np\n'), ((11997, 12032), 'numpy.cumsum', 'np.cumsum', (['flipped_exp_scores[::-1]'], {}), '(flipped_exp_scores[::-1])\n', (12006, 12032), True, 'import numpy as np\n'), ((12296, 12330), 'numpy.zeros_like', 'np.zeros_like', (['flipped_denominator'], {}), '(flipped_denominator)\n', (12309, 12330), True, 'import numpy as np\n'), ((13210, 13243), 'numpy.isnan', 'np.isnan', (['sum_log_denominators[i]'], {}), '(sum_log_denominators[i])\n', (13218, 13243), True, 'import numpy as np\n'), ((13374, 13411), 'numpy.isnan', 'np.isnan', (['sum_log_flipped_denominator'], {}), '(sum_log_flipped_denominator)\n', (13382, 13411), True, 'import numpy as np\n')]
|
"""
* https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv/
You are given an integer array prices where prices[i] is the price of a given stock on the ith day.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Notice that you may not engage in multiple transactions simultaneously (i.e., you must sell the stock before you buy again).
"""
def max_profit_unlimited(prices, i, j):
# max profit between i and j with unlimited transcations
steps = 0
for x in range(i + 1, j + 1):
steps += max(0, prices[x] - prices[x - 1])
return steps
def max_profit_ij(prices, i, j):
# max profit with at most 1 transcation from day 0 to day i
l = len(prices)
if i < 0 or i >= l or j < 0 or j >= l or j <= i:
return 0
lowest = prices[i]
max_profit = 0
for x in range(i, j + 1):
price = prices[x]
max_profit = max(max_profit, price - lowest)
lowest = min(lowest, price)
return max_profit
def solution1(k, prices):
# divide conquer; Time O(2^N); Space(1); TLE
l = len(prices)
if l <= 1:
return 0
if 2 * k >= l:
return max_profit_unlimited(prices, 0, l - 1)
def max_profit_from_i(k, i):
# max profit with at most k transactions from day i
if k < 1:
return 0
elif k == 1:
return max_profit_ij(prices, i, l - 1)
max_profit = 0
for j in range(i, l):
max_profit_j = max_profit_ij(prices, i, j) + max_profit_from_i(k - 1, j)
max_profit = max(max_profit, max_profit_j)
return max_profit
return max_profit_from_i(k, 0)
def solution2(k, prices):
# Time: O(K*N); Space: O(K*N)
l = len(prices)
if l <= 1:
return 0
if 2 * k >= l:
return max_profit_unlimited(prices, 0, l - 1)
cash_by_k = [[0] * l for _ in range(k + 1) ]
for k0 in range(1, k + 1):
cash_last_around = cash_by_k[k0 - 1]
cash_curr_around = cash_by_k[k0]
max_profit_may_with_trans = cash_last_around[0]
for today in range(1, l):
yesterday = today - 1
max_profit_may_with_trans = max(
cash_last_around[today],
max_profit_may_with_trans + prices[today] - prices[yesterday])
cash_curr_around[today] = max(cash_curr_around[yesterday], max_profit_may_with_trans)
return cash_by_k[-1][-1]
def solution3(k, prices):
l = len(prices)
if l <= 1 or k == 0:
return 0
if 2 * k >= l:
return max_profit_unlimited(prices, 0, l - 1)
def find_slop(start):
i = start + 1
while i < l and prices[i] >= prices[i - 1]:
i += 1
return (start, i - 1)
slops = []
i = 0
while i < l:
(start, end) = find_slop(i)
if end > start:
slops.append((start, end))
i = end + 1
while len(slops) > k:
# one merge: two near slops with min profit lost
min_merge_lost = float('inf')
to_merge = (0, 1)
for i in range(1, len(slops)):
s1, s2 = slops[i - 1], slops[i]
merge_lost = min(
prices[s1[1]] - prices[s1[0]],
prices[s2[1]] - prices[s2[0]],
prices[s1[1]] - prices[s2[0]],)
if merge_lost < min_merge_lost:
min_merge_lost = merge_lost
to_merge = (i - 1, i)
s1, s2 = slops[to_merge[0]], slops[to_merge[1]]
p1, p2 = prices[s1[1]] - prices[s1[0]], prices[s2[1]] - prices[s2[0]]
p_merge = prices[s2[1]] - prices[s1[0]]
if p_merge > p1 and p_merge > p2:
merge_to = (s1[0], s2[1])
else:
merge_to = s1 if p1 > p2 else s2
slops[to_merge[0]] = merge_to
slops.pop(to_merge[1])
return sum([prices[end] - prices[start] for (start, end) in slops])
import unittest
from unittest_data_provider import data_provider
def data():
return [
(2, 2, [2,4,1]),
(7, 2, [3,2,6,5,0,3]),
(0, 0, [1, 3]),
(6, 2, [3,3,5,0,0,3,1,4]),
(5, 1, [6,1,6,4,3,0,2]),
(5, 1, [8,9,6,1,6,4,3,0,2]),
(11, 2, [8,6,4,3,3,2,3,5,8,3,8,2,6]),
]
def big_data():
return [
(482, 11, [48,12,60,93,97,42,25,64,17,56,85,93,9,48,52,42,58,85,81,84,69,36,1,54,23,15,72,15,11,94]),
]
class Tests(unittest.TestCase):
@data_provider(data)
def test_all_solutions(self, expected, *argv):
for n in range(1, 10):
fn_name = 'solution' + str(n)
if fn_name in globals():
fn = globals()[fn_name]
#print('Expect %s. Testing %s with input %s' % (str(expected), fn_name, str(argv)))
self.assertEqual(expected, fn(*argv))
@data_provider(big_data)
def test_big_input(self, expected, *argv):
self.assertEqual(expected, solution2(*argv))
self.assertEqual(expected, solution3(*argv))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"unittest_data_provider.data_provider",
"unittest.TestLoader"
] |
[((4424, 4443), 'unittest_data_provider.data_provider', 'data_provider', (['data'], {}), '(data)\n', (4437, 4443), False, 'from unittest_data_provider import data_provider\n'), ((4805, 4828), 'unittest_data_provider.data_provider', 'data_provider', (['big_data'], {}), '(big_data)\n', (4818, 4828), False, 'from unittest_data_provider import data_provider\n'), ((5022, 5043), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (5041, 5043), False, 'import unittest\n'), ((5077, 5113), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5100, 5113), False, 'import unittest\n')]
|
from sqlalchemy_utils import EmailType, PhoneNumberType
from flask_ecom_api.api.v1.customers.admin import (
CustomerAdminView,
CustomerShippingAddressAdminView,
)
from flask_ecom_api.api.v1.orders.models import Order
from flask_ecom_api.app import admin, db
class Customer(db.Model):
"""Customer model."""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(
db.String(length=50),
index=True,
unique=True,
nullable=False,
)
date_of_birth = db.Column(db.DateTime)
email = db.Column(
EmailType,
index=True,
unique=True,
nullable=False,
)
shipping_addresses = db.relationship(
'CustomerShippingAddress',
backref='customer',
lazy='joined',
)
orders = db.relationship(Order, lazy='joined')
def __repr__(self):
"""Printable representation of Customer model."""
return f'<Customer id: {self.id}, customer name: {self.name}>'
class CustomerShippingAddress(db.Model):
"""Customer shipping address model."""
id = db.Column(db.Integer, primary_key=True)
customer_id = db.Column(
db.Integer,
db.ForeignKey('customer.id'),
index=True,
nullable=False,
)
first_name = db.Column(db.String(50), nullable=False)
last_name = db.Column(db.String(50), nullable=False)
phone_number = db.Column(PhoneNumberType())
country = db.Column(db.String(20), nullable=False)
city = db.Column(db.String(20), nullable=False)
street = db.Column(db.String(20), nullable=False)
house_number = db.Column(db.Integer, nullable=False)
apartment_number = db.Column(db.Integer, nullable=False)
postcode = db.Column(db.Integer, nullable=False)
comment = db.Column(db.String(140))
customers = db.relationship('Customer', lazy='joined')
def __repr__(self):
"""Printable representation of CustomerShippingAddress model."""
return f'<Customer shipping address id: {self.id}>'
admin.add_view(
CustomerAdminView(
Customer,
db.session,
category='Customers',
),
)
admin.add_view(
CustomerShippingAddressAdminView(
CustomerShippingAddress,
db.session,
category='Customers',
),
)
|
[
"flask_ecom_api.api.v1.customers.admin.CustomerAdminView",
"flask_ecom_api.api.v1.customers.admin.CustomerShippingAddressAdminView",
"flask_ecom_api.app.db.relationship",
"sqlalchemy_utils.PhoneNumberType",
"flask_ecom_api.app.db.Column",
"flask_ecom_api.app.db.String",
"flask_ecom_api.app.db.ForeignKey"
] |
[((331, 370), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (340, 370), False, 'from flask_ecom_api.app import admin, db\n'), ((514, 536), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (523, 536), False, 'from flask_ecom_api.app import admin, db\n'), ((549, 610), 'flask_ecom_api.app.db.Column', 'db.Column', (['EmailType'], {'index': '(True)', 'unique': '(True)', 'nullable': '(False)'}), '(EmailType, index=True, unique=True, nullable=False)\n', (558, 610), False, 'from flask_ecom_api.app import admin, db\n'), ((676, 753), 'flask_ecom_api.app.db.relationship', 'db.relationship', (['"""CustomerShippingAddress"""'], {'backref': '"""customer"""', 'lazy': '"""joined"""'}), "('CustomerShippingAddress', backref='customer', lazy='joined')\n", (691, 753), False, 'from flask_ecom_api.app import admin, db\n'), ((798, 835), 'flask_ecom_api.app.db.relationship', 'db.relationship', (['Order'], {'lazy': '"""joined"""'}), "(Order, lazy='joined')\n", (813, 835), False, 'from flask_ecom_api.app import admin, db\n'), ((1086, 1125), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1095, 1125), False, 'from flask_ecom_api.app import admin, db\n'), ((1606, 1643), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (1615, 1643), False, 'from flask_ecom_api.app import admin, db\n'), ((1667, 1704), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (1676, 1704), False, 'from flask_ecom_api.app import admin, db\n'), ((1720, 1757), 'flask_ecom_api.app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (1729, 1757), False, 'from flask_ecom_api.app import admin, db\n'), ((1815, 1857), 'flask_ecom_api.app.db.relationship', 'db.relationship', (['"""Customer"""'], {'lazy': '"""joined"""'}), "('Customer', lazy='joined')\n", (1830, 1857), False, 'from flask_ecom_api.app import admin, db\n'), ((2038, 2099), 'flask_ecom_api.api.v1.customers.admin.CustomerAdminView', 'CustomerAdminView', (['Customer', 'db.session'], {'category': '"""Customers"""'}), "(Customer, db.session, category='Customers')\n", (2055, 2099), False, 'from flask_ecom_api.api.v1.customers.admin import CustomerAdminView, CustomerShippingAddressAdminView\n'), ((2155, 2250), 'flask_ecom_api.api.v1.customers.admin.CustomerShippingAddressAdminView', 'CustomerShippingAddressAdminView', (['CustomerShippingAddress', 'db.session'], {'category': '"""Customers"""'}), "(CustomerShippingAddress, db.session,\n category='Customers')\n", (2187, 2250), False, 'from flask_ecom_api.api.v1.customers.admin import CustomerAdminView, CustomerShippingAddressAdminView\n'), ((401, 421), 'flask_ecom_api.app.db.String', 'db.String', ([], {'length': '(50)'}), '(length=50)\n', (410, 421), False, 'from flask_ecom_api.app import admin, db\n'), ((1183, 1211), 'flask_ecom_api.app.db.ForeignKey', 'db.ForeignKey', (['"""customer.id"""'], {}), "('customer.id')\n", (1196, 1211), False, 'from flask_ecom_api.app import admin, db\n'), ((1290, 1303), 'flask_ecom_api.app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (1299, 1303), False, 'from flask_ecom_api.app import admin, db\n'), ((1347, 1360), 'flask_ecom_api.app.db.String', 'db.String', (['(50)'], {}), '(50)\n', (1356, 1360), False, 'from flask_ecom_api.app import admin, db\n'), ((1407, 1424), 'sqlalchemy_utils.PhoneNumberType', 'PhoneNumberType', ([], {}), '()\n', (1422, 1424), False, 'from sqlalchemy_utils import EmailType, PhoneNumberType\n'), ((1450, 1463), 'flask_ecom_api.app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1459, 1463), False, 'from flask_ecom_api.app import admin, db\n'), ((1502, 1515), 'flask_ecom_api.app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1511, 1515), False, 'from flask_ecom_api.app import admin, db\n'), ((1556, 1569), 'flask_ecom_api.app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (1565, 1569), False, 'from flask_ecom_api.app import admin, db\n'), ((1782, 1796), 'flask_ecom_api.app.db.String', 'db.String', (['(140)'], {}), '(140)\n', (1791, 1796), False, 'from flask_ecom_api.app import admin, db\n')]
|
import factory
from ipaddr import IPv4Network
from pycroft.model.net import VLAN, Subnet
from tests.factories.base import BaseFactory
class VLANFactory(BaseFactory):
class Meta:
model = VLAN
name = factory.Sequence(lambda n: "vlan{}".format(n+1))
vid = factory.Sequence(lambda n: n+1)
class SubnetFactory(BaseFactory):
class Meta:
model = Subnet
exclude = ('str_address',)
str_address = factory.Faker('ipv4', network=True)
address = factory.LazyAttribute(lambda o: IPv4Network(o.str_address))
vlan = factory.SubFactory(VLANFactory)
|
[
"factory.SubFactory",
"ipaddr.IPv4Network",
"factory.Faker",
"factory.Sequence"
] |
[((277, 310), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n + 1)'], {}), '(lambda n: n + 1)\n', (293, 310), False, 'import factory\n'), ((438, 473), 'factory.Faker', 'factory.Faker', (['"""ipv4"""'], {'network': '(True)'}), "('ipv4', network=True)\n", (451, 473), False, 'import factory\n'), ((559, 590), 'factory.SubFactory', 'factory.SubFactory', (['VLANFactory'], {}), '(VLANFactory)\n', (577, 590), False, 'import factory\n'), ((520, 546), 'ipaddr.IPv4Network', 'IPv4Network', (['o.str_address'], {}), '(o.str_address)\n', (531, 546), False, 'from ipaddr import IPv4Network\n')]
|
import torch
x = torch.Tensor([0, 1, 2, 3]).requires_grad_()
y = torch.Tensor([4, 5, 6, 7]).requires_grad_()
w = torch.Tensor([1, 2, 3, 4]).requires_grad_()
z = x+y
def hook_fn(grad):
print(grad)
handle_1 = z.register_hook(hook_fn)
o = w.matmul(z)
def hook_fn2(grad):
print('grad')
handle_2 = z.register_hook(hook_fn2)
handle_2.remove()
print('=====Start backprop=====')
o.backward()
print('=====End backprop=====')
print('x.grad:', x.grad)
print('y.grad:', y.grad)
print('w.grad:', w.grad)
print('z.grad:', z.grad)
|
[
"torch.Tensor"
] |
[((18, 44), 'torch.Tensor', 'torch.Tensor', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (30, 44), False, 'import torch\n'), ((66, 92), 'torch.Tensor', 'torch.Tensor', (['[4, 5, 6, 7]'], {}), '([4, 5, 6, 7])\n', (78, 92), False, 'import torch\n'), ((114, 140), 'torch.Tensor', 'torch.Tensor', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (126, 140), False, 'import torch\n')]
|
import adv.adv_test
from core.advbase import *
from module.bleed import Bleed
from slot.a import *
from slot.d import *
def module():
return Botan
class Botan(Adv):
# comment = "RR+Jewels"
a3 = ('prep_charge',0.05)
conf = {}
conf['slots.a'] = RR() + BN()
conf['slots.d'] = Shinobi()
conf['acl'] = """
`s2, pin='prep' or fsc
`s1, (x=5 or fsc) and self.bleed._static['stacks']<3
`s3, x=5 or fsc
`fs, x=5
"""
def init(self):
if self.condition('buff all team'):
self.s2_proc = self.c_s2_proc
def prerun(self):
self.bleed = Bleed("g_bleed",0).reset()
def s1_proc(self, e):
Bleed("s1", 1.46).on()
def c_s2_proc(self, e):
Teambuff('s2',0.1,15,'crit','chance').on()
def s2_proc(self, e):
Selfbuff('s2',0.1,15,'crit','chance').on()
if __name__ == '__main__':
conf = {}
adv.adv_test.test(module(), conf)
|
[
"module.bleed.Bleed"
] |
[((630, 649), 'module.bleed.Bleed', 'Bleed', (['"""g_bleed"""', '(0)'], {}), "('g_bleed', 0)\n", (635, 649), False, 'from module.bleed import Bleed\n'), ((692, 709), 'module.bleed.Bleed', 'Bleed', (['"""s1"""', '(1.46)'], {}), "('s1', 1.46)\n", (697, 709), False, 'from module.bleed import Bleed\n')]
|
from django.db import models
import datetime
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=16, primary_key=True)
password = models.CharField(max_length=64)
email = models.EmailField()
is_confirmed = models.BooleanField()
def __unicode__(self):
return self.username
class Meta:
db_table="bio_user"
class UserSafety(models.Model):
user = models.ForeignKey(User)
activation_key = models.CharField(max_length=64, blank=True)
key_expires = models.DateTimeField(default=datetime.date.today())
def __unicode__(self):
return self.user.username
class Meta:
db_table = 'bio_usersafety'
class loginRecord(models.Model):
identity = models.CharField(max_length=64)
login_time = models.DateTimeField(auto_now_add=True)
login_ip = models.CharField(max_length=64, null=True)
isSuccess = models.BooleanField(default=False)
def __unicode__(self):
return self.identity
class Meta:
db_table = 'record_login_record'
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"datetime.date.today",
"django.db.models.BooleanField",
"django.db.models.EmailField",
"django.db.models.DateTimeField"
] |
[((115, 164), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'primary_key': '(True)'}), '(max_length=16, primary_key=True)\n', (131, 164), False, 'from django.db import models\n'), ((180, 211), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (196, 211), False, 'from django.db import models\n'), ((224, 243), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (241, 243), False, 'from django.db import models\n'), ((263, 284), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (282, 284), False, 'from django.db import models\n'), ((433, 456), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (450, 456), False, 'from django.db import models\n'), ((478, 521), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'blank': '(True)'}), '(max_length=64, blank=True)\n', (494, 521), False, 'from django.db import models\n'), ((757, 788), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (773, 788), False, 'from django.db import models\n'), ((806, 845), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (826, 845), False, 'from django.db import models\n'), ((861, 903), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (877, 903), False, 'from django.db import models\n'), ((920, 954), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (939, 954), False, 'from django.db import models\n'), ((569, 590), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (588, 590), False, 'import datetime\n')]
|
import serial,time
ser = None
def InitSerial(port,baudrate,timeout):
global ser
reply = 'None'
try:
ser = serial.Serial(port,baudrate = baudrate,timeout = timeout) # open serial port
except Exception as e:
reply = e
return reply
def N_Serial():
global ser
n = ser.inWaiting()
return str(n)
def WriteSerial(text):
global ser
send = bytes(text, 'ascii')
n = ser.write(send)
return str(n)
def ReadSerial(len):
global ser
return str(ser.read(len).decode())
def CloseSerial():
global ser
ser.close() # close port
|
[
"serial.Serial"
] |
[((129, 184), 'serial.Serial', 'serial.Serial', (['port'], {'baudrate': 'baudrate', 'timeout': 'timeout'}), '(port, baudrate=baudrate, timeout=timeout)\n', (142, 184), False, 'import serial, time\n')]
|
# coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; <NAME>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
import sys
import codecs
def process_files(decode_log_file, vocab_file):
""" process decode log, generate label file and results files
"""
vocab = {}
with codecs.open(vocab_file, "r", "utf-8") as vocab_file:
for line in vocab_file:
phone, num = line.strip().split()
vocab[int(num)] = phone
decode_result = open(decode_log_file + ".result", "w", encoding="utf8")
label_result = open(decode_log_file + ".label", "w", encoding="utf8")
with open(decode_log_file, "r") as fin:
to_continue = False
total_line = ""
for line in fin.readlines():
if "predictions" in line:
total_line = line.strip() + " "
to_continue = True
elif to_continue:
total_line += line.strip() + " "
if "avg_acc" in total_line and "Message" not in total_line:
predictions = [int(item) for item in
total_line.split("[[")[1].split("]]")[0].split()][:-1]
labels = [int(item) for item in
total_line.split("[[")[2].split("]]")[0].split()]
decode_result.write(" ".join(
" ".join(vocab[item] for item in predictions).split()) + "\n")
label_result.write(" ".join(
" ".join(vocab[item] for item in labels).split()) + "\n")
decode_result.flush()
label_result.flush()
to_continue = False
total_line = ""
decode_result.close()
label_result.close()
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: python process_decode_result.py inference.log vocab")
sys.exit()
_, decode_log, vocab = sys.argv
process_files(decode_log, vocab)
|
[
"codecs.open",
"sys.exit"
] |
[((936, 973), 'codecs.open', 'codecs.open', (['vocab_file', '"""r"""', '"""utf-8"""'], {}), "(vocab_file, 'r', 'utf-8')\n", (947, 973), False, 'import codecs\n'), ((2531, 2541), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2539, 2541), False, 'import sys\n')]
|
"""
# 3D high-res brain mesh
Showing a ultra-high resolution mesh of a human brain, acquired with a 7 Tesla MRI.
The data is not yet publicly available.
Data courtesy of <NAME> et al.:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (2020)
*7 Tesla MRI Followed by Histological 3D Reconstructions in Whole-Brain Specimens*
Front. Neuroanat. 14:536838
doi: 10.3389/fnana.2020.536838
Acknowledgements to <NAME> and <NAME> for data access.
"""
from pathlib import Path
import numpy as np
from datoviz import canvas, run, colormap
c = canvas(show_fps=True, width=1024, height=768)
panel = c.panel(controller='arcball')
visual = panel.visual('mesh', transform='auto')
ROOT = Path(__file__).parent.parent.parent.parent
pos = np.load(ROOT / "data/mesh/brain_highres.vert.npy")
faces = np.load(ROOT / "data/mesh/brain_highres.faces.npy")
assert pos.ndim == 2
assert pos.shape[1] == 3
assert faces.ndim == 2
assert faces.shape[1] == 3
print(f"Mesh has {len(faces)} triangles and {len(pos)} vertices")
visual.data('pos', pos)
visual.data('index', faces.ravel())
visual.data('clip', np.array([0, 0, 1, 1]))
gui = c.gui("GUI")
@gui.control("slider_float", "clip", vmin=-1, vmax=+1, value=+1)
def on_change(value):
visual.data('clip', np.array([0, 0, 1, value]))
run()
|
[
"numpy.load",
"datoviz.canvas",
"datoviz.run",
"pathlib.Path",
"numpy.array"
] |
[((628, 673), 'datoviz.canvas', 'canvas', ([], {'show_fps': '(True)', 'width': '(1024)', 'height': '(768)'}), '(show_fps=True, width=1024, height=768)\n', (634, 673), False, 'from datoviz import canvas, run, colormap\n'), ((817, 867), 'numpy.load', 'np.load', (["(ROOT / 'data/mesh/brain_highres.vert.npy')"], {}), "(ROOT / 'data/mesh/brain_highres.vert.npy')\n", (824, 867), True, 'import numpy as np\n'), ((876, 927), 'numpy.load', 'np.load', (["(ROOT / 'data/mesh/brain_highres.faces.npy')"], {}), "(ROOT / 'data/mesh/brain_highres.faces.npy')\n", (883, 927), True, 'import numpy as np\n'), ((1357, 1362), 'datoviz.run', 'run', ([], {}), '()\n', (1360, 1362), False, 'from datoviz import canvas, run, colormap\n'), ((1173, 1195), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (1181, 1195), True, 'import numpy as np\n'), ((1328, 1354), 'numpy.array', 'np.array', (['[0, 0, 1, value]'], {}), '([0, 0, 1, value])\n', (1336, 1354), True, 'import numpy as np\n'), ((768, 782), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (772, 782), False, 'from pathlib import Path\n')]
|
"""Create and use a dataset using an external file.
Note that this example:
- Only works when it's run on the same host as the Kive server and Kive worker
(e.g. in the `dev-env` environment). On a production server, external files
are kept in a network share, so they can be accessed from different hosts.
- Requires an instance of `librarian.models.ExternalFileDirectory` called "tmp"
pointing at `/tmp` to be created and saved on the server. This can be done
through the Django shell (`python manage.py shell` in the `kive` directory).
"""
import io
import pathlib
import pprint
import kiveapi
# Use HTTPS on a real server, so your password is encrypted.
# Don't put your real password in source code, store it in a text file
# that is only readable by your user account or some more secure storage.
session = kiveapi.KiveAPI("http://localhost:8000")
session.login('kive', 'kive')
# Set up an External File to use in an example run.
EFD_DIRECTORY = pathlib.Path("/tmp")
EFD_DIRECTORY_NAME = "tmp"
EFD_NAME = "api_example_external_file.csv"
EFD_CONTENT = "name\nCamus"
with (EFD_DIRECTORY / EFD_NAME).open("w") as outf:
outf.write(EFD_CONTENT)
# Upload data
try:
uploaded_dataset = session.add_dataset(
'API Example 3 External Dataset',
'None',
None,
None,
None,
["Everyone"],
externalfiledirectory=EFD_DIRECTORY_NAME,
external_path=EFD_NAME,
)
except kiveapi.KiveMalformedDataException as e:
print(e)
pass
# Now get the file and check that the results make sense.
retrieved_dataset = session.find_datasets(
dataset_id=uploaded_dataset.dataset_id)[0]
pprint.pprint(retrieved_dataset.__dict__)
assert retrieved_dataset.dataset_id == uploaded_dataset.dataset_id
assert retrieved_dataset.filename == uploaded_dataset.filename
assert retrieved_dataset.name == "API Example 3 External Dataset"
assert retrieved_dataset.users_allowed == []
assert retrieved_dataset.groups_allowed == ["Everyone"]
assert retrieved_dataset.externalfiledirectory == EFD_DIRECTORY_NAME
assert retrieved_dataset.external_path == EFD_NAME
buffer = io.StringIO()
retrieved_dataset.download(buffer)
assert buffer.getvalue() == EFD_CONTENT
|
[
"pathlib.Path",
"kiveapi.KiveAPI",
"pprint.pprint",
"io.StringIO"
] |
[((825, 865), 'kiveapi.KiveAPI', 'kiveapi.KiveAPI', (['"""http://localhost:8000"""'], {}), "('http://localhost:8000')\n", (840, 865), False, 'import kiveapi\n'), ((966, 986), 'pathlib.Path', 'pathlib.Path', (['"""/tmp"""'], {}), "('/tmp')\n", (978, 986), False, 'import pathlib\n'), ((1661, 1702), 'pprint.pprint', 'pprint.pprint', (['retrieved_dataset.__dict__'], {}), '(retrieved_dataset.__dict__)\n', (1674, 1702), False, 'import pprint\n'), ((2132, 2145), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2143, 2145), False, 'import io\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-07 01:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangoapp', '0006_remove_gallery_slug'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='image',
field=models.ImageField(height_field='height', upload_to='', width_field='width'),
),
]
|
[
"django.db.models.ImageField"
] |
[((400, 475), 'django.db.models.ImageField', 'models.ImageField', ([], {'height_field': '"""height"""', 'upload_to': '""""""', 'width_field': '"""width"""'}), "(height_field='height', upload_to='', width_field='width')\n", (417, 475), False, 'from django.db import migrations, models\n')]
|
# coding=utf-8
""" Configuration of nox test automation tool. """
import nox
@nox.session(python=['3.8', '3.9'])
def lint(session):
"""Run static analysis."""
session.run("pipenv", "install", "--dev", external=True)
session.run("pipenv", "run", "flake8", "loganalysis/", "tests/")
@nox.session(python=['3.8', '3.9'])
def tests(session):
"""Run tests for all supported versions of Python."""
session.run("pipenv", "install", "--dev", external=True)
session.run("pipenv", "run", "pytest", "tests/")
|
[
"nox.session"
] |
[((81, 115), 'nox.session', 'nox.session', ([], {'python': "['3.8', '3.9']"}), "(python=['3.8', '3.9'])\n", (92, 115), False, 'import nox\n'), ((299, 333), 'nox.session', 'nox.session', ([], {'python': "['3.8', '3.9']"}), "(python=['3.8', '3.9'])\n", (310, 333), False, 'import nox\n')]
|
from floodsystem.stationdata import build_station_list,update_water_levels
from floodsystem.flood import stations_highest_rel_level
def run():
stations = build_station_list()
update_water_levels(stations)
N = 10
a = stations_highest_rel_level(stations, N)
for i in a:
print("{}, {}".format(i.name, i.latest_level))
if __name__ == "__main__":
print("*** Task 2A: CUED Part IA Flood Warning System ***")
run()
|
[
"floodsystem.flood.stations_highest_rel_level",
"floodsystem.stationdata.build_station_list",
"floodsystem.stationdata.update_water_levels"
] |
[((159, 179), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (177, 179), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((184, 213), 'floodsystem.stationdata.update_water_levels', 'update_water_levels', (['stations'], {}), '(stations)\n', (203, 213), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((233, 272), 'floodsystem.flood.stations_highest_rel_level', 'stations_highest_rel_level', (['stations', 'N'], {}), '(stations, N)\n', (259, 272), False, 'from floodsystem.flood import stations_highest_rel_level\n')]
|
#<NAME>
#MoTrack Therapy
#Created Mon Oct 14, 2019
#GOAL: Convert standard iOS file names ("IMG_1750.JPG") to MoTrack data image standard names ("IMG_0001_A_RAW.JPG").
#Description:
#Doesn't rename the files in place in case there is a bug. Instead takes input images in one folder, and makes output in another folder
#Assumes that the images are sequentially named in the original pairing (but not necessarily consecutive if some bad images were deleted)
#Assumes that the RAW image is first, followed by the CV image, for each pair
import os
import re
import shutil
################################################################
############################ PART A ############################
###################### CONFIGURE DETAILS ######################
################################################################
#VALUES THAT MIGHT NEED CHANGING
num_letters_per_set = -1 #Make this 8 for A->H system. Do -1 to enter in specific points
set_starters = [8, 34, 36, 52, 78, 102, 104, 108, 128, 130, 152, 172, 192, 214, 230, 248, 266, 284, 286,
310, 312, 314, 316, 334, 352, 370] #Enter first image in each set
start_set_num = 205 #The first set number
image_pair_names = ["RAW", "CV"] #makes images_in_each_pair 2
original_files_folder = 'original_names' #What folder the original files with their original names are located in
new_files_folder = 'new_names' #Where to put the renamed files
################################################################
############################ PART B ############################
###################### DO THE OPERATIONS ######################
################################################################
#GET INPUT FILES IN FOLDER
all_orig_file_names = os.listdir(original_files_folder)
all_orig_image_names = [k for k in all_orig_file_names if re.match(r'IMG_\d{4}', k)]
all_orig_image_names.sort() #put the files in alphabetical order. IMPORTANT!
print( "# of Files to Rename: " + str(len(all_orig_image_names)))
#(STARTING) CONSTANTS
images_in_each_pair = len(image_pair_names) #2 for RAW and CV
ct = 0
ct_within_set = 0 #doesn't count duplicate _A1 _A2 etc
tot_ct_within_set = 0
set_num = start_set_num
set_names = []
set_lengths = []
set_A_cts = {}
all_new_file_names = []
set_starters_str = ["IMG_{:04d}.JPG".format(img_num) for img_num in set_starters]
error_str = ""
renamed_count = 0
#DO THE RENAMING OPERATIONS
for orig_image_name in all_orig_image_names:
if num_letters_per_set>0 and ct % (num_letters_per_set*images_in_each_pair)==0:
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
set_num += 1
tot_ct_within_set = 0
ct_within_set = 0
elif orig_image_name in set_starters_str:
if ct_within_set>2*images_in_each_pair: #2 here because only A and B are allowed to be duplicated. Anything else, make a new set
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
set_num += 1
tot_ct_within_set = 0
ct_within_set = 0
else:
#Don't increment set number. Just make it named like _A1_RAW.JPG, etc
ct_within_set -= images_in_each_pair
if ct_within_set < 0:
ct_within_set = 0
if ct_within_set >= 26*images_in_each_pair:
error_str += "ERROR: EXCEEDED ALL 26 ALPHABET LETTER OPTIONS.\n"
print("Error. Breaking out of loop.")
break
letter = chr(65+(int(ct_within_set/images_in_each_pair))) #65 represents first letter in alphabet, "A"
if letter=="A":
set_A_cts["{:04d}".format(set_num)] = 0
category = image_pair_names[ct%images_in_each_pair]
subcategoryNum = 0
while True:
new_file_name = "IMG_{:04d}_{}{}_{}.JPG".format(set_num, letter, "" if subcategoryNum==0 else str(subcategoryNum), category )
if letter=="A":
set_A_cts["{:04d}".format(set_num)] = set_A_cts["{:04d}".format(set_num)] + 1
if new_file_name not in all_new_file_names:
break
print("While renaming '" + orig_image_name + "', already found intended a file that is already named '"+new_file_name+"'.")
subcategoryNum += 1
#UNCOMMENT THIS TO ACTUALLY DO THE REMAINING, NOT JUST TO TEST
#shutil.copyfile(original_files_folder+'/'+orig_image_name, new_files_folder+'/'+new_file_name)
renamed_count += 1
all_new_file_names.append(new_file_name)
ct += 1
ct_within_set += 1
tot_ct_within_set += 1
#FINISH OFF FOR LOOP BY COMPLETING LAST SET
set_names.append("{:04d}".format(set_num))
set_lengths.append(tot_ct_within_set)
################################################################
############################ PART C ############################
####################### PRINT OUT OUTPUT #######################
################################################################
#PRINT OUT THE FILE NAMES TO PUT IN EXCEL
print()
#print(all_new_file_names)
print("#\tOld File Name\tNew File Name")
for i,new_file_name in enumerate(all_new_file_names):
print(str(i) + "\t" + all_orig_image_names[i] + "\t" + new_file_name)
#PRINT OUT SET LENGTHS
print()
print("#\tName\tTot\tPairs\tAs\tLetters")
renamed_count_check = 0
for i,set_name in enumerate(set_names):
set_length_pair = int(set_lengths[i]/images_in_each_pair+0.5)
str_to_print = str(i)+"\t" + set_name + "\t" + str(set_length_pair) + "\t" + str(set_A_cts[set_name])
str_to_print += "\tA-{}".format( chr(65+set_lengths[i]-set_length_pair-1) )
if set_A_cts[set_name] > 1:
for i2 in range(2,set_A_cts[set_name]+1):
str_to_print += ",A"+str(i2)
print(str_to_print)
renamed_count_check += set_lengths[i]
#PRINT OUT TOTAL NUMBER OF IMAGES RENAMED
print()
print("Renamed a total of " + str(renamed_count) + " images (Double checked value=" + str(renamed_count_check) + ")")
if renamed_count != renamed_count_check:
error_str += "ERROR: DOUBLING CHECK TOTAL COUNT OF RENAMED IMAGES FAILED." \
" {} != {}. len(all_orig_image_names)={}. len(all_new_file_names)={}" \
".\n".format(renamed_count,renamed_count_check,len(all_orig_image_names),len(all_new_file_names))
#PRINT OUT ANY ERRORS
print(error_str)
|
[
"re.match",
"os.listdir"
] |
[((1738, 1771), 'os.listdir', 'os.listdir', (['original_files_folder'], {}), '(original_files_folder)\n', (1748, 1771), False, 'import os\n'), ((1830, 1855), 're.match', 're.match', (['"""IMG_\\\\d{4}"""', 'k'], {}), "('IMG_\\\\d{4}', k)\n", (1838, 1855), False, 'import re\n')]
|
# -*- encoding: utf-8 -*-
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
General tests.
@since: 0.1.0
"""
from __future__ import absolute_import
import six
from types import ModuleType
import unittest
import miniamf
from .util import ClassCacheClearingTestCase, replace_dict, Spam
class ASObjectTestCase(unittest.TestCase):
"""
I exercise all functionality relating to the L{ASObject<miniamf.ASObject>}
class.
"""
def test_init(self):
bag = miniamf.ASObject(spam='eggs', baz='spam')
self.assertEqual(bag, dict(spam='eggs', baz='spam'))
self.assertEqual(bag.spam, 'eggs')
self.assertEqual(bag.baz, 'spam')
def test_eq(self):
bag = miniamf.ASObject()
self.assertEqual(bag, {})
self.assertNotEqual(bag, {'spam': 'eggs'})
bag2 = miniamf.ASObject()
self.assertEqual(bag2, {})
self.assertEqual(bag, bag2)
self.assertNotEqual(bag, None)
def test_setitem(self):
bag = miniamf.ASObject()
self.assertEqual(bag, {})
bag['spam'] = 'eggs'
self.assertEqual(bag.spam, 'eggs')
def test_delitem(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag.spam, 'eggs')
del bag['spam']
self.assertRaises(AttributeError, lambda: bag.spam)
def test_getitem(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag['spam'], 'eggs')
def test_iter(self):
bag = miniamf.ASObject({'spam': 'eggs'})
x = []
for k, v in six.iteritems(bag):
x.append((k, v))
self.assertEqual(x, [('spam', 'eggs')])
def test_hash(self):
bag = miniamf.ASObject({'spam': 'eggs'})
self.assertNotEqual(None, hash(bag))
class HelperTestCase(unittest.TestCase):
"""
Tests all helper functions in C{miniamf.__init__}
"""
def setUp(self):
self.default_encoding = miniamf.DEFAULT_ENCODING
def tearDown(self):
miniamf.DEFAULT_ENCODING = self.default_encoding
def test_get_decoder(self):
self.assertRaises(ValueError, miniamf.get_decoder, 'spam')
decoder = miniamf.get_decoder(miniamf.AMF0, stream=b'123', strict=True)
self.assertEqual(decoder.stream.getvalue(), b'123')
self.assertTrue(decoder.strict)
decoder = miniamf.get_decoder(miniamf.AMF3, stream=b'456', strict=True)
self.assertEqual(decoder.stream.getvalue(), b'456')
self.assertTrue(decoder.strict)
def test_get_encoder(self):
miniamf.get_encoder(miniamf.AMF0)
miniamf.get_encoder(miniamf.AMF3)
self.assertRaises(ValueError, miniamf.get_encoder, b'spam')
encoder = miniamf.get_encoder(miniamf.AMF0, stream=b'spam')
self.assertEqual(encoder.stream.getvalue(), b'spam')
self.assertFalse(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF3, stream=b'eggs')
self.assertFalse(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF0, strict=True)
self.assertTrue(encoder.strict)
encoder = miniamf.get_encoder(miniamf.AMF3, strict=True)
self.assertTrue(encoder.strict)
def test_encode(self):
self.assertEqual(
miniamf.encode(u'connect', 1.0).getvalue(),
b'\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00'
)
def test_decode(self):
self.assertEqual(
list(miniamf.decode(
b'\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00')),
[u'connect', 1.0]
)
def test_default_encoding(self):
miniamf.DEFAULT_ENCODING = miniamf.AMF3
x = miniamf.encode('foo').getvalue()
self.assertEqual(x, b'\x06\x07foo')
miniamf.DEFAULT_ENCODING = miniamf.AMF0
x = miniamf.encode('foo').getvalue()
self.assertEqual(x, b'\x02\x00\x03foo')
class UnregisterClassTestCase(ClassCacheClearingTestCase):
def test_klass(self):
alias = miniamf.register_class(Spam, 'spam.eggs')
miniamf.unregister_class(Spam)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
self.assertTrue(Spam not in miniamf.CLASS_CACHE)
self.assertTrue(alias not in miniamf.CLASS_CACHE)
def test_alias(self):
alias = miniamf.register_class(Spam, 'spam.eggs')
miniamf.unregister_class('spam.eggs')
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
self.assertTrue(alias not in miniamf.CLASS_CACHE)
class ClassLoaderTestCase(ClassCacheClearingTestCase):
def test_register(self):
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
miniamf.register_class_loader(chr)
self.assertTrue(chr in miniamf.CLASS_LOADERS)
def test_bad_register(self):
self.assertRaises(TypeError, miniamf.register_class_loader, 1)
miniamf.register_class_loader(ord)
def test_unregister(self):
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
miniamf.register_class_loader(chr)
self.assertTrue(chr in miniamf.CLASS_LOADERS)
miniamf.unregister_class_loader(chr)
self.assertTrue(chr not in miniamf.CLASS_LOADERS)
self.assertRaises(LookupError, miniamf.unregister_class_loader, chr)
def test_load_class(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return Spam
miniamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
miniamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in miniamf.CLASS_CACHE)
def test_load_unknown_class(self):
def class_loader(x):
return None
miniamf.register_class_loader(class_loader)
with self.assertRaises(miniamf.UnknownClassAlias):
miniamf.load_class('spam.eggs')
def test_load_class_by_alias(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return miniamf.ClassAlias(Spam, 'spam.eggs')
miniamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in miniamf.CLASS_CACHE)
miniamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in miniamf.CLASS_CACHE)
def test_load_class_bad_return(self):
def class_loader(x):
return 'xyz'
miniamf.register_class_loader(class_loader)
self.assertRaises(TypeError, miniamf.load_class, 'spam.eggs')
def test_load_class_by_module(self):
miniamf.load_class('unittest.TestCase')
def test_load_class_by_module_bad(self):
with self.assertRaises(miniamf.UnknownClassAlias):
miniamf.load_class('unittest.TestCase.')
class TypeMapTestCase(unittest.TestCase):
def setUp(self):
self.tm = miniamf.TYPE_MAP.copy()
self.addCleanup(replace_dict, self.tm, miniamf.TYPE_MAP)
def test_add_invalid(self):
mod = ModuleType('spam')
self.assertRaises(TypeError, miniamf.add_type, mod)
self.assertRaises(TypeError, miniamf.add_type, {})
self.assertRaises(TypeError, miniamf.add_type, 'spam')
self.assertRaises(TypeError, miniamf.add_type, u'eggs')
self.assertRaises(TypeError, miniamf.add_type, 1)
self.assertRaises(TypeError, miniamf.add_type, 234234)
self.assertRaises(TypeError, miniamf.add_type, 34.23)
self.assertRaises(TypeError, miniamf.add_type, None)
self.assertRaises(TypeError, miniamf.add_type, object())
class A:
pass
self.assertRaises(TypeError, miniamf.add_type, A())
def test_add_same(self):
miniamf.add_type(chr)
self.assertRaises(KeyError, miniamf.add_type, chr)
def test_add_class(self):
class A:
pass
class B(object):
pass
miniamf.add_type(A)
self.assertTrue(A in miniamf.TYPE_MAP)
miniamf.add_type(B)
self.assertTrue(B in miniamf.TYPE_MAP)
def test_add_callable(self):
td = miniamf.add_type(ord)
self.assertTrue(ord in miniamf.TYPE_MAP)
self.assertTrue(td in miniamf.TYPE_MAP.values())
def test_add_multiple(self):
td = miniamf.add_type((chr,))
class A(object):
pass
class B(object):
pass
class C(object):
pass
td = miniamf.add_type([A, B, C])
self.assertEqual(td, miniamf.get_type([A, B, C]))
def test_get_type(self):
self.assertRaises(KeyError, miniamf.get_type, chr)
td = miniamf.add_type((chr,))
self.assertRaises(KeyError, miniamf.get_type, chr)
td2 = miniamf.get_type((chr, ))
self.assertEqual(td, td2)
td2 = miniamf.get_type([chr, ])
self.assertEqual(td, td2)
def test_remove(self):
self.assertRaises(KeyError, miniamf.remove_type, chr)
td = miniamf.add_type((chr,))
self.assertRaises(KeyError, miniamf.remove_type, chr)
td2 = miniamf.remove_type((chr,))
self.assertEqual(td, td2)
class ErrorClassMapTestCase(unittest.TestCase):
"""
I test all functionality related to manipulating L{miniamf.ERROR_CLASS_MAP}
"""
def setUp(self):
self.map_copy = miniamf.ERROR_CLASS_MAP.copy()
self.addCleanup(replace_dict, self.map_copy, miniamf.ERROR_CLASS_MAP)
def test_add(self):
class A:
pass
class B(Exception):
pass
self.assertRaises(TypeError, miniamf.add_error_class, None, 'a')
# class A does not sub-class Exception
self.assertRaises(TypeError, miniamf.add_error_class, A, 'a')
miniamf.add_error_class(B, 'b')
self.assertEqual(miniamf.ERROR_CLASS_MAP['b'], B)
miniamf.add_error_class(B, 'a')
self.assertEqual(miniamf.ERROR_CLASS_MAP['a'], B)
class C(Exception):
pass
self.assertRaises(ValueError, miniamf.add_error_class, C, 'b')
def test_remove(self):
class B(Exception):
pass
miniamf.ERROR_CLASS_MAP['abc'] = B
self.assertRaises(TypeError, miniamf.remove_error_class, None)
miniamf.remove_error_class('abc')
self.assertFalse('abc' in miniamf.ERROR_CLASS_MAP)
self.assertRaises(KeyError, miniamf.ERROR_CLASS_MAP.__getitem__, 'abc')
miniamf.ERROR_CLASS_MAP['abc'] = B
miniamf.remove_error_class(B)
self.assertRaises(KeyError, miniamf.ERROR_CLASS_MAP.__getitem__, 'abc')
self.assertRaises(ValueError, miniamf.remove_error_class, B)
self.assertRaises(ValueError, miniamf.remove_error_class, 'abc')
class DummyAlias(miniamf.ClassAlias):
pass
class RegisterAliasTypeTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = miniamf.ALIAS_TYPES.copy()
self.addCleanup(replace_dict, self.old_aliases, miniamf.ALIAS_TYPES)
def test_bad_klass(self):
self.assertRaises(TypeError, miniamf.register_alias_type, 1)
def test_subclass(self):
self.assertFalse(issubclass(self.__class__, miniamf.ClassAlias))
with self.assertRaises(ValueError):
miniamf.register_alias_type(self.__class__)
def test_no_args(self):
self.assertTrue(issubclass(DummyAlias, miniamf.ClassAlias))
self.assertRaises(ValueError, miniamf.register_alias_type, DummyAlias)
def test_type_args(self):
self.assertTrue(issubclass(DummyAlias, miniamf.ClassAlias))
self.assertRaises(TypeError,
miniamf.register_alias_type, DummyAlias, 1)
def test_single(self):
class A(object):
pass
miniamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.ALIAS_TYPES[DummyAlias], (A,))
def test_multiple(self):
class A(object):
pass
class B(object):
pass
with self.assertRaises(TypeError):
miniamf.register_alias_type(DummyAlias, A, 'hello')
miniamf.register_alias_type(DummyAlias, A, B)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.ALIAS_TYPES[DummyAlias], (A, B))
def test_duplicate(self):
class A(object):
pass
miniamf.register_alias_type(DummyAlias, A)
with self.assertRaises(RuntimeError):
miniamf.register_alias_type(DummyAlias, A)
def test_unregister(self):
"""
Tests for L{miniamf.unregister_alias_type}
"""
class A(object):
pass
self.assertFalse(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.unregister_alias_type(A), None)
miniamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in miniamf.ALIAS_TYPES)
self.assertEqual(miniamf.unregister_alias_type(DummyAlias), (A,))
class TypedObjectTestCase(unittest.TestCase):
def test_externalised(self):
o = miniamf.TypedObject(None)
self.assertRaises(miniamf.DecodeError, o.__readamf__, None)
self.assertRaises(miniamf.EncodeError, o.__writeamf__, None)
def test_alias(self):
class Foo:
pass
alias = miniamf.TypedObjectClassAlias(Foo, 'bar')
self.assertEqual(alias.klass, miniamf.TypedObject)
self.assertNotEqual(alias.klass, Foo)
class PackageTestCase(ClassCacheClearingTestCase):
"""
Tests for L{miniamf.register_package}
"""
class NewType(object):
pass
class ClassicType:
pass
def setUp(self):
ClassCacheClearingTestCase.setUp(self)
self.module = ModuleType("foo")
self.module.Classic = self.ClassicType
self.module.New = self.NewType
self.module.b = b'binary'
self.module.i = 12323
self.module.f = 345.234
self.module.u = u"Unicöde"
self.module.l = ["list", "of", "junk"]
self.module.d = {"foo": "bar", "baz": "gak"}
self.module.obj = object()
self.module.mod = self.module
self.module.lam = lambda _: None
self.NewType.__module__ = "foo"
self.ClassicType.__module__ = "foo"
self.spam_module = Spam.__module__
Spam.__module__ = "foo"
self.names = (self.module.__name__,)
def tearDown(self):
ClassCacheClearingTestCase.tearDown(self)
Spam.__module__ = self.spam_module
self.module.__name__ = self.names
def check_module(self, r, base_package):
self.assertEqual(len(r), 2)
for c in [self.NewType, self.ClassicType]:
alias = r[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, base_package + c.__name__)
def test_module(self):
r = miniamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_all(self):
self.module.Spam = Spam
self.module.__all__ = ['Classic', 'New']
r = miniamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_ignore(self):
self.module.Spam = Spam
r = miniamf.register_package(self.module, 'com.example',
ignore=['Spam'])
self.check_module(r, 'com.example.')
def test_separator(self):
r = miniamf.register_package(self.module, 'com.example', separator='/')
self.ClassicType.__module__ = 'com.example'
self.NewType.__module__ = 'com.example'
self.check_module(r, 'com.example/')
def test_name(self):
self.module.__name__ = 'spam.eggs'
self.ClassicType.__module__ = 'spam.eggs'
self.NewType.__module__ = 'spam.eggs'
r = miniamf.register_package(self.module)
self.check_module(r, 'spam.eggs.')
def test_dict(self):
"""
@see: #585
"""
d = dict()
d['Spam'] = Spam
r = miniamf.register_package(d, 'com.example', strict=False)
self.assertEqual(len(r), 1)
alias = r[Spam]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, Spam)
self.assertEqual(alias.alias, 'com.example.Spam')
def test_odd(self):
self.assertRaises(TypeError, miniamf.register_package, object())
self.assertRaises(TypeError, miniamf.register_package, 1)
self.assertRaises(TypeError, miniamf.register_package, 1.2)
self.assertRaises(TypeError, miniamf.register_package, 23897492834)
self.assertRaises(TypeError, miniamf.register_package, [])
self.assertRaises(TypeError, miniamf.register_package, b'')
self.assertRaises(TypeError, miniamf.register_package, u'')
def test_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = miniamf.register_package(self.module, 'com.example', strict=True)
self.check_module(r, 'com.example.')
def test_not_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = miniamf.register_package(self.module, 'com.example', strict=False)
self.assertEqual(len(r), 3)
for c in [self.NewType, self.ClassicType, Spam]:
alias = r[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'com.example.' + c.__name__)
def test_list(self):
class Foo:
pass
class Bar:
pass
ret = miniamf.register_package([Foo, Bar], 'spam.eggs')
self.assertEqual(len(ret), 2)
for c in [Foo, Bar]:
alias = ret[c]
self.assertTrue(isinstance(alias, miniamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'spam.eggs.' + c.__name__)
class UndefinedTestCase(unittest.TestCase):
"""
Tests for L{miniamf.Undefined}
"""
def test_none(self):
"""
L{miniamf.Undefined} is not referentially identical to C{None}.
"""
self.assertFalse(miniamf.Undefined is None)
def test_non_zero(self):
"""
Truth test for L{miniamf.Undefined} == C{False}.
"""
self.assertFalse(miniamf.Undefined)
class TestAMF0Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for AMF0 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf0
except ImportError:
from miniamf import amf0
decoder = miniamf.get_decoder(miniamf.AMF0)
self.assertIsInstance(decoder, amf0.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf0
except ImportError:
self.skipTest('amf0 extension not available')
decoder = miniamf.get_decoder(miniamf.AMF0, use_ext=True)
self.assertIsInstance(decoder, amf0.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf0
decoder = miniamf.get_decoder(miniamf.AMF0, use_ext=False)
self.assertIsInstance(decoder, amf0.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf0
except ImportError:
from miniamf import amf0
encoder = miniamf.get_encoder(miniamf.AMF0)
self.assertIsInstance(encoder, amf0.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf0
except ImportError:
self.skipTest('amf0 extension not available')
encoder = miniamf.get_encoder(miniamf.AMF0, use_ext=True)
self.assertIsInstance(encoder, amf0.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf0
encoder = miniamf.get_encoder(miniamf.AMF0, use_ext=False)
self.assertIsInstance(encoder, amf0.Encoder)
class TestAMF3Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for amf3 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf3
except ImportError:
from miniamf import amf3
decoder = miniamf.get_decoder(miniamf.AMF3)
self.assertIsInstance(decoder, amf3.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf3
except ImportError:
self.skipTest('amf3 extension not available')
decoder = miniamf.get_decoder(miniamf.AMF3, use_ext=True)
self.assertIsInstance(decoder, amf3.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf3
decoder = miniamf.get_decoder(miniamf.AMF3, use_ext=False)
self.assertIsInstance(decoder, amf3.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from miniamf._accel import amf3
except ImportError:
from miniamf import amf3
encoder = miniamf.get_encoder(miniamf.AMF3)
self.assertIsInstance(encoder, amf3.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from miniamf._accel import amf3
except ImportError:
self.skipTest('amf3 extension not available')
encoder = miniamf.get_encoder(miniamf.AMF3, use_ext=True)
self.assertIsInstance(encoder, amf3.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from miniamf import amf3
encoder = miniamf.get_encoder(miniamf.AMF3, use_ext=False)
self.assertIsInstance(encoder, amf3.Encoder)
|
[
"miniamf.get_decoder",
"miniamf.get_type",
"miniamf.remove_error_class",
"six.iteritems",
"miniamf.add_type",
"miniamf.remove_type",
"miniamf.ERROR_CLASS_MAP.copy",
"miniamf.register_class_loader",
"miniamf.register_alias_type",
"miniamf.load_class",
"miniamf.unregister_class",
"miniamf.decode",
"miniamf.register_class",
"miniamf.ASObject",
"miniamf.TYPE_MAP.values",
"miniamf.unregister_alias_type",
"miniamf.encode",
"miniamf.get_encoder",
"miniamf.unregister_class_loader",
"miniamf.register_package",
"miniamf.add_error_class",
"miniamf.ALIAS_TYPES.copy",
"miniamf.TypedObject",
"miniamf.TypedObjectClassAlias",
"miniamf.ClassAlias",
"types.ModuleType",
"miniamf.TYPE_MAP.copy"
] |
[((500, 541), 'miniamf.ASObject', 'miniamf.ASObject', ([], {'spam': '"""eggs"""', 'baz': '"""spam"""'}), "(spam='eggs', baz='spam')\n", (516, 541), False, 'import miniamf\n'), ((727, 745), 'miniamf.ASObject', 'miniamf.ASObject', ([], {}), '()\n', (743, 745), False, 'import miniamf\n'), ((848, 866), 'miniamf.ASObject', 'miniamf.ASObject', ([], {}), '()\n', (864, 866), False, 'import miniamf\n'), ((1021, 1039), 'miniamf.ASObject', 'miniamf.ASObject', ([], {}), '()\n', (1037, 1039), False, 'import miniamf\n'), ((1192, 1226), 'miniamf.ASObject', 'miniamf.ASObject', (["{'spam': 'eggs'}"], {}), "({'spam': 'eggs'})\n", (1208, 1226), False, 'import miniamf\n'), ((1399, 1433), 'miniamf.ASObject', 'miniamf.ASObject', (["{'spam': 'eggs'}"], {}), "({'spam': 'eggs'})\n", (1415, 1433), False, 'import miniamf\n'), ((1521, 1555), 'miniamf.ASObject', 'miniamf.ASObject', (["{'spam': 'eggs'}"], {}), "({'spam': 'eggs'})\n", (1537, 1555), False, 'import miniamf\n'), ((1593, 1611), 'six.iteritems', 'six.iteritems', (['bag'], {}), '(bag)\n', (1606, 1611), False, 'import six\n'), ((1731, 1765), 'miniamf.ASObject', 'miniamf.ASObject', (["{'spam': 'eggs'}"], {}), "({'spam': 'eggs'})\n", (1747, 1765), False, 'import miniamf\n'), ((2205, 2266), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF0'], {'stream': "b'123'", 'strict': '(True)'}), "(miniamf.AMF0, stream=b'123', strict=True)\n", (2224, 2266), False, 'import miniamf\n'), ((2386, 2447), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF3'], {'stream': "b'456'", 'strict': '(True)'}), "(miniamf.AMF3, stream=b'456', strict=True)\n", (2405, 2447), False, 'import miniamf\n'), ((2589, 2622), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {}), '(miniamf.AMF0)\n', (2608, 2622), False, 'import miniamf\n'), ((2631, 2664), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {}), '(miniamf.AMF3)\n', (2650, 2664), False, 'import miniamf\n'), ((2752, 2801), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {'stream': "b'spam'"}), "(miniamf.AMF0, stream=b'spam')\n", (2771, 2801), False, 'import miniamf\n'), ((2923, 2972), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {'stream': "b'eggs'"}), "(miniamf.AMF3, stream=b'eggs')\n", (2942, 2972), False, 'import miniamf\n'), ((3033, 3079), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {'strict': '(True)'}), '(miniamf.AMF0, strict=True)\n', (3052, 3079), False, 'import miniamf\n'), ((3139, 3185), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {'strict': '(True)'}), '(miniamf.AMF3, strict=True)\n', (3158, 3185), False, 'import miniamf\n'), ((4032, 4073), 'miniamf.register_class', 'miniamf.register_class', (['Spam', '"""spam.eggs"""'], {}), "(Spam, 'spam.eggs')\n", (4054, 4073), False, 'import miniamf\n'), ((4083, 4113), 'miniamf.unregister_class', 'miniamf.unregister_class', (['Spam'], {}), '(Spam)\n', (4107, 4113), False, 'import miniamf\n'), ((4336, 4377), 'miniamf.register_class', 'miniamf.register_class', (['Spam', '"""spam.eggs"""'], {}), "(Spam, 'spam.eggs')\n", (4358, 4377), False, 'import miniamf\n'), ((4387, 4424), 'miniamf.unregister_class', 'miniamf.unregister_class', (['"""spam.eggs"""'], {}), "('spam.eggs')\n", (4411, 4424), False, 'import miniamf\n'), ((4699, 4733), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['chr'], {}), '(chr)\n', (4728, 4733), False, 'import miniamf\n'), ((4901, 4935), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['ord'], {}), '(ord)\n', (4930, 4935), False, 'import miniamf\n'), ((5034, 5068), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['chr'], {}), '(chr)\n', (5063, 5068), False, 'import miniamf\n'), ((5132, 5168), 'miniamf.unregister_class_loader', 'miniamf.unregister_class_loader', (['chr'], {}), '(chr)\n', (5163, 5168), False, 'import miniamf\n'), ((5445, 5488), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['class_loader'], {}), '(class_loader)\n', (5474, 5488), False, 'import miniamf\n'), ((5562, 5593), 'miniamf.load_class', 'miniamf.load_class', (['"""spam.eggs"""'], {}), "('spam.eggs')\n", (5580, 5593), False, 'import miniamf\n'), ((5756, 5799), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['class_loader'], {}), '(class_loader)\n', (5785, 5799), False, 'import miniamf\n'), ((6085, 6128), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['class_loader'], {}), '(class_loader)\n', (6114, 6128), False, 'import miniamf\n'), ((6202, 6233), 'miniamf.load_class', 'miniamf.load_class', (['"""spam.eggs"""'], {}), "('spam.eggs')\n", (6220, 6233), False, 'import miniamf\n'), ((6400, 6443), 'miniamf.register_class_loader', 'miniamf.register_class_loader', (['class_loader'], {}), '(class_loader)\n', (6429, 6443), False, 'import miniamf\n'), ((6565, 6604), 'miniamf.load_class', 'miniamf.load_class', (['"""unittest.TestCase"""'], {}), "('unittest.TestCase')\n", (6583, 6604), False, 'import miniamf\n'), ((6846, 6869), 'miniamf.TYPE_MAP.copy', 'miniamf.TYPE_MAP.copy', ([], {}), '()\n', (6867, 6869), False, 'import miniamf\n'), ((6983, 7001), 'types.ModuleType', 'ModuleType', (['"""spam"""'], {}), "('spam')\n", (6993, 7001), False, 'from types import ModuleType\n'), ((7691, 7712), 'miniamf.add_type', 'miniamf.add_type', (['chr'], {}), '(chr)\n', (7707, 7712), False, 'import miniamf\n'), ((7889, 7908), 'miniamf.add_type', 'miniamf.add_type', (['A'], {}), '(A)\n', (7905, 7908), False, 'import miniamf\n'), ((7965, 7984), 'miniamf.add_type', 'miniamf.add_type', (['B'], {}), '(B)\n', (7981, 7984), False, 'import miniamf\n'), ((8079, 8100), 'miniamf.add_type', 'miniamf.add_type', (['ord'], {}), '(ord)\n', (8095, 8100), False, 'import miniamf\n'), ((8255, 8279), 'miniamf.add_type', 'miniamf.add_type', (['(chr,)'], {}), '((chr,))\n', (8271, 8279), False, 'import miniamf\n'), ((8423, 8450), 'miniamf.add_type', 'miniamf.add_type', (['[A, B, C]'], {}), '([A, B, C])\n', (8439, 8450), False, 'import miniamf\n'), ((8611, 8635), 'miniamf.add_type', 'miniamf.add_type', (['(chr,)'], {}), '((chr,))\n', (8627, 8635), False, 'import miniamf\n'), ((8710, 8734), 'miniamf.get_type', 'miniamf.get_type', (['(chr,)'], {}), '((chr,))\n', (8726, 8734), False, 'import miniamf\n'), ((8785, 8808), 'miniamf.get_type', 'miniamf.get_type', (['[chr]'], {}), '([chr])\n', (8801, 8808), False, 'import miniamf\n'), ((8948, 8972), 'miniamf.add_type', 'miniamf.add_type', (['(chr,)'], {}), '((chr,))\n', (8964, 8972), False, 'import miniamf\n'), ((9050, 9077), 'miniamf.remove_type', 'miniamf.remove_type', (['(chr,)'], {}), '((chr,))\n', (9069, 9077), False, 'import miniamf\n'), ((9305, 9335), 'miniamf.ERROR_CLASS_MAP.copy', 'miniamf.ERROR_CLASS_MAP.copy', ([], {}), '()\n', (9333, 9335), False, 'import miniamf\n'), ((9720, 9751), 'miniamf.add_error_class', 'miniamf.add_error_class', (['B', '"""b"""'], {}), "(B, 'b')\n", (9743, 9751), False, 'import miniamf\n'), ((9819, 9850), 'miniamf.add_error_class', 'miniamf.add_error_class', (['B', '"""a"""'], {}), "(B, 'a')\n", (9842, 9850), False, 'import miniamf\n'), ((10225, 10258), 'miniamf.remove_error_class', 'miniamf.remove_error_class', (['"""abc"""'], {}), "('abc')\n", (10251, 10258), False, 'import miniamf\n'), ((10451, 10480), 'miniamf.remove_error_class', 'miniamf.remove_error_class', (['B'], {}), '(B)\n', (10477, 10480), False, 'import miniamf\n'), ((10855, 10881), 'miniamf.ALIAS_TYPES.copy', 'miniamf.ALIAS_TYPES.copy', ([], {}), '()\n', (10879, 10881), False, 'import miniamf\n'), ((11723, 11765), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (11750, 11765), False, 'import miniamf\n'), ((12122, 12167), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A', 'B'], {}), '(DummyAlias, A, B)\n', (12149, 12167), False, 'import miniamf\n'), ((12375, 12417), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (12402, 12417), False, 'import miniamf\n'), ((12804, 12846), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (12831, 12846), False, 'import miniamf\n'), ((13074, 13099), 'miniamf.TypedObject', 'miniamf.TypedObject', (['None'], {}), '(None)\n', (13093, 13099), False, 'import miniamf\n'), ((13318, 13359), 'miniamf.TypedObjectClassAlias', 'miniamf.TypedObjectClassAlias', (['Foo', '"""bar"""'], {}), "(Foo, 'bar')\n", (13347, 13359), False, 'import miniamf\n'), ((13747, 13764), 'types.ModuleType', 'ModuleType', (['"""foo"""'], {}), "('foo')\n", (13757, 13764), False, 'from types import ModuleType\n'), ((14947, 14999), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {}), "(self.module, 'com.example')\n", (14971, 14999), False, 'import miniamf\n'), ((15165, 15217), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {}), "(self.module, 'com.example')\n", (15189, 15217), False, 'import miniamf\n'), ((15336, 15405), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {'ignore': "['Spam']"}), "(self.module, 'com.example', ignore=['Spam'])\n", (15360, 15405), False, 'import miniamf\n'), ((15531, 15598), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {'separator': '"""/"""'}), "(self.module, 'com.example', separator='/')\n", (15555, 15598), False, 'import miniamf\n'), ((15923, 15960), 'miniamf.register_package', 'miniamf.register_package', (['self.module'], {}), '(self.module)\n', (15947, 15960), False, 'import miniamf\n'), ((16130, 16186), 'miniamf.register_package', 'miniamf.register_package', (['d', '"""com.example"""'], {'strict': '(False)'}), "(d, 'com.example', strict=False)\n", (16154, 16186), False, 'import miniamf\n'), ((17043, 17108), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {'strict': '(True)'}), "(self.module, 'com.example', strict=True)\n", (17067, 17108), False, 'import miniamf\n'), ((17275, 17341), 'miniamf.register_package', 'miniamf.register_package', (['self.module', '"""com.example"""'], {'strict': '(False)'}), "(self.module, 'com.example', strict=False)\n", (17299, 17341), False, 'import miniamf\n'), ((17760, 17809), 'miniamf.register_package', 'miniamf.register_package', (['[Foo, Bar]', '"""spam.eggs"""'], {}), "([Foo, Bar], 'spam.eggs')\n", (17784, 17809), False, 'import miniamf\n'), ((18917, 18950), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF0'], {}), '(miniamf.AMF0)\n', (18936, 18950), False, 'import miniamf\n'), ((19295, 19342), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF0'], {'use_ext': '(True)'}), '(miniamf.AMF0, use_ext=True)\n', (19314, 19342), False, 'import miniamf\n'), ((19583, 19631), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF0'], {'use_ext': '(False)'}), '(miniamf.AMF0, use_ext=False)\n', (19602, 19631), False, 'import miniamf\n'), ((19959, 19992), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {}), '(miniamf.AMF0)\n', (19978, 19992), False, 'import miniamf\n'), ((20337, 20384), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {'use_ext': '(True)'}), '(miniamf.AMF0, use_ext=True)\n', (20356, 20384), False, 'import miniamf\n'), ((20625, 20673), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF0'], {'use_ext': '(False)'}), '(miniamf.AMF0, use_ext=False)\n', (20644, 20673), False, 'import miniamf\n'), ((21131, 21164), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF3'], {}), '(miniamf.AMF3)\n', (21150, 21164), False, 'import miniamf\n'), ((21509, 21556), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF3'], {'use_ext': '(True)'}), '(miniamf.AMF3, use_ext=True)\n', (21528, 21556), False, 'import miniamf\n'), ((21797, 21845), 'miniamf.get_decoder', 'miniamf.get_decoder', (['miniamf.AMF3'], {'use_ext': '(False)'}), '(miniamf.AMF3, use_ext=False)\n', (21816, 21845), False, 'import miniamf\n'), ((22173, 22206), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {}), '(miniamf.AMF3)\n', (22192, 22206), False, 'import miniamf\n'), ((22551, 22598), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {'use_ext': '(True)'}), '(miniamf.AMF3, use_ext=True)\n', (22570, 22598), False, 'import miniamf\n'), ((22839, 22887), 'miniamf.get_encoder', 'miniamf.get_encoder', (['miniamf.AMF3'], {'use_ext': '(False)'}), '(miniamf.AMF3, use_ext=False)\n', (22858, 22887), False, 'import miniamf\n'), ((5872, 5903), 'miniamf.load_class', 'miniamf.load_class', (['"""spam.eggs"""'], {}), "('spam.eggs')\n", (5890, 5903), False, 'import miniamf\n'), ((6038, 6075), 'miniamf.ClassAlias', 'miniamf.ClassAlias', (['Spam', '"""spam.eggs"""'], {}), "(Spam, 'spam.eggs')\n", (6056, 6075), False, 'import miniamf\n'), ((6722, 6762), 'miniamf.load_class', 'miniamf.load_class', (['"""unittest.TestCase."""'], {}), "('unittest.TestCase.')\n", (6740, 6762), False, 'import miniamf\n'), ((8480, 8507), 'miniamf.get_type', 'miniamf.get_type', (['[A, B, C]'], {}), '([A, B, C])\n', (8496, 8507), False, 'import miniamf\n'), ((11218, 11261), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['self.__class__'], {}), '(self.__class__)\n', (11245, 11261), False, 'import miniamf\n'), ((12061, 12112), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A', '"""hello"""'], {}), "(DummyAlias, A, 'hello')\n", (12088, 12112), False, 'import miniamf\n'), ((12477, 12519), 'miniamf.register_alias_type', 'miniamf.register_alias_type', (['DummyAlias', 'A'], {}), '(DummyAlias, A)\n', (12504, 12519), False, 'import miniamf\n'), ((12755, 12787), 'miniamf.unregister_alias_type', 'miniamf.unregister_alias_type', (['A'], {}), '(A)\n', (12784, 12787), False, 'import miniamf\n'), ((12932, 12973), 'miniamf.unregister_alias_type', 'miniamf.unregister_alias_type', (['DummyAlias'], {}), '(DummyAlias)\n', (12961, 12973), False, 'import miniamf\n'), ((3481, 3548), 'miniamf.decode', 'miniamf.decode', (["b'\\x06\\x0fconnect\\x05?\\xf0\\x00\\x00\\x00\\x00\\x00\\x00'"], {}), "(b'\\x06\\x0fconnect\\x05?\\xf0\\x00\\x00\\x00\\x00\\x00\\x00')\n", (3495, 3548), False, 'import miniamf\n'), ((3707, 3728), 'miniamf.encode', 'miniamf.encode', (['"""foo"""'], {}), "('foo')\n", (3721, 3728), False, 'import miniamf\n'), ((3847, 3868), 'miniamf.encode', 'miniamf.encode', (['"""foo"""'], {}), "('foo')\n", (3861, 3868), False, 'import miniamf\n'), ((8181, 8206), 'miniamf.TYPE_MAP.values', 'miniamf.TYPE_MAP.values', ([], {}), '()\n', (8204, 8206), False, 'import miniamf\n'), ((3292, 3323), 'miniamf.encode', 'miniamf.encode', (['u"""connect"""', '(1.0)'], {}), "(u'connect', 1.0)\n", (3306, 3323), False, 'import miniamf\n')]
|
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.testing import assert_array_equal
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
from feature_selection import HarmonicSearch
from feature_selection import GeneticAlgorithm
from feature_selection import RandomSearch
from feature_selection import BinaryBlackHole
from feature_selection import SimulatedAnneling
from feature_selection import BRKGA
from feature_selection import SPEA2
from feature_selection import PSO
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
import nose.plugins.multiprocess
# Those are nose tests: to run it, write: python -m nose
_multiprocess_can_split_ = True
METACLASSES = [
SimulatedAnneling, PSO, HarmonicSearch, GeneticAlgorithm, RandomSearch,
BinaryBlackHole, BRKGA,
SPEA2]
def test_check_estimator():
for metaclass in METACLASSES:
print("check_estimator: ", metaclass.__class__.__name__)
check_estimator(metaclass)
def test_overall():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma='auto')
for metaclass in METACLASSES:
meta = metaclass(estimator=clf, random_state=0, verbose=False,
make_logbook=True, repeat=1, number_gen=2,
)
print("Checking: ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# Transformed dataset
X_1 = meta.transform(X)
meta = metaclass(estimator=clf, random_state=0,
make_logbook=True, repeat=1, number_gen=2, )
# Fit and Transform
X_2 = meta.fit_transform(X=X, y=y, normalize=True)
assert_array_equal(X_1, X_2)
meta.best_pareto()
meta.all_paretos()
meta.best_solution()
meta.all_solutions()
def test_parallel():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma="auto")
for metaclass in METACLASSES :
meta = metaclass(estimator=clf, random_state=0, make_logbook=False,
repeat=2, number_gen=2, parallel=True, verbose=True,
)
print("Checking parallel ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# Transformed dataset
X_1 = meta.transform(X)
meta = metaclass(estimator=clf, random_state=0, make_logbook=False,
repeat=2, number_gen=2, parallel=True, )
# Fit and Transform
X_2 = meta.fit_transform(X=X, y=y, normalize=True)
# Check Function
assert_array_equal(X_1, X_2)
def test_unusual_errors():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC(gamma='auto')
for metaclass in METACLASSES:
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2, )
print("Checking unusual error: ", meta.__class__.__name__)
meta.fit(X, y, normalize=True)
# Let's suppose you have a empty best
assert_raises(ValueError, meta.safe_mask, X, [])
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2, )
#assert_raises(ValueError, meta.score_func_to_gridsearch, meta)
for metaclass in [BRKGA]:
meta = metaclass(estimator=clf, random_state=0, verbose=0,
make_logbook=True, repeat=1, number_gen=2,
elite_size=5)
assert_raises(ValueError, meta.fit, [ [1, 1, 1], [1,2,3] ], [1, 0])
def test_predict():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
sa = SimulatedAnneling( number_gen=2)
sa.fit(X,y, normalize=True)
sa.predict(X)
"""
def test_score_grid_func():
dataset = load_breast_cancer()
X, y = dataset['data'], dataset['target_names'].take(dataset['target'])
# Classifier to be used in the metaheuristic
clf = SVC()
for metaclass in METACLASSES:
meta = metaclass(classifier=clf, random_state=0, verbose=True,
make_logbook=True, repeat=1, number_gen=3,
)
print("Checking Grid: ", meta.__class__.__name__)
# Fit the classifier
meta.fit(X, y, normalize=True)
# See score
meta.score_func_to_gridsearch(meta)
"""
|
[
"sklearn.utils.testing.assert_raises",
"feature_selection.SimulatedAnneling",
"sklearn.datasets.load_breast_cancer",
"sklearn.utils.testing.assert_array_equal",
"sklearn.utils.estimator_checks.check_estimator",
"sklearn.svm.SVC"
] |
[((1121, 1141), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (1139, 1141), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1282, 1299), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (1285, 1299), False, 'from sklearn.svm import SVC\n'), ((2117, 2137), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (2135, 2137), False, 'from sklearn.datasets import load_breast_cancer\n'), ((2278, 2295), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (2281, 2295), False, 'from sklearn.svm import SVC\n'), ((3065, 3085), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (3083, 3085), False, 'from sklearn.datasets import load_breast_cancer\n'), ((3226, 3243), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (3229, 3243), False, 'from sklearn.svm import SVC\n'), ((4170, 4190), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (4188, 4190), False, 'from sklearn.datasets import load_breast_cancer\n'), ((4330, 4361), 'feature_selection.SimulatedAnneling', 'SimulatedAnneling', ([], {'number_gen': '(2)'}), '(number_gen=2)\n', (4347, 4361), False, 'from feature_selection import SimulatedAnneling\n'), ((1042, 1068), 'sklearn.utils.estimator_checks.check_estimator', 'check_estimator', (['metaclass'], {}), '(metaclass)\n', (1057, 1068), False, 'from sklearn.utils.estimator_checks import check_estimator\n'), ((1936, 1964), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['X_1', 'X_2'], {}), '(X_1, X_2)\n', (1954, 1964), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((2992, 3020), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['X_1', 'X_2'], {}), '(X_1, X_2)\n', (3010, 3020), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((3585, 3633), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'meta.safe_mask', 'X', '[]'], {}), '(ValueError, meta.safe_mask, X, [])\n', (3598, 3633), False, 'from sklearn.utils.testing import assert_raises\n'), ((4055, 4122), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'meta.fit', '[[1, 1, 1], [1, 2, 3]]', '[1, 0]'], {}), '(ValueError, meta.fit, [[1, 1, 1], [1, 2, 3]], [1, 0])\n', (4068, 4122), False, 'from sklearn.utils.testing import assert_raises\n')]
|
import math
from typing import List, Union, Sequence
from pyrep.backend import sim
from pyrep.objects.object import Object, object_type_to_class
import numpy as np
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
class VisionSensor(Object):
"""A camera-type sensor, reacting to light, colors and images.
"""
def __init__(self, name_or_handle: Union[str, int]):
super().__init__(name_or_handle)
self.resolution = sim.simGetVisionSensorResolution(self._handle)
@staticmethod
def create(resolution: List[int], explicit_handling=False,
perspective_mode=True, show_volume_not_detecting=True,
show_volume_detecting=True, passive=False,
use_local_lights=False, show_fog=True,
near_clipping_plane=1e-2, far_clipping_plane=10.0,
view_angle=60.0, ortho_size=1.0, sensor_size=None,
render_mode=RenderMode.OPENGL3,
position=None, orientation=None) -> 'VisionSensor':
""" Create a Vision Sensor
:param resolution: List of the [x, y] resolution.
:param explicit_handling: Sensor will be explicitly handled.
:param perspective_mode: Sensor will be operated in Perspective Mode.
Orthographic mode if False.
:param show_volume_not_detecting: Sensor volume will be shown when not
detecting anything.
:param show_volume_detecting: Sensor will be shown when detecting.
:param passive: Sensor will be passive (use an external image).
:param use_local_lights: Sensor will use local lights.
:param show_fog: Sensor will show fog (if enabled).
:param near_clipping_plane: Near clipping plane.
:param far_clipping_plane: Far clipping plane.
:param view_angle: Perspective angle (in degrees) if in Perspective Mode.
:param ortho_size: Orthographic projection size [m] if in Orthographic
Mode.
:param sensor_size: Size [x, y, z] of the Vision Sensor object.
:param render_mode: Sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
:param position: The [x, y, z] position, if specified.
:param orientation: The [x, y, z] orientation in radians, if specified.
:return: The created Vision Sensor.
"""
options = 0
if explicit_handling:
options |= 1
if perspective_mode:
options |= 2
if not show_volume_not_detecting:
options |= 4
if not show_volume_detecting:
options |= 8
if passive:
options |= 16
if use_local_lights:
options |= 32
if not show_fog:
options |= 64
int_params = [
resolution[0], # 0
resolution[1], # 1
0, # 2
0 # 3
]
if sensor_size is None:
sensor_size = [0.01, 0.01, 0.03]
float_params = [
near_clipping_plane, # 0
far_clipping_plane, # 1
math.radians(view_angle) if perspective_mode else ortho_size, # 2
sensor_size[0], # 3
sensor_size[1], # 4
sensor_size[2], # 5
0.0, # 6
0.0, # 7
0.0, # 8
0.0, # 9
0.0, # 10
]
vs = VisionSensor(
sim.simCreateVisionSensor(options, int_params, float_params, None)
)
vs.set_render_mode(render_mode)
if position is not None:
vs.set_position(position)
if orientation is not None:
vs.set_orientation(orientation)
return vs
def _get_requested_type(self) -> ObjectType:
return ObjectType.VISION_SENSOR
def handle_explicitly(self) -> None:
"""Handle sensor explicitly.
This enables capturing image (e.g., capture_rgb())
without PyRep.step().
"""
if not self.get_explicit_handling():
raise RuntimeError('The explicit_handling is disabled. '
'Call set_explicit_handling(value=1) to enable explicit_handling first.')
sim.simHandleVisionSensor(self._handle)
def capture_rgb(self) -> np.ndarray:
"""Retrieves the rgb-image of a vision sensor.
:return: A numpy array of size (width, height, 3)
"""
return sim.simGetVisionSensorImage(self._handle, self.resolution)
def capture_depth(self, in_meters=False) -> np.ndarray:
"""Retrieves the depth-image of a vision sensor.
:param in_meters: Whether the depth should be returned in meters.
:return: A numpy array of size (width, height)
"""
return sim.simGetVisionSensorDepthBuffer(
self._handle, self.resolution, in_meters)
def capture_pointcloud(self) -> np.ndarray:
"""Retrieves point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
d = self.capture_depth(in_meters=True)
return self.pointcloud_from_depth(d)
def pointcloud_from_depth(self, depth: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
intrinsics = self.get_intrinsic_matrix()
return VisionSensor.pointcloud_from_depth_and_camera_params(
depth, self.get_matrix(), intrinsics)
@staticmethod
def pointcloud_from_depth_and_camera_params(
depth: np.ndarray, extrinsics: np.ndarray,
intrinsics: np.ndarray) -> np.ndarray:
"""Converts depth (in meters) to point cloud in word frame.
:return: A numpy array of size (width, height, 3)
"""
upc = _create_uniform_pixel_coords_image(depth.shape)
pc = upc * np.expand_dims(depth, -1)
C = np.expand_dims(extrinsics[:3, 3], 0).T
R = extrinsics[:3, :3]
R_inv = R.T # inverse of rot matrix is transpose
R_inv_C = np.matmul(R_inv, C)
extrinsics = np.concatenate((R_inv, -R_inv_C), -1)
cam_proj_mat = np.matmul(intrinsics, extrinsics)
cam_proj_mat_homo = np.concatenate(
[cam_proj_mat, [np.array([0, 0, 0, 1])]])
cam_proj_mat_inv = np.linalg.inv(cam_proj_mat_homo)[0:3]
world_coords_homo = np.expand_dims(_pixel_to_world_coords(
pc, cam_proj_mat_inv), 0)
world_coords = world_coords_homo[..., :-1][0]
return world_coords
def get_intrinsic_matrix(self):
res = np.array(self.get_resolution())
pp_offsets = res / 2
ratio = res[0] / res[1]
pa_x = pa_y = math.radians(self.get_perspective_angle())
if ratio > 1:
pa_y = 2 * np.arctan(np.tan(pa_y / 2) / ratio)
elif ratio < 1:
pa_x = 2 * np.arctan(np.tan(pa_x / 2) * ratio)
persp_angles = np.array([pa_x, pa_y])
focal_lengths = -res / (2 * np.tan(persp_angles / 2))
return np.array(
[[focal_lengths[0], 0., pp_offsets[0]],
[0., focal_lengths[1], pp_offsets[1]],
[0., 0., 1.]])
def get_resolution(self) -> List[int]:
""" Return the Sensor's resolution.
:return: Resolution [x, y]
"""
return sim.simGetVisionSensorResolution(self._handle)
def set_resolution(self, resolution: List[int]) -> None:
""" Set the Sensor's resolution.
:param resolution: New resolution [x, y]
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_x, resolution[0]
)
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_resolution_y, resolution[1]
)
self.resolution = resolution
def get_perspective_mode(self) -> PerspectiveMode:
""" Retrieve the Sensor's perspective mode.
:return: The current PerspectiveMode.
"""
perspective_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
)
return PerspectiveMode(perspective_mode)
def set_perspective_mode(self, perspective_mode: PerspectiveMode) -> None:
""" Set the Sensor's perspective mode.
:param perspective_mode: The new perspective mode, one of:
PerspectiveMode.ORTHOGRAPHIC
PerspectiveMode.PERSPECTIVE
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_perspective_operation,
perspective_mode.value
)
def get_render_mode(self) -> RenderMode:
""" Retrieves the Sensor's rendering mode
:return: RenderMode for the current rendering mode.
"""
render_mode = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode
)
return RenderMode(render_mode)
def set_render_mode(self, render_mode: RenderMode) -> None:
""" Set the Sensor's rendering mode
:param render_mode: The new sensor rendering mode, one of:
RenderMode.OPENGL
RenderMode.OPENGL_AUXILIARY
RenderMode.OPENGL_COLOR_CODED
RenderMode.POV_RAY
RenderMode.EXTERNAL
RenderMode.EXTERNAL_WINDOWED
RenderMode.OPENGL3
RenderMode.OPENGL3_WINDOWED
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_render_mode,
render_mode.value
)
def get_windowed_size(self) -> Sequence[int]:
"""Get the size of windowed rendering.
:return: The (x, y) resolution of the window. 0 for full-screen.
"""
size_x = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x)
size_y = sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y)
return size_x, size_y
def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
"""Set the size of windowed rendering.
:param resolution: The (x, y) resolution of the window.
0 for full-screen.
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_x,
resolution[0])
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_windowed_size_y,
resolution[1])
def get_perspective_angle(self) -> float:
""" Get the Sensor's perspective angle.
:return: The sensor's perspective angle (in degrees).
"""
return math.degrees(sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle
))
def set_perspective_angle(self, angle: float) -> None:
""" Set the Sensor's perspective angle.
:param angle: New perspective angle (in degrees)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_perspective_angle,
math.radians(angle)
)
def get_orthographic_size(self) -> float:
""" Get the Sensor's orthographic size.
:return: The sensor's orthographic size (in metres).
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size
)
def set_orthographic_size(self, ortho_size: float) -> None:
""" Set the Sensor's orthographic size.
:param angle: New orthographic size (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_ortho_size, ortho_size
)
def get_near_clipping_plane(self) -> float:
""" Get the Sensor's near clipping plane.
:return: Near clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping
)
def set_near_clipping_plane(self, near_clipping: float) -> None:
""" Set the Sensor's near clipping plane.
:param near_clipping: New near clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_near_clipping, near_clipping
)
def get_far_clipping_plane(self) -> float:
""" Get the Sensor's far clipping plane.
:return: Near clipping plane (metres)
"""
return sim.simGetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping
)
def set_far_clipping_plane(self, far_clipping: float) -> None:
""" Set the Sensor's far clipping plane.
:param far_clipping: New far clipping plane (in metres)
"""
sim.simSetObjectFloatParameter(
self._handle, sim.sim_visionfloatparam_far_clipping, far_clipping
)
def set_entity_to_render(self, entity_to_render: int) -> None:
""" Set the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 to render all objects in scene.
:param entity_to_render: Handle of the entity to render
"""
sim.simSetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render, entity_to_render
)
def get_entity_to_render(self) -> None:
""" Get the entity to render to the Sensor, this can be an object or more usefully a collection.
-1 if all objects in scene are rendered.
:return: Handle of the entity to render
"""
return sim.simGetObjectInt32Parameter(
self._handle, sim.sim_visionintparam_entity_to_render
)
def _create_uniform_pixel_coords_image(resolution: np.ndarray):
pixel_x_coords = np.reshape(
np.tile(np.arange(resolution[1]), [resolution[0]]),
(resolution[0], resolution[1], 1)).astype(np.float32)
pixel_y_coords = np.reshape(
np.tile(np.arange(resolution[0]), [resolution[1]]),
(resolution[1], resolution[0], 1)).astype(np.float32)
pixel_y_coords = np.transpose(pixel_y_coords, (1, 0, 2))
uniform_pixel_coords = np.concatenate(
(pixel_x_coords, pixel_y_coords, np.ones_like(pixel_x_coords)), -1)
return uniform_pixel_coords
def _transform(coords, trans):
h, w = coords.shape[:2]
coords = np.reshape(coords, (h * w, -1))
coords = np.transpose(coords, (1, 0))
transformed_coords_vector = np.matmul(trans, coords)
transformed_coords_vector = np.transpose(
transformed_coords_vector, (1, 0))
return np.reshape(transformed_coords_vector,
(h, w, -1))
def _pixel_to_world_coords(pixel_coords, cam_proj_mat_inv):
h, w = pixel_coords.shape[:2]
pixel_coords = np.concatenate(
[pixel_coords, np.ones((h, w, 1))], -1)
world_coords = _transform(pixel_coords, cam_proj_mat_inv)
world_coords_homo = np.concatenate(
[world_coords, np.ones((h, w, 1))], axis=-1)
return world_coords_homo
object_type_to_class[ObjectType.VISION_SENSOR] = VisionSensor
|
[
"pyrep.const.PerspectiveMode",
"pyrep.backend.sim.simGetVisionSensorResolution",
"numpy.ones",
"numpy.arange",
"pyrep.backend.sim.simGetObjectFloatParameter",
"math.radians",
"numpy.transpose",
"numpy.tan",
"numpy.reshape",
"numpy.ones_like",
"pyrep.backend.sim.simSetObjectInt32Parameter",
"pyrep.backend.sim.simGetVisionSensorDepthBuffer",
"numpy.linalg.inv",
"pyrep.backend.sim.simCreateVisionSensor",
"numpy.concatenate",
"pyrep.backend.sim.simHandleVisionSensor",
"pyrep.backend.sim.simSetObjectFloatParameter",
"numpy.expand_dims",
"pyrep.backend.sim.simGetVisionSensorImage",
"pyrep.const.RenderMode",
"numpy.array",
"pyrep.backend.sim.simGetObjectInt32Parameter",
"numpy.matmul"
] |
[((14652, 14691), 'numpy.transpose', 'np.transpose', (['pixel_y_coords', '(1, 0, 2)'], {}), '(pixel_y_coords, (1, 0, 2))\n', (14664, 14691), True, 'import numpy as np\n'), ((14917, 14948), 'numpy.reshape', 'np.reshape', (['coords', '(h * w, -1)'], {}), '(coords, (h * w, -1))\n', (14927, 14948), True, 'import numpy as np\n'), ((14962, 14990), 'numpy.transpose', 'np.transpose', (['coords', '(1, 0)'], {}), '(coords, (1, 0))\n', (14974, 14990), True, 'import numpy as np\n'), ((15023, 15047), 'numpy.matmul', 'np.matmul', (['trans', 'coords'], {}), '(trans, coords)\n', (15032, 15047), True, 'import numpy as np\n'), ((15080, 15127), 'numpy.transpose', 'np.transpose', (['transformed_coords_vector', '(1, 0)'], {}), '(transformed_coords_vector, (1, 0))\n', (15092, 15127), True, 'import numpy as np\n'), ((15148, 15197), 'numpy.reshape', 'np.reshape', (['transformed_coords_vector', '(h, w, -1)'], {}), '(transformed_coords_vector, (h, w, -1))\n', (15158, 15197), True, 'import numpy as np\n'), ((458, 504), 'pyrep.backend.sim.simGetVisionSensorResolution', 'sim.simGetVisionSensorResolution', (['self._handle'], {}), '(self._handle)\n', (490, 504), False, 'from pyrep.backend import sim\n'), ((4612, 4651), 'pyrep.backend.sim.simHandleVisionSensor', 'sim.simHandleVisionSensor', (['self._handle'], {}), '(self._handle)\n', (4637, 4651), False, 'from pyrep.backend import sim\n'), ((4835, 4893), 'pyrep.backend.sim.simGetVisionSensorImage', 'sim.simGetVisionSensorImage', (['self._handle', 'self.resolution'], {}), '(self._handle, self.resolution)\n', (4862, 4893), False, 'from pyrep.backend import sim\n'), ((5169, 5244), 'pyrep.backend.sim.simGetVisionSensorDepthBuffer', 'sim.simGetVisionSensorDepthBuffer', (['self._handle', 'self.resolution', 'in_meters'], {}), '(self._handle, self.resolution, in_meters)\n', (5202, 5244), False, 'from pyrep.backend import sim\n'), ((6473, 6492), 'numpy.matmul', 'np.matmul', (['R_inv', 'C'], {}), '(R_inv, C)\n', (6482, 6492), True, 'import numpy as np\n'), ((6514, 6551), 'numpy.concatenate', 'np.concatenate', (['(R_inv, -R_inv_C)', '(-1)'], {}), '((R_inv, -R_inv_C), -1)\n', (6528, 6551), True, 'import numpy as np\n'), ((6575, 6608), 'numpy.matmul', 'np.matmul', (['intrinsics', 'extrinsics'], {}), '(intrinsics, extrinsics)\n', (6584, 6608), True, 'import numpy as np\n'), ((7355, 7377), 'numpy.array', 'np.array', (['[pa_x, pa_y]'], {}), '([pa_x, pa_y])\n', (7363, 7377), True, 'import numpy as np\n'), ((7455, 7566), 'numpy.array', 'np.array', (['[[focal_lengths[0], 0.0, pp_offsets[0]], [0.0, focal_lengths[1], pp_offsets\n [1]], [0.0, 0.0, 1.0]]'], {}), '([[focal_lengths[0], 0.0, pp_offsets[0]], [0.0, focal_lengths[1],\n pp_offsets[1]], [0.0, 0.0, 1.0]])\n', (7463, 7566), True, 'import numpy as np\n'), ((7804, 7850), 'pyrep.backend.sim.simGetVisionSensorResolution', 'sim.simGetVisionSensorResolution', (['self._handle'], {}), '(self._handle)\n', (7836, 7850), False, 'from pyrep.backend import sim\n'), ((8024, 8125), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_resolution_x', 'resolution[0]'], {}), '(self._handle, sim.\n sim_visionintparam_resolution_x, resolution[0])\n', (8054, 8125), False, 'from pyrep.backend import sim\n'), ((8151, 8252), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_resolution_y', 'resolution[1]'], {}), '(self._handle, sim.\n sim_visionintparam_resolution_y, resolution[1])\n', (8181, 8252), False, 'from pyrep.backend import sim\n'), ((8501, 8596), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_perspective_operation'], {}), '(self._handle, sim.\n sim_visionintparam_perspective_operation)\n', (8531, 8596), False, 'from pyrep.backend import sim\n'), ((8630, 8663), 'pyrep.const.PerspectiveMode', 'PerspectiveMode', (['perspective_mode'], {}), '(perspective_mode)\n', (8645, 8663), False, 'from pyrep.const import ObjectType, PerspectiveMode, RenderMode\n'), ((8960, 9079), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_perspective_operation', 'perspective_mode.value'], {}), '(self._handle, sim.\n sim_visionintparam_perspective_operation, perspective_mode.value)\n', (8990, 9079), False, 'from pyrep.backend import sim\n'), ((9300, 9385), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_render_mode'], {}), '(self._handle, sim.sim_visionintparam_render_mode\n )\n', (9330, 9385), False, 'from pyrep.backend import sim\n'), ((9418, 9441), 'pyrep.const.RenderMode', 'RenderMode', (['render_mode'], {}), '(render_mode)\n', (9428, 9441), False, 'from pyrep.const import ObjectType, PerspectiveMode, RenderMode\n'), ((9926, 10030), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_render_mode', 'render_mode.value'], {}), '(self._handle, sim.\n sim_visionintparam_render_mode, render_mode.value)\n', (9956, 10030), False, 'from pyrep.backend import sim\n'), ((10261, 10350), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_x'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_x)\n', (10291, 10350), False, 'from pyrep.backend import sim\n'), ((10376, 10465), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_y'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_y)\n', (10406, 10465), False, 'from pyrep.backend import sim\n'), ((10745, 10849), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_x', 'resolution[0]'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_x, resolution[0])\n', (10775, 10849), False, 'from pyrep.backend import sim\n'), ((10878, 10982), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_windowed_size_y', 'resolution[1]'], {}), '(self._handle, sim.\n sim_visionintparam_windowed_size_y, resolution[1])\n', (10908, 10982), False, 'from pyrep.backend import sim\n'), ((11827, 11913), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_ortho_size'], {}), '(self._handle, sim.\n sim_visionfloatparam_ortho_size)\n', (11857, 11913), False, 'from pyrep.backend import sim\n'), ((12121, 12219), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_ortho_size', 'ortho_size'], {}), '(self._handle, sim.\n sim_visionfloatparam_ortho_size, ortho_size)\n', (12151, 12219), False, 'from pyrep.backend import sim\n'), ((12410, 12499), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_near_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_near_clipping)\n', (12440, 12499), False, 'from pyrep.backend import sim\n'), ((12724, 12828), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_near_clipping', 'near_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_near_clipping, near_clipping)\n', (12754, 12828), False, 'from pyrep.backend import sim\n'), ((13017, 13105), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_far_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_far_clipping)\n', (13047, 13105), False, 'from pyrep.backend import sim\n'), ((13325, 13427), 'pyrep.backend.sim.simSetObjectFloatParameter', 'sim.simSetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_far_clipping', 'far_clipping'], {}), '(self._handle, sim.\n sim_visionfloatparam_far_clipping, far_clipping)\n', (13355, 13427), False, 'from pyrep.backend import sim\n'), ((13746, 13854), 'pyrep.backend.sim.simSetObjectInt32Parameter', 'sim.simSetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_entity_to_render', 'entity_to_render'], {}), '(self._handle, sim.\n sim_visionintparam_entity_to_render, entity_to_render)\n', (13776, 13854), False, 'from pyrep.backend import sim\n'), ((14147, 14237), 'pyrep.backend.sim.simGetObjectInt32Parameter', 'sim.simGetObjectInt32Parameter', (['self._handle', 'sim.sim_visionintparam_entity_to_render'], {}), '(self._handle, sim.\n sim_visionintparam_entity_to_render)\n', (14177, 14237), False, 'from pyrep.backend import sim\n'), ((3824, 3890), 'pyrep.backend.sim.simCreateVisionSensor', 'sim.simCreateVisionSensor', (['options', 'int_params', 'float_params', 'None'], {}), '(options, int_params, float_params, None)\n', (3849, 3890), False, 'from pyrep.backend import sim\n'), ((6289, 6314), 'numpy.expand_dims', 'np.expand_dims', (['depth', '(-1)'], {}), '(depth, -1)\n', (6303, 6314), True, 'import numpy as np\n'), ((6327, 6363), 'numpy.expand_dims', 'np.expand_dims', (['extrinsics[:3, 3]', '(0)'], {}), '(extrinsics[:3, 3], 0)\n', (6341, 6363), True, 'import numpy as np\n'), ((6734, 6766), 'numpy.linalg.inv', 'np.linalg.inv', (['cam_proj_mat_homo'], {}), '(cam_proj_mat_homo)\n', (6747, 6766), True, 'import numpy as np\n'), ((11201, 11294), 'pyrep.backend.sim.simGetObjectFloatParameter', 'sim.simGetObjectFloatParameter', (['self._handle', 'sim.sim_visionfloatparam_perspective_angle'], {}), '(self._handle, sim.\n sim_visionfloatparam_perspective_angle)\n', (11231, 11294), False, 'from pyrep.backend import sim\n'), ((11613, 11632), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (11625, 11632), False, 'import math\n'), ((14776, 14804), 'numpy.ones_like', 'np.ones_like', (['pixel_x_coords'], {}), '(pixel_x_coords)\n', (14788, 14804), True, 'import numpy as np\n'), ((15374, 15392), 'numpy.ones', 'np.ones', (['(h, w, 1)'], {}), '((h, w, 1))\n', (15381, 15392), True, 'import numpy as np\n'), ((15524, 15542), 'numpy.ones', 'np.ones', (['(h, w, 1)'], {}), '((h, w, 1))\n', (15531, 15542), True, 'import numpy as np\n'), ((3386, 3410), 'math.radians', 'math.radians', (['view_angle'], {}), '(view_angle)\n', (3398, 3410), False, 'import math\n'), ((7414, 7438), 'numpy.tan', 'np.tan', (['(persp_angles / 2)'], {}), '(persp_angles / 2)\n', (7420, 7438), True, 'import numpy as np\n'), ((6681, 6703), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (6689, 6703), True, 'import numpy as np\n'), ((14370, 14394), 'numpy.arange', 'np.arange', (['resolution[1]'], {}), '(resolution[1])\n', (14379, 14394), True, 'import numpy as np\n'), ((14525, 14549), 'numpy.arange', 'np.arange', (['resolution[0]'], {}), '(resolution[0])\n', (14534, 14549), True, 'import numpy as np\n'), ((7223, 7239), 'numpy.tan', 'np.tan', (['(pa_y / 2)'], {}), '(pa_y / 2)\n', (7229, 7239), True, 'import numpy as np\n'), ((7306, 7322), 'numpy.tan', 'np.tan', (['(pa_x / 2)'], {}), '(pa_x / 2)\n', (7312, 7322), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import time
import copy
import json
from datetime import datetime
import threading
import collector
import siteMapping
class NetworkTracerouteCollector(collector.Collector):
def __init__(self):
self.TOPIC = "/topic/perfsonar.raw.packet-trace"
self.INDEX_PREFIX = 'ps_trace-'
super(NetworkTracerouteCollector, self).__init__()
def eventCreator(self, message):
m = json.loads(message)
data = {
'_type': 'doc'
}
# print(m)
source = m['meta']['source']
destination = m['meta']['destination']
data['MA'] = m['meta']['measurement_agent']
data['src'] = source
data['dest'] = destination
data['src_host'] = m['meta']['input_source']
data['dest_host'] = m['meta']['input_destination']
data['ipv6'] = False
if ':' in source or ':' in destination:
data['ipv6'] = True
so = siteMapping.getPS(source)
de = siteMapping.getPS(destination)
if so != None:
data['src_site'] = so[0]
data['src_VO'] = so[1]
if de != None:
data['dest_site'] = de[0]
data['dest_VO'] = de[1]
data['src_production'] = siteMapping.isProductionThroughput(source)
data['dest_production'] = siteMapping.isProductionThroughput(
destination)
if not 'datapoints' in m:
print(threading.current_thread().name,
"no datapoints found in the message")
return
dp = m['datapoints']
# print(su)
for ts in dp:
dati = datetime.utcfromtimestamp(float(ts))
data['_index'] = self.es_index_prefix + self.INDEX_PREFIX + str(dati.year) + "." + str(dati.month) + "." + str(dati.day)
data['timestamp'] = int(float(ts) * 1000)
data['_id'] = hash((m['meta']['org_metadata_key'], data['timestamp']))
data['hops'] = []
data['rtts'] = []
data['ttls'] = []
hops = dp[ts]
for hop in hops:
if 'ttl' not in hop or 'ip' not in hop or 'query' not in hop:
continue
nq = int(hop['query'])
if nq != 1:
continue
data['hops'].append(hop['ip'])
data['ttls'].append(int(hop['ttl']))
if 'rtt' in hop and hop['rtt'] != None:
data['rtts'].append(float(hop['rtt']))
else:
data['rtts'].append(0.0)
# print(data)
hs = ''
for h in data['hops']:
if h == None:
hs += "None"
else:
hs += h
data['n_hops'] = len(data['hops'])
if len(data['rtts']):
data['max_rtt'] = max(data['rtts'])
data['hash'] = hash(hs)
self.aLotOfData.append(copy.copy(data))
def main():
collector = NetworkTracerouteCollector()
collector.start()
if __name__ == "__main__":
main()
|
[
"json.loads",
"copy.copy",
"collector.start",
"siteMapping.isProductionThroughput",
"threading.current_thread",
"siteMapping.getPS"
] |
[((3086, 3103), 'collector.start', 'collector.start', ([], {}), '()\n', (3101, 3103), False, 'import collector\n'), ((445, 464), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (455, 464), False, 'import json\n'), ((973, 998), 'siteMapping.getPS', 'siteMapping.getPS', (['source'], {}), '(source)\n', (990, 998), False, 'import siteMapping\n'), ((1012, 1042), 'siteMapping.getPS', 'siteMapping.getPS', (['destination'], {}), '(destination)\n', (1029, 1042), False, 'import siteMapping\n'), ((1268, 1310), 'siteMapping.isProductionThroughput', 'siteMapping.isProductionThroughput', (['source'], {}), '(source)\n', (1302, 1310), False, 'import siteMapping\n'), ((1345, 1392), 'siteMapping.isProductionThroughput', 'siteMapping.isProductionThroughput', (['destination'], {}), '(destination)\n', (1379, 1392), False, 'import siteMapping\n'), ((2993, 3008), 'copy.copy', 'copy.copy', (['data'], {}), '(data)\n', (3002, 3008), False, 'import copy\n'), ((1458, 1484), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (1482, 1484), False, 'import threading\n')]
|
from os.path import dirname, join, isfile
# Constants
PROJECT_ROOT_DIRECTORY = dirname(dirname(__file__))
DUMP_FILE_SUFFIX = "_dump.csv"
def getFullPath(*path):
return join(PROJECT_ROOT_DIRECTORY, *path)
def getUserLastDumpFilePath(userId):
return getFullPath('resources', 'dump_files', "{0}{1}".format(userId, DUMP_FILE_SUFFIX))
def writeToCsvFile(userId, headers, rows):
target = open(getUserLastDumpFilePath(userId),'w+')
target.truncate()
# #dump same data to file without format
rows[0] = headers
for i in range(len(rows)):
value = ', '.join([ rows[i][index] for index in range(len(rows[i])) ])
target.write(value + "\n")
target.close()
|
[
"os.path.dirname",
"os.path.join"
] |
[((88, 105), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'from os.path import dirname, join, isfile\n'), ((174, 209), 'os.path.join', 'join', (['PROJECT_ROOT_DIRECTORY', '*path'], {}), '(PROJECT_ROOT_DIRECTORY, *path)\n', (178, 209), False, 'from os.path import dirname, join, isfile\n')]
|
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Flatten, Conv2D
from keras.layers import MaxPooling2D, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from src.utils.train_utils import post_process
class MathewTrainer:
def __init__(self):
self.image_x = 100
self.image_y = 100
self.train_dir = "data/"
self.batch_size = 64
self.model_name = "model/mathew.h5"
def keras_model(self, image_x, image_y):
num_of_classes = 14
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(64, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(128, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(num_of_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
filepath = self.model_name
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
return model, callbacks_list
def train(self):
train_datagen = ImageDataGenerator(
rescale=1. / 255,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
rotation_range=15,
zoom_range=0.2,
horizontal_flip=False,
validation_split=0.2,
fill_mode='nearest')
train_generator = train_datagen.flow_from_directory(
self.train_dir,
target_size=(self.image_x, self.image_y),
color_mode="grayscale",
batch_size=self.batch_size,
seed=42,
class_mode='categorical',
subset="training",
shuffle=True)
validation_generator = train_datagen.flow_from_directory(
self.train_dir,
target_size=(self.image_x, self.image_y),
color_mode="grayscale",
batch_size=self.batch_size,
seed=42,
class_mode='categorical',
subset="validation",
shuffle=False)
print(validation_generator.class_indices)
model, callbacks_list = self.keras_model(self.image_x, self.image_y)
print(model.summary())
his = model.fit_generator(train_generator, epochs=20, validation_data=validation_generator)
model.save(self.model_name)
post_process(model, validation_generator, his)
|
[
"keras.preprocessing.image.ImageDataGenerator",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"keras.layers.Flatten",
"src.utils.train_utils.post_process",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.layers.MaxPooling2D"
] |
[((576, 588), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (586, 588), False, 'from keras.models import Sequential\n'), ((1565, 1657), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=1, save_best_only=True,\n mode='max')\n", (1580, 1657), False, 'from keras.callbacks import ModelCheckpoint\n'), ((1776, 1987), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.2)', 'rotation_range': '(15)', 'zoom_range': '(0.2)', 'horizontal_flip': '(False)', 'validation_split': '(0.2)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, width_shift_range=0.2,\n height_shift_range=0.2, shear_range=0.2, rotation_range=15, zoom_range=\n 0.2, horizontal_flip=False, validation_split=0.2, fill_mode='nearest')\n", (1794, 1987), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3069, 3115), 'src.utils.train_utils.post_process', 'post_process', (['model', 'validation_generator', 'his'], {}), '(model, validation_generator, his)\n', (3081, 3115), False, 'from src.utils.train_utils import post_process\n'), ((607, 679), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(2, 2)'], {'input_shape': '(image_x, image_y, 1)', 'activation': '"""relu"""'}), "(32, (2, 2), input_shape=(image_x, image_y, 1), activation='relu')\n", (613, 679), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((699, 761), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='same')\n", (711, 761), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((781, 853), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(2, 2)'], {'input_shape': '(image_x, image_y, 1)', 'activation': '"""relu"""'}), "(64, (2, 2), input_shape=(image_x, image_y, 1), activation='relu')\n", (787, 853), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((873, 935), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='same')\n", (885, 935), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((955, 993), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(2, 2)'], {'activation': '"""relu"""'}), "(128, (2, 2), activation='relu')\n", (961, 993), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((1013, 1075), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(5, 5)', 'strides': '(5, 5)', 'padding': '"""same"""'}), "(pool_size=(5, 5), strides=(5, 5), padding='same')\n", (1025, 1075), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((1095, 1104), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1102, 1104), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((1124, 1154), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (1129, 1154), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((1174, 1186), 'keras.layers.Dropout', 'Dropout', (['(0.6)'], {}), '(0.6)\n', (1181, 1186), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((1206, 1235), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (1211, 1235), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((1255, 1267), 'keras.layers.Dropout', 'Dropout', (['(0.6)'], {}), '(0.6)\n', (1262, 1267), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((1287, 1316), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1292, 1316), False, 'from keras.layers import Dense, Flatten, Conv2D\n'), ((1336, 1348), 'keras.layers.Dropout', 'Dropout', (['(0.6)'], {}), '(0.6)\n', (1343, 1348), False, 'from keras.layers import MaxPooling2D, Dropout\n'), ((1368, 1411), 'keras.layers.Dense', 'Dense', (['num_of_classes'], {'activation': '"""softmax"""'}), "(num_of_classes, activation='softmax')\n", (1373, 1411), False, 'from keras.layers import Dense, Flatten, Conv2D\n')]
|
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from collections import defaultdict
import tokenize
import sys
from ..common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxint))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxint))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
|
[
"collections.defaultdict"
] |
[((1297, 1314), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1308, 1314), False, 'from collections import defaultdict\n')]
|
import SimpleITK as sitk
import csv
# Load the Images to be measured
ScalarValuesFile = '~/SimpleITK-MICCAI-2011-Tutorial/Data/FA.png'
ScalarValuesImage = sitk.Cast( sitk.ReadImage(ScalarValuesFile), sitk.sitkUInt32 )
sitk.Show ( ScalarValuesImage )
LabelMapFile = '~/SimpleITK-MICCAI-2011-Tutorial/Data/LB.png'
LabelMapImage = sitk.Cast( sitk.ReadImage(LabelMapFile), sitk.sitkUInt32 )
sitk.Show ( LabelMapFile )
# <demo> stop
lsfilter = sitk.LabelStatisticsImageFilter()
lsfilter.Execute(LabelMapImage,ScalarValuesImage)
keys = lsfilter.GetValidLabels();
# <demo> stop
### Now extract measurement values to cataloging in a database/spreadsheet
MySubjectID="Subj01"
measurementDict=dict()
for labelValue in keys:
uniqueId = ( MySubjectID, labelValue )
measurementMap=lsfilter.GetMeasurementMap(labelValue)
measurementDict[uniqueId]=dict( measurementMap )
# <demo> stop
print("DUMPING MEASUREMENT DICTIONARY")
print(measurementDict)
# <demo> stop
#A map between internal labels and header row strings.
headerMap={'SUBJID':'SubjectID',
'LABELID':'LabelID',
'Variance':'Variance',
'Minimum':'Minimum',
'Maximum':'Maximum',
'Mean':'Mean',
'Count':'NumPixels',
'approxMedian':'Median',
'Sum':'Sum',
'Sigma':'Sigma'}
csvFileName="MyValues.csv"
csvFile=open(csvFileName, 'wb')
myDictWriter=csv.DictWriter(csvFile,headerMap.keys())
myDictWriter.writerow(headerMap)
for uniqueId in measurementDict.keys():
unrollRow = measurementDict[uniqueId]
unrollRow['SUBJID']=uniqueId[0]
unrollRow['LABELID']=uniqueId[1]
myDictWriter.writerow(unrollRow)
csvFile.close()
|
[
"SimpleITK.Show",
"SimpleITK.ReadImage",
"SimpleITK.LabelStatisticsImageFilter"
] |
[((219, 247), 'SimpleITK.Show', 'sitk.Show', (['ScalarValuesImage'], {}), '(ScalarValuesImage)\n', (228, 247), True, 'import SimpleITK as sitk\n'), ((389, 412), 'SimpleITK.Show', 'sitk.Show', (['LabelMapFile'], {}), '(LabelMapFile)\n', (398, 412), True, 'import SimpleITK as sitk\n'), ((443, 476), 'SimpleITK.LabelStatisticsImageFilter', 'sitk.LabelStatisticsImageFilter', ([], {}), '()\n', (474, 476), True, 'import SimpleITK as sitk\n'), ((167, 199), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['ScalarValuesFile'], {}), '(ScalarValuesFile)\n', (181, 199), True, 'import SimpleITK as sitk\n'), ((341, 369), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['LabelMapFile'], {}), '(LabelMapFile)\n', (355, 369), True, 'import SimpleITK as sitk\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/v2x/proto/v2x_service_obu_to_car.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.perception.proto import perception_obstacle_pb2 as modules_dot_perception_dot_proto_dot_perception__obstacle__pb2
from modules.v2x.proto import v2x_traffic_light_pb2 as modules_dot_v2x_dot_proto_dot_v2x__traffic__light__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/v2x/proto/v2x_service_obu_to_car.proto',
package='apollo.v2x',
syntax='proto2',
serialized_pb=_b('\n.modules/v2x/proto/v2x_service_obu_to_car.proto\x12\napollo.v2x\x1a\x32modules/perception/proto/perception_obstacle.proto\x1a)modules/v2x/proto/v2x_traffic_light.proto\"I\n\x0eStatusResponse\x12\x15\n\x06status\x18\x01 \x02(\x08:\x05\x66\x61lse\x12\x0c\n\x04info\x18\x02 \x01(\t\x12\x12\n\nerror_code\x18\x03 \x01(\x03\x32\xca\x01\n\x08ObuToCar\x12_\n\x17SendPerceptionObstacles\x12&.apollo.perception.PerceptionObstacles\x1a\x1a.apollo.v2x.StatusResponse\"\x00\x12]\n\x13SendV2xTrafficLight\x12(.apollo.v2x.IntersectionTrafficLightData\x1a\x1a.apollo.v2x.StatusResponse\"\x00')
,
dependencies=[modules_dot_perception_dot_proto_dot_perception__obstacle__pb2.DESCRIPTOR,modules_dot_v2x_dot_proto_dot_v2x__traffic__light__pb2.DESCRIPTOR,])
_STATUSRESPONSE = _descriptor.Descriptor(
name='StatusResponse',
full_name='apollo.v2x.StatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='apollo.v2x.StatusResponse.status', index=0,
number=1, type=8, cpp_type=7, label=2,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='info', full_name='apollo.v2x.StatusResponse.info', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_code', full_name='apollo.v2x.StatusResponse.error_code', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=157,
serialized_end=230,
)
DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
DESCRIPTOR = _STATUSRESPONSE,
__module__ = 'modules.v2x.proto.v2x_service_obu_to_car_pb2'
# @@protoc_insertion_point(class_scope:apollo.v2x.StatusResponse)
))
_sym_db.RegisterMessage(StatusResponse)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
] |
[((513, 539), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (537, 539), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1873, 2186), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""status"""', 'full_name': '"""apollo.v2x.StatusResponse.status"""', 'index': '(0)', 'number': '(1)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(2)', 'has_default_value': '(True)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='status', full_name=\n 'apollo.v2x.StatusResponse.status', index=0, number=1, type=8, cpp_type\n =7, label=2, has_default_value=True, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, options=None)\n", (1900, 2186), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2561, 2878), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""error_code"""', 'full_name': '"""apollo.v2x.StatusResponse.error_code"""', 'index': '(2)', 'number': '(3)', 'type': '(3)', 'cpp_type': '(2)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='error_code', full_name=\n 'apollo.v2x.StatusResponse.error_code', index=2, number=3, type=3,\n cpp_type=2, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, options=None)\n", (2588, 2878), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import os
import numpy as np
import re
import sys
try:
import h5py
except ImportError:
h5py = None
'''
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
'''
from .. import logger, logging
from .base import MFPackage, MissingFile
from .name import Modflow
_re_fmtin = re.compile(
r'\((?P<body>(?P<rep>\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\d+)(\.(?P<d>\d+))?'
r'|FREE|BINARY)\)')
class MFFileReader(object):
"""MODFLOW file reader"""
_parent_class = MFPackage
def __init__(self, f=None, parent=None):
"""Initialize with a file and an instance of a parent class
Parameters
----------
f : str, file-like object or None
A path to a file, or a file-like reader with with a 'readlines'
method, such as BytesIO. If None, then it is obtained from
parent.fpath, or parent.fname
parent : instance of MFPackage
"""
# Set up logger
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logger.level)
if parent is None:
parent = self._parent_class()
if not isinstance(parent, self._parent_class):
self.logger.error(
"'parent' should be an instance of a %r object; found %r",
self._parent_class.__name__, parent.__class__.__name__)
self.parent = parent
if f is None:
if getattr(parent, 'fpath', None) is not None:
f = parent.fpath
elif getattr(parent, 'fname', None) is not None:
f = parent.fname
else:
raise ValueError('unsure how to open file')
# Read data
if hasattr(f, 'readlines'):
# it is a file reader object, e.g. BytesIO
self.fname = f.__class__.__name__
self.lines = f.readlines()
else:
self.fpath = self.parent.fpath = f
if getattr(self, 'fname', None) is None:
self.fname = os.path.split(self.parent.fpath)[1]
# Read whole file at once, then close it
with open(self.parent.fpath, 'r') as fp:
self.lines = fp.readlines()
if self.parent.nam is None:
self.parent.nam = Modflow()
try:
self.parent.nam.ref_dir = os.path.dirname(self.fpath)
except:
pass
self.logger.info("read file '%s' with %d lines",
self.fname, len(self.lines))
self.lineno = 0
self.data_set_num = None
def __len__(self):
"""Returns number of lines"""
return len(self.lines)
def location_exception(self, e):
"""Use to show location of exception while reading file
Example:
fp = _MFFileReader(fpath, self)
try:
fp.read_text(0)
...
fp.check_end()
except Exception as e:
exec(fp.location_exception(e))
"""
location = '%s:%s:%s:Data set %s:' % \
(self.parent.__class__.__name__, self.fname, self.lineno,
self.data_set_num)
if sys.version_info[0] < 3:
return "import sys; raise type(e), type(e)('" + location + "' + " \
"str(e)), sys.exc_info()[2]"
else:
return "import sys; raise type(e)(str(e) + '" + location + "' + " \
"str(e)).with_traceback(sys.exc_info()[2])"
def check_end(self):
"""Check end of file and show messages in logger on status"""
if len(self) == self.lineno:
self.logger.info("finished reading %d lines", self.lineno)
elif len(self) > self.lineno:
remain = len(self) - self.lineno
a, b = 's', ''
if remain == 1:
b, a = a, b
self.logger.warn(
"finished reading %d lines, but %d line%s remain%s",
self.lineno, remain, a, b)
else:
raise ValueError("%d > %d ?" % (self.lineno, len(self)))
@property
def curinfo(self):
"""Returns line and data set number info"""
return str(self.lineno) + ':Data set ' + str(self.data_set_num)
@property
def not_eof(self):
"""Reader is not at the end of file (EOF)"""
return self.lineno < len(self.lines)
@property
def curline(self):
"""Return the current line"""
try:
if self.lineno == 0:
return ''
else:
return self.lines[self.lineno - 1]
except IndexError:
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
def nextline(self, data_set_num=None):
"""Get next line, setting data set number and increment lineno"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug('%s:using nextline', self.curinfo)
self.lineno += 1
try:
line = self.lines[self.lineno - 1]
except IndexError:
self.lineno -= 1
self.logger.error('%s:Unexpected end of file', self.curinfo)
raise IndexError('Unexpected end of file')
if data_set_num is not None:
self.logger.debug(
'%s:returning line with length %d:%r',
self.curinfo, len(line), line)
return line
def readline(self):
"""Alias for nextline()"""
return self.nextline()
def conv(self, item, fmt, name=None):
"""Convert item to format fmt
Parameters
----------
item : str
fmt : str, default ('s')
's' for string or no conversion (default)
'i' for integer
'f' for float
name : str or None
Optional name to provide context information for debugging
"""
try:
if type(fmt) == np.dtype:
return fmt.type(item)
elif fmt == 's': # string
return item
elif fmt == 'i': # integer
return int(item)
elif fmt == 'f': # any floating-point number
# typically either a REAL or DOUBLE PRECISION
return self.parent._float_type.type(item)
else:
raise ValueError('Unknown fmt code %r' % (fmt,))
except ValueError:
if name is not None:
msg = 'Cannot cast %r of %r to type %r' % (name, item, fmt)
else:
msg = 'Cannot cast %r to type %r' % (item, fmt)
raise ValueError(msg)
def get_items(self, data_set_num=None, num_items=None, fmt='s',
multiline=False):
"""Get items from one or more lines (if multiline) into a list
If num_items is defined, then only this count will be returned and any
remaining items from the line will be ignored. If there are too few
items on the line, the values will be some form of "zero", such as
0, 0.0 or ''.
However, if `multiline=True`, then multiple lines can be read to reach
num_items.
If fmt is defined, it must be:
- 's' for string or no conversion (default)
- 'i' for integer
- 'f' for float, as defined by parent._float_type
"""
if data_set_num is not None:
self.data_set_num = data_set_num
self.logger.debug(
'%s:using get_items for num_items=%s',
self.curinfo, num_items)
startln = self.lineno + 1
fill_missing = False
if num_items is None or not multiline:
items = self.nextline().split()
if num_items is not None and len(items) > num_items:
items = items[:num_items]
if (not multiline and num_items is not None and
len(items) < num_items):
fill_missing = (num_items - len(items))
else:
assert isinstance(num_items, int), type(num_items)
assert num_items > 0, num_items
items = []
while len(items) < num_items:
items += self.nextline().split()
if len(items) > num_items: # trim off too many
items = items[:num_items]
if fmt == 's':
res = items
else:
res = [self.conv(x, fmt) for x in items]
if fill_missing:
if fmt == 's':
fill_value = ''
else:
fill_value = '0'
res += [self.conv(fill_value, fmt)] * fill_missing
if data_set_num is not None:
if multiline:
toline = ' to %s' % (self.lineno,)
else:
toline = ''
self.logger.debug('%s:read %d items from line %d%s',
self.data_set_num, num_items, startln, toline)
return res
def get_named_items(self, data_set_num, names, fmt='s'):
"""Get items into dict. See get_items for fmt usage"""
items = self.get_items(data_set_num, len(names), fmt)
res = {}
for name, item in zip(names, items):
if fmt != 's':
item = self.conv(item, fmt, name)
res[name] = item
return res
def read_named_items(self, data_set_num, names, fmt='s'):
"""Read items into parent. See get_items for fmt usage"""
startln = self.lineno + 1
items = self.get_named_items(data_set_num, names, fmt)
for name in items.keys():
setattr(self.parent, name, items[name])
self.logger.debug('%s:read %d items from line %d',
self.data_set_num, len(items), startln)
def read_text(self, data_set_num=0):
"""Reads 0 or more text (comment) for lines that start with '#'"""
startln = self.lineno + 1
self.parent.text = []
while True:
try:
line = self.nextline(data_set_num)
except IndexError:
break
if line.startswith('#'):
line = line[1:].strip()
self.parent.text.append(line)
else:
self.lineno -= 1 # scroll back one?
break
self.logger.debug('%s:read %d lines of text from line %d to %d',
self.data_set_num,
len(self.parent.text), startln, self.lineno)
def read_options(self, data_set_num, process_aux=True):
"""Read options, and optionally process auxiliary variables"""
line = self.nextline(data_set_num)
self.parent.Options = line.upper().split()
if hasattr(self.parent, 'valid_options'):
for opt in self.parent.Options:
if opt not in self.parent.Options:
self.logger.warn("%s:unrecognised option %r",
self.data_set_num, opt)
if process_aux:
raise NotImplementedError
else:
self.logger.debug('%s:read %d options from line %d:%s',
self.data_set_num, len(self.parent.Options),
self.lineno, self.parent.Options)
def read_parameter(self, data_set_num, names):
"""Read [PARAMETER values]
This optional item must start with the word "PARAMETER". If not found,
then names are set to 0.
Parameter names are provided in a list, and are stored as integers
to the parent object.
"""
startln = self.lineno + 1
line = self.nextline(data_set_num)
self.lineno -= 1
if line.upper().startswith('PARAMETER'):
items = self.get_items(num_items=len(names) + 1)
assert items[0].upper() == 'PARAMETER', items[0]
for name, item in zip(names, items[1:]):
value = self.conv(item, 'i', name)
setattr(self.parent, name, value)
else:
for name in names:
setattr(self.parent, name, 0)
self.logger.debug('%s:read %d parameters from line %d',
self.data_set_num, len(names), startln)
def get_array(self, data_set_num, shape, dtype, return_dict=False):
"""Returns array data, similar to array reading utilities U2DREL,
U2DINT, and U1DREL. If return_dict=True, a dict is returned with all
other attributes.
Inputs:
data_set_num - number
shape - 1D array, e.g. 10, or 2D array (20, 30)
dtype - e.g. np.float32 or 'f'
See Page 8-57 from the MODFLOW-2005 mannual for details.
"""
startln = self.lineno + 1
res = {}
first_line = self.nextline(data_set_num)
# Comments are considered after a '#' character on the first line
if '#' in first_line:
res['text'] = first_line[(first_line.find('#') + 1):].strip()
num_type = np.dtype(dtype).type
res['array'] = ar = np.empty(shape, dtype=dtype)
num_items = ar.size
def read_array_data(obj, fmtin):
'''Helper subroutine to actually read array data'''
fmt = _re_fmtin.search(fmtin.upper())
if not fmt:
raise ValueError(
'cannot understand Fortran format: ' + repr(fmtin))
fmt = fmt.groupdict()
if fmt['body'] == 'BINARY':
data_size = ar.size * ar.dtype.itemsize
if hasattr(obj, 'read'):
data = obj.read(data_size)
else:
raise NotImplementedError(
"not sure how to 'read' from " + repr(obj))
iar = np.fromstring(data, dtype)
else: # ASCII
items = []
if not hasattr(obj, 'readline'):
raise NotImplementedError(
"not sure how to 'readline' from " + repr(obj))
if fmt['body'] == 'FREE':
while len(items) < num_items:
items += obj.readline().split()
else: # interpret Fortran format
if fmt['rep']:
rep = int(fmt['rep'])
else:
rep = 1
width = int(fmt['w'])
while len(items) < num_items:
line = obj.readline()
pos = 0
for n in range(rep):
try:
item = line[pos:pos + width].strip()
pos += width
if item:
items.append(item)
except IndexError:
break
iar = np.fromiter(items, dtype=dtype)
if iar.size != ar.size:
raise ValueError('expected size %s, but found %s' %
(ar.size, iar.size))
return iar
# First, assume using more modern free-format control line
control_line = first_line
dat = control_line.split()
# First item is the control word
res['cntrl'] = cntrl = dat[0].upper()
if cntrl == 'CONSTANT':
# CONSTANT CNSTNT
if len(dat) < 2:
raise ValueError(
'expecting to find at least 2 items for CONSTANT')
res['cnstnt'] = cnstnt = dat[1]
if len(dat) > 2 and 'text' not in res:
st = first_line.find(cnstnt) + len(cnstnt)
res['text'] = first_line[st:].strip()
ar.fill(cnstnt)
elif cntrl == 'INTERNAL':
# INTERNAL CNSTNT FMTIN [IPRN]
if len(dat) < 3:
raise ValueError(
'expecting to find at least 3 items for INTERNAL')
res['cnstnt'] = cnstnt = dat[1]
res['fmtin'] = fmtin = dat[2]
if len(dat) >= 4:
res['iprn'] = iprn = dat[3] # not used
if len(dat) > 4 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
iar = read_array_data(self, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'EXTERNAL':
# EXTERNAL Nunit CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for EXTERNAL')
res['nunit'] = nunit = int(dat[1])
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4] # not used
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
# Needs a reference to nam[nunit]
if self.parent.nam is None:
raise AttributeError(
"reference to 'nam' required for EXTERNAL array")
try:
obj = self.parent.nam[nunit]
except KeyError:
raise KeyError("nunit %s not in nam", nunit)
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'OPEN/CLOSE':
# OPEN/CLOSE FNAME CNSTNT FMTIN IPRN
if len(dat) < 5:
raise ValueError(
'expecting to find at least 5 items for OPEN/CLOSE')
res['fname'] = fname = dat[1]
res['cnstnt'] = cnstnt = dat[2]
res['fmtin'] = fmtin = dat[3].upper()
res['iprn'] = iprn = dat[4]
if len(dat) > 5 and 'text' not in res:
st = first_line.find(iprn, first_line.find(fmtin)) + len(iprn)
res['text'] = first_line[st:].strip()
with open(fname, 'rb') as fp:
iar = read_array_data(fp, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
elif cntrl == 'HDF5':
# GMS extension: http://www.xmswiki.com/xms/GMS:MODFLOW_with_HDF5
if not h5py:
raise ImportError('h5py module required to read HDF5 data')
# HDF5 CNSTNT IPRN "FNAME" "pathInFile" nDim start1 nToRead1 ...
file_ch = r'\w/\.\-\+_\(\)'
dat = re.findall('([' + file_ch + ']+|"[' + file_ch + ' ]+")',
control_line)
if len(dat) < 8:
raise ValueError('expecting to find at least 8 '
'items for HDF5; found ' + str(len(dat)))
assert dat[0].upper() == 'HDF5', dat[0]
res['cnstnt'] = cnstnt = dat[1]
try:
cnstnt_val = num_type(cnstnt)
except ValueError: # e.g. 1.0 as int 1
cnstnt_val = num_type(float(cnstnt))
res['iprn'] = dat[2]
res['fname'] = fname = dat[3].strip('"')
res['pathInFile'] = pathInFile = dat[4].strip('"')
nDim = int(dat[5])
nDim_len = {1: 8, 2: 10, 3: 12}
if nDim not in nDim_len:
raise ValueError('expecting to nDim to be one of 1, 2, or 3; '
'found ' + str(nDim))
elif len(dat) < nDim_len[nDim]:
raise ValueError(
('expecting to find at least %d items for HDF5 with '
'%d dimensions; found %d') %
(nDim_len[nDim], nDim, len(dat)))
elif len(dat) > nDim_len[nDim]:
token = dat[nDim_len[nDim]]
st = first_line.find(token) + len(token)
res['text'] = first_line[st:].strip()
if nDim >= 1:
start1, nToRead1 = int(dat[6]), int(dat[7])
slice1 = slice(start1, start1 + nToRead1)
if nDim >= 2:
start2, nToRead2 = int(dat[8]), int(dat[9])
slice2 = slice(start2, start2 + nToRead2)
if nDim == 3:
start3, nToRead3 = int(dat[10]), int(dat[11])
slice3 = slice(start3, start3 + nToRead3)
fpath = os.path.join(self.parent.nam.ref_dir, fname)
if not os.path.isfile(fpath):
raise MissingFile("cannot find file '%s'" % (fpath,))
h5 = h5py.File(fpath, 'r')
ds = h5[pathInFile]
if nDim == 1:
iar = ds[slice1]
elif nDim == 2:
iar = ds[slice1, slice2]
elif nDim == 3:
iar = ds[slice1, slice2, slice3]
h5.close()
ar[:] = iar.reshape(shape) * cnstnt_val
elif len(control_line) > 20: # FIXED-FORMAT CONTROL LINE
# LOCAT CNSTNT FMTIN IPRN
del res['cntrl'] # control word was not used for fixed-format
try:
res['locat'] = locat = int(control_line[0:10])
res['cnstnt'] = cnstnt = control_line[10:20].strip()
if len(control_line) > 20:
res['fmtin'] = fmtin = control_line[20:40].strip().upper()
if len(control_line) > 40:
res['iprn'] = iprn = control_line[40:50].strip()
except ValueError:
raise ValueError('fixed-format control line not '
'understood: ' + repr(control_line))
if len(control_line) > 50 and 'text' not in res:
res['text'] = first_line[50:].strip()
if locat == 0: # all elements are set equal to cnstnt
ar.fill(cnstnt)
else:
nunit = abs(locat)
if self.parent.nunit == nunit:
obj = self
elif self.parent.nam is None:
obj = self
else:
obj = self.parent.nam[nunit]
if locat < 0:
fmtin = '(BINARY)'
iar = read_array_data(obj, fmtin)
ar[:] = iar.reshape(shape) * num_type(cnstnt)
else:
raise ValueError('array control line not understood: ' +
repr(control_line))
if 'text' in res:
withtext = ' with text "' + res['text'] + '"'
else:
withtext = ''
self.logger.debug(
'%s:read %r array with shape %s from line %d to %d%s',
self.data_set_num, ar.dtype.char, ar.shape,
startln, self.lineno, withtext)
if return_dict:
return res
else:
return ar
|
[
"h5py.File",
"numpy.empty",
"os.path.dirname",
"numpy.dtype",
"os.path.isfile",
"re.findall",
"numpy.fromiter",
"os.path.split",
"os.path.join",
"numpy.fromstring",
"re.compile"
] |
[((333, 449), 're.compile', 're.compile', (['"""\\\\((?P<body>(?P<rep>\\\\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\\\\d+)(\\\\.(?P<d>\\\\d+))?|FREE|BINARY)\\\\)"""'], {}), "(\n '\\\\((?P<body>(?P<rep>\\\\d*)(?P<symbol>[IEFG][SN]?)(?P<w>\\\\d+)(\\\\.(?P<d>\\\\d+))?|FREE|BINARY)\\\\)'\n )\n", (343, 449), False, 'import re\n'), ((13151, 13179), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (13159, 13179), True, 'import numpy as np\n'), ((13102, 13117), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (13110, 13117), True, 'import numpy as np\n'), ((2382, 2409), 'os.path.dirname', 'os.path.dirname', (['self.fpath'], {}), '(self.fpath)\n', (2397, 2409), False, 'import os\n'), ((13871, 13897), 'numpy.fromstring', 'np.fromstring', (['data', 'dtype'], {}), '(data, dtype)\n', (13884, 13897), True, 'import numpy as np\n'), ((15022, 15053), 'numpy.fromiter', 'np.fromiter', (['items'], {'dtype': 'dtype'}), '(items, dtype=dtype)\n', (15033, 15053), True, 'import numpy as np\n'), ((2061, 2093), 'os.path.split', 'os.path.split', (['self.parent.fpath'], {}), '(self.parent.fpath)\n', (2074, 2093), False, 'import os\n'), ((18679, 18749), 're.findall', 're.findall', (['(\'([\' + file_ch + \']+|"[\' + file_ch + \' ]+")\')', 'control_line'], {}), '(\'([\' + file_ch + \']+|"[\' + file_ch + \' ]+")\', control_line)\n', (18689, 18749), False, 'import re\n'), ((20516, 20560), 'os.path.join', 'os.path.join', (['self.parent.nam.ref_dir', 'fname'], {}), '(self.parent.nam.ref_dir, fname)\n', (20528, 20560), False, 'import os\n'), ((20690, 20711), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (20699, 20711), False, 'import h5py\n'), ((20580, 20601), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (20594, 20601), False, 'import os\n')]
|
from typing import TYPE_CHECKING, List, Optional
import mymax as my
def demo_args_list_float() -> None:
args = [2.5, 3.5, 1.5]
expected = 3.5
result = my.max(*args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_int() -> None:
args = [30, 10, 20]
expected = 30
result = my.max(args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_str() -> None:
args = iter('banana kiwi mango apple'.split())
expected = 'mango'
result = my.max(args)
print(args, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(expected)
reveal_type(result)
def demo_args_iter_not_comparable_with_key() -> None:
args = [object(), object(), object()]
key = id
expected = max(args, key=id)
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
def demo_empty_iterable_with_default() -> None:
args: List[float] = []
default = None
expected = None
result = my.max(args, default=default)
print(args, default, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(default)
reveal_type(expected)
reveal_type(result)
def demo_different_key_return_type() -> None:
args = iter('banana kiwi mango apple'.split())
key = len
expected = 'banana'
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
def demo_different_key_none() -> None:
args = iter('banana kiwi mango apple'.split())
key = None
expected = 'mango'
result = my.max(args, key=key)
print(args, key, expected, result, sep='\n')
assert result == expected
if TYPE_CHECKING:
reveal_type(args)
reveal_type(key)
reveal_type(expected)
reveal_type(result)
###################################### intentional type errors
def error_reported_bug() -> None:
# example from https://github.com/python/typeshed/issues/4051
top: Optional[int] = None
try:
my.max(5, top)
except TypeError as exc:
print(exc)
def error_args_iter_not_comparable() -> None:
try:
my.max([None, None])
except TypeError as exc:
print(exc)
def error_single_arg_not_iterable() -> None:
try:
my.max(1)
except TypeError as exc:
print(exc)
def main():
for name, val in globals().items():
if name.startswith('demo') or name.startswith('error'):
print('_' * 20, name)
val()
if __name__ == '__main__':
main()
|
[
"mymax.max"
] |
[((165, 178), 'mymax.max', 'my.max', (['*args'], {}), '(*args)\n', (171, 178), True, 'import mymax as my\n'), ((449, 461), 'mymax.max', 'my.max', (['args'], {}), '(args)\n', (455, 461), True, 'import mymax as my\n'), ((765, 777), 'mymax.max', 'my.max', (['args'], {}), '(args)\n', (771, 777), True, 'import mymax as my\n'), ((1115, 1136), 'mymax.max', 'my.max', (['args'], {'key': 'key'}), '(args, key=key)\n', (1121, 1136), True, 'import mymax as my\n'), ((1476, 1505), 'mymax.max', 'my.max', (['args'], {'default': 'default'}), '(args, default=default)\n', (1482, 1505), True, 'import mymax as my\n'), ((1874, 1895), 'mymax.max', 'my.max', (['args'], {'key': 'key'}), '(args, key=key)\n', (1880, 1895), True, 'import mymax as my\n'), ((2249, 2270), 'mymax.max', 'my.max', (['args'], {'key': 'key'}), '(args, key=key)\n', (2255, 2270), True, 'import mymax as my\n'), ((2693, 2707), 'mymax.max', 'my.max', (['(5)', 'top'], {}), '(5, top)\n', (2699, 2707), True, 'import mymax as my\n'), ((2821, 2841), 'mymax.max', 'my.max', (['[None, None]'], {}), '([None, None])\n', (2827, 2841), True, 'import mymax as my\n'), ((2954, 2963), 'mymax.max', 'my.max', (['(1)'], {}), '(1)\n', (2960, 2963), True, 'import mymax as my\n')]
|
import logging
from google.cloud import pubsub
from google.cloud import secretmanager
class GCPPubSubService:
_client = None
@classmethod
def get_client(cls):
if cls._client is None:
cls._client = pubsub.PublisherClient()
return cls._client
@classmethod
def publish_message(cls, project_id, topic_name, message_constructor, **kwargs):
topic_path = f"projects/{project_id}/topics/{topic_name}"
message = message_constructor(**kwargs)
future = cls.get_client().publish(topic_path, message.encode("utf-8"))
try:
logging.info(f"Published a message to topic {topic_path}: " f"{message}")
return future.result()
except Exception as e:
logging.error(
f"Publishing to topic {topic_path} has failed with "
f"message {message} with exception: {e}"
)
raise e
class SecretManagerService:
_client = None
@classmethod
def get_client(cls):
if cls._client is None:
cls._client = secretmanager.SecretManagerServiceClient()
return cls._client
@classmethod
def get_secret(cls, project_id, secret_id, version_id="latest"):
name = cls.get_client().secret_version_path(project_id, secret_id, version_id)
response = cls.get_client().access_secret_version(name)
return response.payload.data.decode("UTF-8")
|
[
"google.cloud.secretmanager.SecretManagerServiceClient",
"logging.info",
"logging.error",
"google.cloud.pubsub.PublisherClient"
] |
[((233, 257), 'google.cloud.pubsub.PublisherClient', 'pubsub.PublisherClient', ([], {}), '()\n', (255, 257), False, 'from google.cloud import pubsub\n'), ((606, 675), 'logging.info', 'logging.info', (['f"""Published a message to topic {topic_path}: {message}"""'], {}), "(f'Published a message to topic {topic_path}: {message}')\n", (618, 675), False, 'import logging\n'), ((1083, 1125), 'google.cloud.secretmanager.SecretManagerServiceClient', 'secretmanager.SecretManagerServiceClient', ([], {}), '()\n', (1123, 1125), False, 'from google.cloud import secretmanager\n'), ((758, 872), 'logging.error', 'logging.error', (['f"""Publishing to topic {topic_path} has failed with message {message} with exception: {e}"""'], {}), "(\n f'Publishing to topic {topic_path} has failed with message {message} with exception: {e}'\n )\n", (771, 872), False, 'import logging\n')]
|
import numpy as np
import scipy
import scipy.stats
import csv
scores = np.load('regional_avgScore_nAD.npy')
print(scores.shape)
pool = [[0 for _ in range(scores.shape[1])] for _ in range(scores.shape[1])]
for i in range(scores.shape[1]-1):
for j in range(i+1, scores.shape[1]):
corr, _ = scipy.stats.pearsonr(scores[:, i], scores[:, j])
pool[i][j] = corr
pool[j][i] = corr
print(pool)
regions = \
['hippoR',
'hippoL',
'tempoR',
'tempoL',
'cerebeR',
'cerebeL',
'brainstem',
'insulaR',
'insulaL',
'occiR',
'occiL',
'frontR',
'frontL',
'parieR',
'parieL',
'ventri']
with open('nAD_correlation.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow([''] + regions)
for i in range(len(regions)):
spamwriter.writerow([regions[i]] + pool[i])
|
[
"numpy.load",
"csv.writer",
"scipy.stats.pearsonr"
] |
[((72, 108), 'numpy.load', 'np.load', (['"""regional_avgScore_nAD.npy"""'], {}), "('regional_avgScore_nAD.npy')\n", (79, 108), True, 'import numpy as np\n'), ((679, 755), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""" """', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (689, 755), False, 'import csv\n'), ((303, 351), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['scores[:, i]', 'scores[:, j]'], {}), '(scores[:, i], scores[:, j])\n', (323, 351), False, 'import scipy\n')]
|
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Xevaquor'
__license__ = 'MIT'
from layout import *
from copy import deepcopy
Moves = {
'North' : (0, -1),
'South' : (0, 1),
'East': (1, 0),
'West' : (-1, 0),
'Stop' : (0, 0)
}
class AgentStatus(object):
def __init__(self, pos = (0,0), scared = False):
self.position = pos
self.is_scared = scared
class PacmanGameState(object):
def __init__(self, food):
# 0 - pacman
# >0 - ghost
self.agents = [None, None]
self.power_pellets = []
self.food = deepcopy(food)
class PacmanGame(object):
def __init__(self, lay):
self.layout = lay
def get_initial_game_state(self):
gs = PacmanGameState(self.layout.food)
gs.agents[0] = AgentStatus(pos=(3, 1), scared=False)
gs.agents[1] = AgentStatus(pos=(2, 1), scared=False)
return gs
def get_legal_moves(self, state, agent_index=0):
if agent_index != 0:
raise Exception("Not implemented!")
legal_moves = []
x, y = state.agents[agent_index].position
for m, d in Moves.items():
dx, dy = d
nx = x + dx
ny = y + dy
if nx >= 0 and nx < self.layout.cols and ny >= 0 and ny < self.layout.rows:
if self.layout.grid[ny][nx] != Tile.Wall:
legal_moves.append(m)
return legal_moves
def apply_move(self, state, move, agent_index=0):
# assert move is legal
# only move so far
dx, dy = Moves[move]
s = deepcopy(state)
s.agents[agent_index].position =( s.agents[agent_index].position[0] + dx,
s.agents[agent_index].position[1] + dy)
# eat food
if agent_index == 0:
x, y = s.agents[agent_index].position
s.food[y][x] = False
return s
def is_terminate(self, state):
pass
def pacman_won(self, state):
pass
def pacman_lose(self, state):
pass
def get_score(self, state):
pass
|
[
"copy.deepcopy"
] |
[((583, 597), 'copy.deepcopy', 'deepcopy', (['food'], {}), '(food)\n', (591, 597), False, 'from copy import deepcopy\n'), ((1593, 1608), 'copy.deepcopy', 'deepcopy', (['state'], {}), '(state)\n', (1601, 1608), False, 'from copy import deepcopy\n')]
|
import tensorflow as tf
import joblib
import numpy as np
import json
import traceback
import sys
import os
class Predictor(object):
def __init__(self):
self.loaded = False
def load(self):
print("Loading model",os.getpid())
self.model = tf.keras.models.load_model('model.h5', compile=False)
self.labelencoder = joblib.load('labelencoder.pkl')
self.loaded = True
print("Loaded model")
def predict(self, X,features_names):
# data = request.get("data", {}).get("ndarray")
# mult_types_array = np.array(data, dtype=object)
print ('step1......')
print(X)
X = tf.constant(X)
print ('step2......')
print(X)
if not self.loaded:
self.load()
# result = self.model.predict(X)
try:
result = self.model.predict(X)
except Exception as e:
print(traceback.format_exception(*sys.exc_info()))
raise # reraises the exception
print ('step3......')
result = tf.sigmoid(result)
print ('step4......')
print(result)
result = tf.math.argmax(result,axis=1)
print ('step5......')
print(result)
print(result.shape)
print(self.labelencoder.inverse_transform(result))
print ('step6......')
return json.dumps(result.numpy(), cls=JsonSerializer)
class JsonSerializer(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (
np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
|
[
"tensorflow.keras.models.load_model",
"os.getpid",
"tensorflow.math.argmax",
"tensorflow.constant",
"json.JSONEncoder.default",
"sys.exc_info",
"joblib.load",
"tensorflow.sigmoid"
] |
[((274, 327), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model.h5"""'], {'compile': '(False)'}), "('model.h5', compile=False)\n", (300, 327), True, 'import tensorflow as tf\n'), ((356, 387), 'joblib.load', 'joblib.load', (['"""labelencoder.pkl"""'], {}), "('labelencoder.pkl')\n", (367, 387), False, 'import joblib\n'), ((662, 676), 'tensorflow.constant', 'tf.constant', (['X'], {}), '(X)\n', (673, 676), True, 'import tensorflow as tf\n'), ((1074, 1092), 'tensorflow.sigmoid', 'tf.sigmoid', (['result'], {}), '(result)\n', (1084, 1092), True, 'import tensorflow as tf\n'), ((1162, 1192), 'tensorflow.math.argmax', 'tf.math.argmax', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (1176, 1192), True, 'import tensorflow as tf\n'), ((1878, 1913), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1902, 1913), False, 'import json\n'), ((240, 251), 'os.getpid', 'os.getpid', ([], {}), '()\n', (249, 251), False, 'import os\n'), ((950, 964), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (962, 964), False, 'import sys\n')]
|
# Generated by Django 3.2.3 on 2021-05-14 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_core", "0020_source_user_matching_mode"),
]
operations = [
migrations.AlterField(
model_name="application",
name="slug",
field=models.SlugField(
help_text="Internal application name, used in URLs.", unique=True
),
),
]
|
[
"django.db.models.SlugField"
] |
[((353, 440), 'django.db.models.SlugField', 'models.SlugField', ([], {'help_text': '"""Internal application name, used in URLs."""', 'unique': '(True)'}), "(help_text='Internal application name, used in URLs.',\n unique=True)\n", (369, 440), False, 'from django.db import migrations, models\n')]
|
# !/usr/bin/dev python
# -*- coding:utf-8 -*-
from datetime import datetime
import sys
def LogFile(logFile, target_url):
filePoint = open('{}'.format(logFile), 'a')
filePoint.write('----------------------------\n\n')
for i in sys.argv:
filePoint.write(i + ' ')
filePoint.write('\n')
filePoint.write('[*] Crawling URL : {} at :{}\n\n'.format(target_url, str(datetime.now())[: -7]))
filePoint.close()
|
[
"datetime.datetime.now"
] |
[((388, 402), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (400, 402), False, 'from datetime import datetime\n')]
|
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict
import pandas as pd
import plotly.graph_objects as go
def plot_alns_history(solution_costs: List[Tuple[int, int]], lined: bool = False, legend: str = "") -> None:
x, y = zip(*solution_costs)
plt.figure(figsize=(10, 7)) # (8, 6) is default
plt.scatter(x, y, s=7, alpha=0.4, c='black')
if lined:
plt.plot(x, y, label=legend)
if legend != "":
plt.legend()
# plt.yscale('log')
plt.show()
def plot_operator_weights(operator_scores: Dict[str, List[float]], x_values: List[int] = None) -> None:
# plt.figure(figsize=(10, 7)) # (8, 6) is default
legend = []
for operator, scores in operator_scores.items():
if x_values:
plt.plot(x_values, scores)
else:
plt.plot(scores)
legend.append(_get_operator_legend_name(operator))
plt.legend(legend)
plt.xlabel("Iteration")
plt.show()
def _get_operator_legend_name(operator_name: str) -> str:
mapping = {
'true': 'insertion with noise',
'false': 'insertion without noise',
'r_greedy': 'greedy',
'r_2regret': '2-regret',
'r_3regret': '3-regret',
'd_random': 'random',
'd_worst': 'worst',
'd_voyage_random': 'random voyage',
'd_voyage_worst': 'worst voyage',
'd_route_random': 'random route',
'd_route_worst': 'worst route',
'd_related_location_time': 'spatial temporal related',
'd_related_location_precedence': 'spatial disease related',
}
return mapping[operator_name]
def plot_alns_history_with_production_feasibility(solution_costs: List[Tuple[int, int]],
production_feasibility: List[bool]) -> None:
df = pd.DataFrame(dict(iter=[elem[0] for elem in solution_costs],
cost=[elem[1] for elem in solution_costs],
feasible=production_feasibility))
fig, ax = plt.subplots()
colors = {False: 'red', True: 'green'}
ax.scatter(df['iter'], df['cost'], c=df['feasible'].apply(lambda x: colors[x]))
plt.show()
def plot_locations(locations_ids: List[str], special_locations: List[Tuple[float, float]] = None, save_to: str=None):
# Special locations:
# 0482: (59.3337534309431, 5.30413145167106), 2022: (11.2786502472518,64.857954476573), 2015: (15.0646427525587,68.9141123038669)
loc_data = pd.read_csv('../../data/locations.csv')
loc_data.set_index("loknr", inplace=True)
farm_size = 7
factory_size = 15
farm_color = '#0067b5' #skyblue'
factory_color = 'black'
factory_marker = 'square'
farm_marker = 'circle'
relevant_locations_and_coords = [(loc_id,
loc_data.loc[int(loc_id), "breddegrader"],
loc_data.loc[int(loc_id), "lengdegrader"], farm_color, farm_size, farm_marker)
for loc_id in locations_ids if int(loc_id) in loc_data.index]
relevant_locations_and_coords += [(000, coord[0], coord[1], factory_color, factory_size, factory_marker)
for coord in special_locations]
df = pd.DataFrame(relevant_locations_and_coords)
df.columns = ['loc_id', 'lat', 'long', 'color', 'size', 'marker']
# color = c if c else "LightSkyBlue"
# with open("../../data/custom.geo.json", "r", encoding="utf-8") as f:
# geometry = geojson.load(f)
# pprint(geometry)
# trace1 = go.Choropleth(geojson=geometry,
# locations=["Norway"],
# z=[0],
# text=['Norway-text']
# )
trace2 = go.Scattergeo(
lon=df['long'],
lat=df['lat'],
text=df['loc_id'],
mode='markers',
marker=dict(
color=df['color'],
size=df['size'],
symbol=df['marker'],
line=dict(color='black', width=0),
opacity=1
)
)
fig = go.Figure([trace2])
# fig.update_layout(
# title='Locations',
# geo_scope='europe',
# )
fig.update_geos(
fitbounds="locations",
resolution=50,
# visible=False,
showframe=False,
projection={"type": "mercator"},
)
if save_to: # Save figure
fig.write_html(save_to)
fig.show()
def plot_clustered_locations(locations_ids_list: List[List[str]],
special_locations_list: List[List[Tuple[float, float]]] = None, save_to: str=None):
loc_data = pd.read_csv('../../data/locations.csv')
loc_data.set_index("loknr", inplace=True)
farm_size = 9
factory_size = 15
factory_color = 'black'
farm_colors = ['#0067b5', '#e67512', '#006700', '#bb00bb']
factory_marker = 'square'
farm_marker = 'circle'
traces = []
for locations_ids, special_locations, farm_color in zip(locations_ids_list, special_locations_list, farm_colors):
relevant_locations_and_coords = [(loc_id,
loc_data.loc[int(loc_id), "breddegrader"],
loc_data.loc[int(loc_id), "lengdegrader"], farm_color, farm_size, farm_marker)
for loc_id in locations_ids if int(loc_id) in loc_data.index]
relevant_locations_and_coords += [(000, coord[0], coord[1], factory_color, factory_size, factory_marker)
for coord in special_locations]
df = pd.DataFrame(relevant_locations_and_coords)
df.columns = ['loc_id', 'lat', 'long', 'color', 'size', 'marker']
# color = c if c else "LightSkyBlue"
trace = go.Scattergeo(
lon=df['long'],
lat=df['lat'],
text=df['loc_id'],
mode='markers',
marker=dict(
color=df['color'],
size=df['size'],
symbol=df['marker'],
line=dict(color='black', width=0),
opacity=1
)
)
traces.append(trace)
fig = go.Figure(traces)
# fig.update_layout(
# title='Locations',
# geo_scope='europe',
# )
fig.update_geos(
fitbounds="locations",
resolution=50,
# visible=False,
showframe=False,
projection={"type": "mercator"},
)
if save_to: # Save figure
fig.write_html(save_to)
fig.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"plotly.graph_objects.Figure",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((270, 297), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (280, 297), True, 'import matplotlib.pyplot as plt\n'), ((323, 367), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(7)', 'alpha': '(0.4)', 'c': '"""black"""'}), "(x, y, s=7, alpha=0.4, c='black')\n", (334, 367), True, 'import matplotlib.pyplot as plt\n'), ((489, 499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (497, 499), True, 'import matplotlib.pyplot as plt\n'), ((897, 915), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {}), '(legend)\n', (907, 915), True, 'import matplotlib.pyplot as plt\n'), ((920, 943), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (930, 943), True, 'import matplotlib.pyplot as plt\n'), ((948, 958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (956, 958), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2030), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2028, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2163, 2173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2171, 2173), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2507), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/locations.csv"""'], {}), "('../../data/locations.csv')\n", (2479, 2507), True, 'import pandas as pd\n'), ((3247, 3290), 'pandas.DataFrame', 'pd.DataFrame', (['relevant_locations_and_coords'], {}), '(relevant_locations_and_coords)\n', (3259, 3290), True, 'import pandas as pd\n'), ((4138, 4157), 'plotly.graph_objects.Figure', 'go.Figure', (['[trace2]'], {}), '([trace2])\n', (4147, 4157), True, 'import plotly.graph_objects as go\n'), ((4697, 4736), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/locations.csv"""'], {}), "('../../data/locations.csv')\n", (4708, 4736), True, 'import pandas as pd\n'), ((6293, 6310), 'plotly.graph_objects.Figure', 'go.Figure', (['traces'], {}), '(traces)\n', (6302, 6310), True, 'import plotly.graph_objects as go\n'), ((390, 418), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'legend'}), '(x, y, label=legend)\n', (398, 418), True, 'import matplotlib.pyplot as plt\n'), ((448, 460), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (458, 460), True, 'import matplotlib.pyplot as plt\n'), ((5664, 5707), 'pandas.DataFrame', 'pd.DataFrame', (['relevant_locations_and_coords'], {}), '(relevant_locations_and_coords)\n', (5676, 5707), True, 'import pandas as pd\n'), ((764, 790), 'matplotlib.pyplot.plot', 'plt.plot', (['x_values', 'scores'], {}), '(x_values, scores)\n', (772, 790), True, 'import matplotlib.pyplot as plt\n'), ((817, 833), 'matplotlib.pyplot.plot', 'plt.plot', (['scores'], {}), '(scores)\n', (825, 833), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Processor for performing named entity tagging.
"""
from stanfordnlp.models.common.pretrain import Pretrain
from stanfordnlp.models.common import doc
from stanfordnlp.models.common.utils import unsort
from stanfordnlp.models.ner.data import DataLoader
from stanfordnlp.models.ner.trainer import Trainer
from stanfordnlp.pipeline._constants import *
from stanfordnlp.pipeline.processor import UDProcessor
class NERProcessor(UDProcessor):
# set of processor requirements this processor fulfills
PROVIDES_DEFAULT = set([NER])
# set of processor requirements for this processor
REQUIRES_DEFAULT = set([TOKENIZE])
def _set_up_model(self, config, use_gpu):
# set up trainer
self._args = {'charlm_forward_file': config['forward_charlm_path'], 'charlm_backward_file': config['backward_charlm_path']}
self._pretrain = Pretrain(config['pretrain_path'])
self._trainer = Trainer(args=self._args, pretrain=self.pretrain, model_file=config['model_path'], use_cuda=use_gpu)
def process(self, document):
# set up a eval-only data loader and skip tag preprocessing
batch = DataLoader(
document, self.config['batch_size'], self.config, vocab=self.vocab, evaluation=True, preprocess_tags=False)
preds = []
for b in batch:
preds += self.trainer.predict(b)
# Append previous 'misc' values.
misc = batch.conll.get(['misc'])
idx = 0
for i, sent in enumerate(preds):
for j, ner_pred in enumerate(sent):
ner_pred = 'NER=' + ner_pred
misc_val = misc[idx]
if misc_val != '_':
preds[i][j] = ner_pred + '|' + misc_val
else:
preds[i][j] = ner_pred
idx += 1
batch.conll.set(['misc'], [y for x in preds for y in x])
|
[
"stanfordnlp.models.ner.data.DataLoader",
"stanfordnlp.models.ner.trainer.Trainer",
"stanfordnlp.models.common.pretrain.Pretrain"
] |
[((861, 894), 'stanfordnlp.models.common.pretrain.Pretrain', 'Pretrain', (["config['pretrain_path']"], {}), "(config['pretrain_path'])\n", (869, 894), False, 'from stanfordnlp.models.common.pretrain import Pretrain\n'), ((919, 1023), 'stanfordnlp.models.ner.trainer.Trainer', 'Trainer', ([], {'args': 'self._args', 'pretrain': 'self.pretrain', 'model_file': "config['model_path']", 'use_cuda': 'use_gpu'}), "(args=self._args, pretrain=self.pretrain, model_file=config[\n 'model_path'], use_cuda=use_gpu)\n", (926, 1023), False, 'from stanfordnlp.models.ner.trainer import Trainer\n'), ((1137, 1260), 'stanfordnlp.models.ner.data.DataLoader', 'DataLoader', (['document', "self.config['batch_size']", 'self.config'], {'vocab': 'self.vocab', 'evaluation': '(True)', 'preprocess_tags': '(False)'}), "(document, self.config['batch_size'], self.config, vocab=self.\n vocab, evaluation=True, preprocess_tags=False)\n", (1147, 1260), False, 'from stanfordnlp.models.ner.data import DataLoader\n')]
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="ppt_maker",
version="0.0.1",
author="<NAME>, <NAME>",
author_email="<EMAIL>",
description="Make PowerPoint slides with template and data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nyuspc/ppt_maker",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Financial and Insurance Industry",
"Topic :: Multimedia :: Graphics",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"setuptools.find_packages"
] |
[((439, 465), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (463, 465), False, 'import setuptools\n')]
|
import os
import argparse
import numpy as np
import pymatgen
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("mp_dir", help="Root directory with Materials Project dataset")
parser.add_argument("radial_cutoff", type=float, help="Radius of sphere that decides neighborhood")
args = parser.parse_args()
mp_dir = args.mp_dir
r_cut = args.radial_cutoff
index = np.load(os.path.join(mp_dir, 'meta_derived', f'index_connected_{r_cut}.npy'))
mp_cif_dir = os.path.join(mp_dir, "cif")
mp_save_dir = os.path.join(mp_dir, f"derived_radial_cutoff_{r_cut}")
def get_max_atomic_number(cif_paths):
max_atomic_number = -1
for cif_path in tqdm(cif_paths):
structure = pymatgen.Structure.from_file(cif_path)
max_atomic_number = max(max_atomic_number, max(structure.atomic_numbers))
return max_atomic_number
def process_cif(cif_path):
structure = pymatgen.Structure.from_file(cif_path)
return np.array(structure.atomic_numbers)
cif_paths = [os.path.join(mp_cif_dir, filename) for filename in index]
max_atomic_number = get_max_atomic_number(cif_paths)
atom_type_mask = np.zeros((len(cif_paths), max_atomic_number+1), dtype=np.bool)
for i, cif_path in enumerate(tqdm(cif_paths)):
atom_type_mask[i, process_cif(cif_path)] = True
np.save(os.path.join(mp_save_dir, "atom_type_mask.npy"), atom_type_mask)
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"pymatgen.Structure.from_file",
"numpy.array",
"os.path.join"
] |
[((93, 118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (116, 118), False, 'import argparse\n'), ((479, 506), 'os.path.join', 'os.path.join', (['mp_dir', '"""cif"""'], {}), "(mp_dir, 'cif')\n", (491, 506), False, 'import os\n'), ((521, 575), 'os.path.join', 'os.path.join', (['mp_dir', 'f"""derived_radial_cutoff_{r_cut}"""'], {}), "(mp_dir, f'derived_radial_cutoff_{r_cut}')\n", (533, 575), False, 'import os\n'), ((396, 464), 'os.path.join', 'os.path.join', (['mp_dir', '"""meta_derived"""', 'f"""index_connected_{r_cut}.npy"""'], {}), "(mp_dir, 'meta_derived', f'index_connected_{r_cut}.npy')\n", (408, 464), False, 'import os\n'), ((662, 677), 'tqdm.tqdm', 'tqdm', (['cif_paths'], {}), '(cif_paths)\n', (666, 677), False, 'from tqdm import tqdm\n'), ((898, 936), 'pymatgen.Structure.from_file', 'pymatgen.Structure.from_file', (['cif_path'], {}), '(cif_path)\n', (926, 936), False, 'import pymatgen\n'), ((948, 982), 'numpy.array', 'np.array', (['structure.atomic_numbers'], {}), '(structure.atomic_numbers)\n', (956, 982), True, 'import numpy as np\n'), ((998, 1032), 'os.path.join', 'os.path.join', (['mp_cif_dir', 'filename'], {}), '(mp_cif_dir, filename)\n', (1010, 1032), False, 'import os\n'), ((1219, 1234), 'tqdm.tqdm', 'tqdm', (['cif_paths'], {}), '(cif_paths)\n', (1223, 1234), False, 'from tqdm import tqdm\n'), ((1302, 1349), 'os.path.join', 'os.path.join', (['mp_save_dir', '"""atom_type_mask.npy"""'], {}), "(mp_save_dir, 'atom_type_mask.npy')\n", (1314, 1349), False, 'import os\n'), ((699, 737), 'pymatgen.Structure.from_file', 'pymatgen.Structure.from_file', (['cif_path'], {}), '(cif_path)\n', (727, 737), False, 'import pymatgen\n')]
|
# Generated by Django 3.0.6 on 2020-07-17 19:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lims', '0059_auto_20200717_1318'),
]
operations = [
migrations.RenameField(
model_name='supportrecord',
old_name='user',
new_name='project',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((224, 315), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""supportrecord"""', 'old_name': '"""user"""', 'new_name': '"""project"""'}), "(model_name='supportrecord', old_name='user',\n new_name='project')\n", (246, 315), False, 'from django.db import migrations\n')]
|
"""
Copyright (c) 2018, University of Oxford, Rama Cont and ETH Zurich, <NAME>
This module provides the helper functions and the class LOBSTERReader, a subclass of OBReader to read in limit order book data in lobster format.
"""
######
# Imports
######
import csv
import math
import warnings
import numpy as np
from lobpy.datareader.orderbook import *
# LOBSTER specific file name functions
def _split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2,_ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
def split_lobster_filename(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
return _split_lobster_filename(filename)
def _split_lobster_filename_core(filename):
""" splits the LOBSTER-type filename into Ticker, Date, Time Start, Time End, File Type, Number of Levels """
filename2, _ = filename.split(".")
ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels = filename2.split("_")
return ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels
def _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return "_".join((ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels))
def create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels):
return _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, file_type_str, num_levels)
def _get_time_stamp_before(time_stamps, time_stamp):
''' Returns the value and index of the last time point in time_stamps before or equal time_stamp '''
time = time_stamps[0]
index = int(0)
if time == time_stamp:
# time_stamp found at index 0
return time, index
if time > time_stamp:
raise LookupError("Time stamp data start at {} which is after time_stamps: {}".format(time, time_stamp))
for ctr, time_now in enumerate(time_stamps[1:]):
if time_now > time_stamp:
return time, ctr
time = time_now
return time, ctr+1
class LOBSTERReader(OBReader):
"""
OBReader object specified for using LOBSTER files
----------
params:
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str,
time_end_calc_str
Example usage:
to create an object
>>> lobreader = LOBSTERReader("SYMBOL", "2012-06-21", "34200000", "57600000", "10")
read market depth on uniform time grid with num_observation number of observations
>>> dt, time_stamps, depth_bid, depth_ask = lobreader.load_marketdepth(num_observations)
read price process on that time grid specified above
>>> dt2, time_stamps2, price_mid, price_bid, price_ask = lobreader.load_marketdepth(None)
"""
def __init__(
self,
ticker_str,
date_str,
time_start_str,
time_end_str,
num_levels_str,
time_start_calc_str="",
time_end_calc_str="",
num_levels_calc_str=""
):
self.ticker_str = ticker_str
self.date_str = date_str
self.lobfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, ORDERBOOK_FILE_ID, num_levels_str)
self.msgfilename = _create_lobster_filename(ticker_str, date_str, time_start_str, time_end_str, MESSAGE_FILE_ID, num_levels_str)
self.time_start = int(time_start_str)
self.time_end = int(time_end_str)
self.num_levels = int(num_levels_str)
self.time_start_calc = int(time_start_str)
self.time_end_calc = int(time_end_str)
self.num_levels_calc = int(num_levels_str)
if not (num_levels_calc_str == ""):
self.num_levels_calc = int(num_levels_calc_str)
self.data = dict()
if not (time_start_calc_str == ""):
self.time_start_calc = int(time_start_calc_str)
if not (time_end_calc_str == ""):
self.time_end_calc = int(time_end_calc_str)
def set_timecalc(self, time_start_calc_str, time_end_calc_str):
self.time_start_calc = int(time_start_calc_str)
self.time_end_calc = int(time_end_calc_str)
return True
def create_filestr(self, identifier_str, num_levels=None):
""" Creates lobster type file string """
if num_levels is None:
num_levels = self.num_levels
return _create_lobster_filename(self.ticker_str, self.date_str, str(self.time_start_calc), str(self.time_end_calc), identifier_str, str(num_levels))
def average_profile_tt(self, num_levels_calc_str="" , write_outputfile = False):
""" Computes the average order book profile, averaged over trading time, from the csv sourcefile. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation
----------
args:
num_levels_calc: number of levels which should be considered for the output
write_output: if True, then the average order book profile is stored as a csv file
----------
output:
(mean_bid, mean_ask) in format of numpy arrays
"""
print("Starting computation of average order book profile in file %s."%self.lobfilename)
num_levels_calc = self.num_levels
if not(num_levels_calc_str == ""):
num_levels_calc = int(num_levels_calc_str)
if self.num_levels < num_levels_calc:
raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_levels, num_levels_calc))
tempval1 = 0.0
tempval2 = 0.0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
mean = np.zeros(num_levels_calc * 2) # running mean
with open(self.lobfilename+".csv", newline='') as csvfile:
lobdata = csv.reader(csvfile, delimiter=',')
num_lines = sum(1 for row in lobdata)
print("Loaded successfully. Number of lines: " + str(num_lines))
csvfile.seek(0) # reset iterator to beginning of the file
print("Start calculation.")
for row in lobdata: # data are read as list of strings
currorders = np.fromiter(row[1:(4*num_levels_calc + 1):2], np.float) # parse to integer
for ctr, currorder in enumerate(currorders):
#print(lobstate)
tempval1 = currorder / num_lines - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join(("ttime-"+AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr("-".join(("ttime",AV_ORDERBOOK_FILE_ID)) , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def average_profile(
self,
num_levels_calc_str="",
write_outputfile = False
):
""" Returns the average oder book profile from the csv sourcefile, averaged in real time. To avoid numerical errors by summing up large numbers, the Kahan Summation algorithm is used for mean computation """
if num_levels_calc_str == "":
num_levels_calc = self.num_levels_calc
else:
num_levels_calc = int(num_levels_calc_str)
if int(self.num_levels) < num_levels_calc:
raise DataRequestError("Number of levels in data ({0}) is smaller than number of levels requested for calculation ({1}).".format(self.num_level, num_levels_calc))
time_start = float(self.time_start_calc / 1000.)
time_end = float(self.time_end_calc / 1000.)
mean = np.zeros(num_levels_calc * 2) # running mean
tempval1 = 0.0
tempval2 = 0.0
linectr = 0
comp = np.zeros(num_levels_calc * 2) # compensator for lost low-order bits
flag = 0
with open(".".join((self.lobfilename, 'csv')), newline='') as orderbookfile, open(".".join((self.msgfilename, 'csv')), newline='') as messagefile:
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
nexttime = float(rowMES[0]) # t(0)
if time_end < nexttime:
# In this case there are no entries in the file for the selected time interval. Array of 0s is returned
warnings.warn("The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean.")
return mean[1::2], mean[0::2]
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only at t(0)
if time_start <= nexttime:
flag = 1
for rowLOB, rowMES in zip(lobdata,messagedata): # data are read as list of string, iterator now starts at second entry (since first has been exhausted above)
currtime = nexttime #(t(i))
nexttime = float(rowMES[0]) #(t(i+1))
if flag == 0:
if time_start <= nexttime:
# Start calculation
flag = 1
currtime = time_start
for ctr, currbucket in enumerate(currprofile):
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
else:
if time_end < nexttime:
# Finish calculation
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(currprofile)
tempval1 = (nexttime - currtime) / float(time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
if time_end == nexttime:
# Finish calculation
break
## Update order book to time t(i+1)
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2],np.float) # parse to integer, extract bucket volumes only
else: # executed only when not quitted by break, i.e. time_end >= time at end of file in this case we extrapolate
warnings.warn("Extrapolated order book data since time_end exceed time at end of the file by %f seconds."%(time_end - nexttime))
currtime = nexttime
nexttime = time_end
for ctr, currbucket in enumerate(currprofile):
#print(lobstate)
tempval1 = (nexttime - currtime) / (time_end - time_start) * currbucket - comp[ctr]
tempval2 = mean[ctr] + tempval1
comp[ctr] = (tempval2 - mean[ctr]) - tempval1
mean[ctr] = tempval2
print("Calculation finished.")
# Add data to self.data
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "bid")), mean[1::2])
self.add_data("--".join((AV_ORDERBOOK_FILE_ID, "ask")), mean[0::2])
if not write_outputfile:
return mean[1::2], mean[0::2] # LOBster format: bid data at odd * 2, LOBster format: ask data at even * 2
print("Write output file.")
outfilename = self.create_filestr(AV_ORDERBOOK_FILE_ID , str(num_levels_calc))
outfilename = ".".join((outfilename,'csv'))
with open(outfilename, 'w') as outfile:
wr = csv.writer(outfile)
wr.writerow(mean[1::2]) # LOBster format: bid data at odd * 2
wr.writerow(mean[0::2]) # LOBster format: ask data at even * 2
print("Average order book saved as %s."%outfilename)
return mean[1::2], mean[0::2]
def _load_ordervolume(
self,
num_observations,
num_levels_calc,
profile2vol_fct=np.sum
):
''' Extracts the volume of orders in the first num_level buckets at a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume process is extrapolated constantly on the last level in the file, for the case that time_end_calc is larger than the last time stamp in the file. profile2vol_fct allows to specify how the volume should be summarized from the profile. Typical choices are np.sum or np.mean.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = profile2vol_fct(currprofile[1::2])
volume_ask[ctr_obs] = profile2vol_fct(currprofile[0::2])
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_levelx(
self,
num_observations,
level
):
''' Extracts the volume of orders in the first num_level buckets at a uniform time grid of num_observations observations from the interval [time_start_calc, time_end_calc]. The volume process is extrapolated constantly on the last level in the file, for the case that time_end_calc is larger than the last time stamp in the file. profile2vol_fct allows to specify how the volume should be summarized from the profile. Typical choices are np.sum or np.mean.
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
volume_bid = np.zeros(num_observations)
volume_ask = np.zeros(num_observations)
# Ask level x is at position (x-1)*4 + 1, bid level x is at position (x-1)*4 + 3
x_bid = (int(level) - 1) * 4 + 3
x_ask = (int(level) - 1) * 4 + 1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float)
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of volume in the file. Keep processes constant
if (ctr_obs > 0):
volume_bid[ctr_obs] = volume_bid[ctr_obs-1]
volume_ask[ctr_obs] = volume_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
volume_bid[ctr_obs] = 0.
volume_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# extract order volume from profile
volume_bid[ctr_obs] = currbid
volume_ask[ctr_obs] = currask
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update currprofile and time_file
#currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
currbid = float(rowLOB[x_bid])
currask = float(rowLOB[x_ask])
time_file = float(rowMES[0])
if (file_ended_line < num_observations):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
return dt, time_stamps, volume_bid, volume_ask
def _load_ordervolume_full(
self,
num_levels_calc,
profile2vol_fct=np.sum,
ret_np=True
):
''' Extracts the volume of orders in the first num_level buckets from the interval [time_start_calc, time_end_calc]. profile2vol_fct allows to specify how the volume should be summarized from the profile. Typical choices are np.sum or np.mean. If ret_np==False then the output format are lists, else numpy arrays
Note: Due to possibly large amount of data we iterate through the file instead of reading the whole file into an array.
'''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
time_stamps = []
volume_bid = []
volume_ask = []
index_start = -1
index_end = -1
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
for ctrRow, (rowLOB, rowMES) in enumerate(zip(lobdata, messagedata)):
time_now = float(rowMES[0])
if (index_start == -1) and (time_now >= time_start_calc):
index_start = ctrRow
if (index_end == -1) and (time_now > time_end_calc):
index_end = ctrRow
break
time_stamps.append(time_now)
currprofile = np.fromiter(rowLOB[1:(4*num_levels_calc + 1):2], np.float) # parse to integer, extract bucket volumes only
volume_bid.append(profile2vol_fct(currprofile[1::2]))
volume_ask.append(profile2vol_fct(currprofile[0::2]))
if index_end == -1:
#file end reached
index_end = len(time_stamps)
if ret_np:
return np.array(time_stamps[index_start:index_end]), np.array(volume_bid[index_start:index_end]), np.array(volume_ask[index_start:index_end])
return time_stamps[index_start:index_end], volume_bid[index_start:index_end], volume_ask[index_start:index_end]
def _load_prices(
self,
num_observations
):
''' private method to implement how the price data are loaded from the files '''
time_start_calc = float(self.time_start_calc) / 1000.
time_end_calc = float(self.time_end_calc) / 1000.
file_ended_line = int(num_observations)
ctr_time = 0
ctr_line = 0
ctr_obs = 0 # counter for the outer of the
time_stamps, dt = np.linspace(time_start_calc, time_end_calc, num_observations, retstep = True)
prices_bid = np.empty(num_observations)
prices_ask = np.empty(num_observations)
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
time_file = float(rowMES[0])
for ctr_obs, time_stamp in enumerate(time_stamps):
if (time_stamp < time_file):
# no update of prices in the file. Keep processes constant
if (ctr_obs > 0):
prices_bid[ctr_obs] = prices_bid[ctr_obs-1]
prices_ask[ctr_obs] = prices_ask[ctr_obs-1]
else:
# so far no data available, raise warning and set processes to 0.
warnings.warn("Data do not contain beginning of the monitoring period. Values set to 0.", RuntimeWarning)
prices_bid[ctr_obs] = 0.
prices_ask[ctr_obs] = 0.
continue
while(time_stamp >= time_file):
# LOBster stores best ask and bid price in resp. 1st and 3rd column, price in unit USD*10000
prices_bid[ctr_obs] = float(rowLOB[2]) / float(10000)
prices_ask[ctr_obs] = float(rowLOB[0]) / float(10000)
# read next line
try:
rowMES = next(messagedata) # data are read as list of strings
rowLOB = next(lobdata)
except StopIteration:
if (file_ended_line == num_observations):
file_ended_line = ctr_obs
break
# update time_file
time_file = float(rowMES[0])
if (file_ended_line < num_observations-1):
warnings.warn("End of file reached. Number of values constantly extrapolated: %i"%(num_observations - file_ended_line), RuntimeWarning)
while ctr_obs < (num_observations-1):
prices_bid[ctr_obs+1] = prices_bid[ctr_obs]
prices_ask[ctr_obs+1] = prices_ask[ctr_obs]
return dt, time_stamps, prices_bid, prices_ask
def _load_profile_snapshot_lobster(
self,
time_stamp,
num_levels_calc=None
):
''' Returns a two numpy arrays with snapshots of the bid- and ask-side of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
#convert time from msec to sec
time_stamp = float(time_stamp) / 1000.
if num_levels_calc is None:
num_levels_calc = self.num_levels_calc
with open((self.lobfilename + '.csv')) as orderbookfile, open(self.msgfilename + '.csv') as messagefile:
# Read data from csv file
lobdata = csv.reader(orderbookfile, delimiter=',')
messagedata = csv.reader(messagefile, delimiter=',')
# get first row
# data are read as list of strings
rowMES = next(messagedata)
rowLOB = next(lobdata)
# parse to float, extract bucket volumes only
time_file = float(rowMES[0])
if time_file > time_stamp:
raise LookupError("Time data in the file start at {} which is after time_stamps: {}".format(time_file, time_stamp))
if time_file == time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
for rowMES in messagedata:
time_file = float(rowMES[0])
if time_file > time_stamp:
# file format is [ask level, ask volume, bid level, bid volume, ask level, ....]
#conversion of price levels to USD
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
break
rowLOB = next(lobdata)
else:
# time in file did not exceed time stamp to the end. Return last entries of the file
bid_prices = np.fromiter(rowLOB[2:(4*num_levels_calc):4], np.float) / float(10000)
bid_volume = np.fromiter(rowLOB[3:(4*num_levels_calc):4], np.float)
#conversion of price levels to USD
ask_prices = np.fromiter(rowLOB[0:(4*num_levels_calc):4], np.float) / float(10000)
ask_volume = np.fromiter(rowLOB[1:(4*num_levels_calc):4], np.float)
return bid_prices, bid_volume, ask_prices, ask_volume
def load_profile_snapshot(
self,
time_stamp,
num_levels_calc=None
):
''' Returns a two numpy arrays with snapshots of the bid- and ask-side of the order book at a given time stamp
Output:
bid_prices, bid_volume, ask_prices, ask_volume
'''
return self._load_profile_snapshot_lobster(time_stamp, num_levels_calc)
# END LOBSTERReader
|
[
"csv.reader",
"csv.writer",
"numpy.empty",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.fromiter",
"warnings.warn"
] |
[((6227, 6256), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (6235, 6256), True, 'import numpy as np\n'), ((6314, 6343), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (6322, 6343), True, 'import numpy as np\n'), ((9228, 9257), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (9236, 9257), True, 'import numpy as np\n'), ((9357, 9386), 'numpy.zeros', 'np.zeros', (['(num_levels_calc * 2)'], {}), '(num_levels_calc * 2)\n', (9365, 9386), True, 'import numpy as np\n'), ((15093, 15168), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (15104, 15168), True, 'import numpy as np\n'), ((15192, 15218), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (15200, 15218), True, 'import numpy as np\n'), ((15240, 15266), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (15248, 15266), True, 'import numpy as np\n'), ((18846, 18921), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (18857, 18921), True, 'import numpy as np\n'), ((18945, 18971), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (18953, 18971), True, 'import numpy as np\n'), ((18993, 19019), 'numpy.zeros', 'np.zeros', (['num_observations'], {}), '(num_observations)\n', (19001, 19019), True, 'import numpy as np\n'), ((24748, 24823), 'numpy.linspace', 'np.linspace', (['time_start_calc', 'time_end_calc', 'num_observations'], {'retstep': '(True)'}), '(time_start_calc, time_end_calc, num_observations, retstep=True)\n', (24759, 24823), True, 'import numpy as np\n'), ((24847, 24873), 'numpy.empty', 'np.empty', (['num_observations'], {}), '(num_observations)\n', (24855, 24873), True, 'import numpy as np\n'), ((24895, 24921), 'numpy.empty', 'np.empty', (['num_observations'], {}), '(num_observations)\n', (24903, 24921), True, 'import numpy as np\n'), ((6452, 6486), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (6462, 6486), False, 'import csv\n'), ((9628, 9668), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (9638, 9668), False, 'import csv\n'), ((9695, 9733), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (9705, 9733), False, 'import csv\n'), ((10323, 10381), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (10334, 10381), True, 'import numpy as np\n'), ((13738, 13757), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (13748, 13757), False, 'import csv\n'), ((15441, 15481), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (15451, 15481), False, 'import csv\n'), ((15508, 15546), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (15518, 15546), False, 'import csv\n'), ((15788, 15846), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (15799, 15846), True, 'import numpy as np\n'), ((17626, 17773), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (17639, 17773), False, 'import warnings\n'), ((19375, 19415), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (19385, 19415), False, 'import csv\n'), ((19442, 19480), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (19452, 19480), False, 'import csv\n'), ((21697, 21844), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (21710, 21844), False, 'import warnings\n'), ((22937, 22977), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (22947, 22977), False, 'import csv\n'), ((23004, 23042), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (23014, 23042), False, 'import csv\n'), ((25096, 25136), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (25106, 25136), False, 'import csv\n'), ((25163, 25201), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (25173, 25201), False, 'import csv\n'), ((27049, 27196), 'warnings.warn', 'warnings.warn', (["('End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line))", 'RuntimeWarning'], {}), "(\n 'End of file reached. Number of values constantly extrapolated: %i' % (\n num_observations - file_ended_line), RuntimeWarning)\n", (27062, 27196), False, 'import warnings\n'), ((28085, 28125), 'csv.reader', 'csv.reader', (['orderbookfile'], {'delimiter': '""","""'}), "(orderbookfile, delimiter=',')\n", (28095, 28125), False, 'import csv\n'), ((28152, 28190), 'csv.reader', 'csv.reader', (['messagefile'], {'delimiter': '""","""'}), "(messagefile, delimiter=',')\n", (28162, 28190), False, 'import csv\n'), ((6836, 6891), 'numpy.fromiter', 'np.fromiter', (['row[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(row[1:4 * num_levels_calc + 1:2], np.float)\n', (6847, 6891), True, 'import numpy as np\n'), ((7989, 8008), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (7999, 8008), False, 'import csv\n'), ((10102, 10247), 'warnings.warn', 'warnings.warn', (['"""The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean."""'], {}), "(\n 'The first entry in the data files is after the end of the selected time period. Arrays of 0s will be returned as mean.'\n )\n", (10115, 10247), False, 'import warnings\n'), ((12231, 12289), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (12242, 12289), True, 'import numpy as np\n'), ((12514, 12654), 'warnings.warn', 'warnings.warn', (["('Extrapolated order book data since time_end exceed time at end of the file by %f seconds.'\n % (time_end - nexttime))"], {}), "(\n 'Extrapolated order book data since time_end exceed time at end of the file by %f seconds.'\n % (time_end - nexttime))\n", (12527, 12654), False, 'import warnings\n'), ((23619, 23677), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (23630, 23677), True, 'import numpy as np\n'), ((24028, 24072), 'numpy.array', 'np.array', (['time_stamps[index_start:index_end]'], {}), '(time_stamps[index_start:index_end])\n', (24036, 24072), True, 'import numpy as np\n'), ((24074, 24117), 'numpy.array', 'np.array', (['volume_bid[index_start:index_end]'], {}), '(volume_bid[index_start:index_end])\n', (24082, 24117), True, 'import numpy as np\n'), ((24119, 24162), 'numpy.array', 'np.array', (['volume_ask[index_start:index_end]'], {}), '(volume_ask[index_start:index_end])\n', (24127, 24162), True, 'import numpy as np\n'), ((28934, 28988), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (28945, 28988), True, 'import numpy as np\n'), ((29168, 29222), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (29179, 29222), True, 'import numpy as np\n'), ((30320, 30374), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (30331, 30374), True, 'import numpy as np\n'), ((30554, 30608), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (30565, 30608), True, 'import numpy as np\n'), ((17381, 17439), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc + 1:2]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc + 1:2], np.float)\n', (17392, 17439), True, 'import numpy as np\n'), ((28835, 28889), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (28846, 28889), True, 'import numpy as np\n'), ((29069, 29123), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * num_levels_calc:4], np.float)\n', (29080, 29123), True, 'import numpy as np\n'), ((29674, 29728), 'numpy.fromiter', 'np.fromiter', (['rowLOB[3:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[3:4 * num_levels_calc:4], np.float)\n', (29685, 29728), True, 'import numpy as np\n'), ((29920, 29974), 'numpy.fromiter', 'np.fromiter', (['rowLOB[1:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[1:4 * num_levels_calc:4], np.float)\n', (29931, 29974), True, 'import numpy as np\n'), ((30221, 30275), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (30232, 30275), True, 'import numpy as np\n'), ((30455, 30509), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * num_levels_calc:4], np.float)\n', (30466, 30509), True, 'import numpy as np\n'), ((16391, 16505), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (16404, 16505), False, 'import warnings\n'), ((20413, 20527), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (20426, 20527), False, 'import warnings\n'), ((25903, 26017), 'warnings.warn', 'warnings.warn', (['"""Data do not contain beginning of the monitoring period. Values set to 0."""', 'RuntimeWarning'], {}), "(\n 'Data do not contain beginning of the monitoring period. Values set to 0.',\n RuntimeWarning)\n", (25916, 26017), False, 'import warnings\n'), ((29571, 29625), 'numpy.fromiter', 'np.fromiter', (['rowLOB[2:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[2:4 * num_levels_calc:4], np.float)\n', (29582, 29625), True, 'import numpy as np\n'), ((29817, 29871), 'numpy.fromiter', 'np.fromiter', (['rowLOB[0:4 * num_levels_calc:4]', 'np.float'], {}), '(rowLOB[0:4 * num_levels_calc:4], np.float)\n', (29828, 29871), True, 'import numpy as np\n')]
|
from django.contrib.auth.decorators import permission_required
from django.contrib.messages import ERROR, add_message
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from itdagene.app.comments.forms import CommentForm
from itdagene.app.mail.tasks import send_comment_email
@permission_required("comments.add_comment")
def add(request):
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.date = timezone.now()
instance.save()
send_comment_email(instance)
return redirect(instance.object.get_absolute_url())
else:
add_message(request, ERROR, _("Could not post comment"))
object = form.instance.object
return redirect(object.get_absolute_url())
|
[
"django.contrib.auth.decorators.permission_required",
"django.utils.timezone.now",
"itdagene.app.mail.tasks.send_comment_email",
"itdagene.app.comments.forms.CommentForm",
"django.utils.translation.ugettext_lazy"
] |
[((356, 399), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""comments.add_comment"""'], {}), "('comments.add_comment')\n", (375, 399), False, 'from django.contrib.auth.decorators import permission_required\n'), ((466, 491), 'itdagene.app.comments.forms.CommentForm', 'CommentForm', (['request.POST'], {}), '(request.POST)\n', (477, 491), False, 'from itdagene.app.comments.forms import CommentForm\n'), ((636, 650), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (648, 650), False, 'from django.utils import timezone\n'), ((692, 720), 'itdagene.app.mail.tasks.send_comment_email', 'send_comment_email', (['instance'], {}), '(instance)\n', (710, 720), False, 'from itdagene.app.mail.tasks import send_comment_email\n'), ((840, 867), 'django.utils.translation.ugettext_lazy', '_', (['"""Could not post comment"""'], {}), "('Could not post comment')\n", (841, 867), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from django.urls import path
from channels.http import AsgiHandler
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from monitor.consumers import MemoryinfoConsumer
application = ProtocolTypeRouter({
"websocket": AuthMiddlewareStack(
URLRouter([
path("monitor/stream/", MemoryinfoConsumer),
]),
),
})
|
[
"django.urls.path"
] |
[((328, 371), 'django.urls.path', 'path', (['"""monitor/stream/"""', 'MemoryinfoConsumer'], {}), "('monitor/stream/', MemoryinfoConsumer)\n", (332, 371), False, 'from django.urls import path\n')]
|
from .array import TensorTrainArray
from .slice import TensorTrainSlice
from .dispatch import implement_function
from ..raw import find_balanced_cluster,trivial_decomposition
import numpy as np
def _get_cluster_chi_array(shape,cluster,chi):
if cluster is None:
cluster=find_balanced_cluster(shape)
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([1]+list(chi)+[1])
return cluster,chi
def _get_cluster_chi_slice(shape,cluster,chi):
if len(shape)<2:
raise ValueError("TensorTrainSlice has at least 2 dimensions.")
if cluster is None:
cluster=find_balanced_cluster(shape[1:-1])
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([shape[0]]+list(chi)+[shape[-1]])
return cluster,chi
@implement_function("empty","array")
def empty(shape,dtype=np.float64, cluster=None,chi=1):
'''
Create an empty TensorTrainArray
'''
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.empty([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainArray.frommatrices(ms)
@implement_function("empty","slice")
def empty_slice(shape,dtype=np.float64, cluster=None,chi=1):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.empty([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def empty_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return empty(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return empty_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("zeros","array")
def zeros(shape,dtype=np.float64,cluster=None,chi=1):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainArray.frommatrices(ms)
@implement_function("zeros","slice")
def zeros_slice(shape,dtype=np.float64,cluster=None,chi=1):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def zeros_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return zeros(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return zeros_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("ones","array")
def ones(shape,dtype=np.float64,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
for m in ms:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
return TensorTrainArray.frommatrices(ms)
@implement_function("ones","slice")
def ones_slice(shape,dtype=np.float64,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
if len(ms)==1:
ms[0]=np.ones(ms[0].shape,dtype)
else:
ms[0][...,0]=np.ones(ms[0].shape[:-1],dtype)
ms[-1][0,...]=np.ones(ms[-1].shape[1:],dtype)
for m in ms[1:-1]:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def ones_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return ones(shape,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return ones_slice(shape,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("full","array")
def full(shape,fill_value,dtype=None,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_array(shape,cluster,chi)
if dtype is None:
dtype=np.array(fill_value).dtype
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
for m in ms:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
ms[-1]*=fill_value
return TensorTrainArray.frommatrices(ms)
@implement_function("full","slice")
def full_slice(shape,fill_value,dtype=None,cluster=None,chi=1,*,order=None):
cluster,chi=_get_cluster_chi_slice(shape,cluster,chi)
if dtype is None:
dtype=np.array(fill_value).dtype
ms=[np.zeros([c1]+list(s)+[c2],dtype) for c1,s,c2 in zip(chi[:-1],cluster,chi[1:])]
if len(ms)==1:
ms[0]=np.ones(ms[0].shape,dtype)
else:
ms[0][...,0]=np.ones(ms[0].shape[:-1],dtype)
ms[-1][0,...]=np.ones(ms[-1].shape[1:],dtype)
for m in ms[1:-1]:
m[0,...,0]=np.ones(m.shape[1:-1],dtype)
ms[-1]*=fill_value
return TensorTrainSlice.frommatrices(ms)
@implement_function()
def full_like(prototype, fill_value, dtype=None, shape=None, cluster=None, chi=1):
if dtype is None:
dtype=prototype.dtype
if shape is None:
shape,cluster,chi=prototype.shape,prototype.cluster,prototype.chi
if isinstance(prototype,TensorTrainArray):
return full(shape,fill_value,dtype,cluster,chi)
elif isinstance(prototype,TensorTrainSlice):
return full_slice(shape,fill_value,dtype,cluster,chi)
else:
return NotImplemented
@implement_function("eye","array")
def eye(N, M=None, k=0, dtype=np.float64, cluster=None):
if M!=None:
raise NotImplementedError("not implemented yet ...")
return diag(ones((N,),dtype=dtype,cluster=cluster),k)
@implement_function("identity","array")
def identity(n,dtype=None,cluster=None):
return eye(N=n,dtype=dtype,cluster=cluster)
@implement_function("diag","array")
def diag(v,k=0):
pass
@implement_function("array","array")
def array(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
if isinstance(ar,TensorTrainArray):
#copy if necessary, recluster if necessary
pass
elif isinstance(ar,TensorTrainSlice):
#just recast, clustering is then a bit weird, recluster afterwards
arm=ar.asmatrices()
if not copy:
arm[0]=arm[0][None,...]
else:
pass
ret=TensorTrainArray.frommatrices(arm)
if cluster is not None:
ret.recluster(cluster)
return ret
else:
return TensorTrainArray.fromdense(ar,dtype,cluster)
@implement_function("array","slice")
def slice(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
if isinstance(ar,TensorTrainSlice):
#copy if necessary
pass
elif isinstance(ar,TensorTrainArray):
#recluster then recast
pass
else:
return TensorTrainSlice.fromdense(ar,dtype,cluster)
@implement_function("asarray","array")
def asarray(ar, dtype=None,cluster=None):
return array(ar,dtype,cluster=cluster,copy=False)
@implement_function("asarray","slice")
def asslice(ar, dtype=None,cluster=None):
return slice(ar,dtype,cluster=cluster,copy=False)
@implement_function("asanyarray","array")
def asanyarray(ar, dtype=None,cluster=None):
return array(ar,dtype,cluster=cluster,copy=False)
@implement_function("asanyarray","slice")
def asanyslice(ar, dtype=None,cluster=None):
return slice(ar,dtype,cluster=cluster,copy=False)
@implement_function("frombuffer","array")
def frombuffer(buffer, dtype=float, count=- 1, offset=0, cluster=None):
return array(np.frombuffer(buffer,dtype,count,offset),dtype=dtype,cluster=cluster)
@implement_function("fromiter","array")
def fromiter(iter, dtype, count=- 1, cluster=None):
return array(np.fromiter(iter,dtype,count),dtype=dtype,cluster=cluster)
@implement_function("fromfunction","array")
def fromfunction(function, shape, dtype=float, cluster=None, **kwargs):
'''
Should be upgraded to support ttcross eventually, so might change behavior if function is not sane
'''
return array(np.fromfunction(function,shape,dtype=dtype,**kwargs),dtype=dtype,cluster=cluster)
@implement_function("fromfunction","slice")
def fromfunction_slice(function, shape, dtype=float, cluster=None, **kwargs):
'''
Should be upgraded to support ttcross eventually, so might change behavior if function is not sane
'''
return slice(np.fromfunction(function,shape,dtype=dtype,**kwargs),dtype=dtype,cluster=cluster)
@implement_function()
def copy(a,*,order=None,subok=None):
if isinstance(a,TensorTrainArray) or isinstance(a,TensorTrainSlice):
return a.copy()
else:
return NotImplemented
@implement_function("arange","array")
def arange(*args, **kwargs):
#wild hack to deal with optional arguments
if len(args)==5:
array(np.arange(*args[:-1],**kwargs),cluster=args[-1])
elif "cluster" in kwargs.keys():
cluster=kwargs["cluster"]
del kwargs["cluster"]
array(np.arange(*args,**kwargs),cluster=cluster)
else:
array(np.arange(*args,**kwargs))
@implement_function("linspace","array")
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0,cluster=None):
array(np.linspace(todense(start),todense(stop),num,endpoint,retstep,dtype,axis),cluster=cluster)
# @implement_function("linspace","slice")
# def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0,cluster=None):
# slice(np.linspace(todense(start),todense(stop),num,endpoint,retstep,dtype,axis),cluster=cluster)
@implement_function("logspace","array")
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0,cluster=None):
raise NotImplementedError("not yet")
# @implement_function("logspace","slice")
# def logspace_slice(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0,cluster=None):
# slice(np.logspace(todense(start),todense(stop),num,endpoint,base,dtype,axis),cluster=cluster)
@implement_function("geomspace","array")
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0,cluster=None):
raise NotImplementedError("not yet")
# @implement_function("geomspace","slice")
# def geomspace_slice(start, stop, num=50, endpoint=True, dtype=None, axis=0,cluster=None):
# slice(np.geomspace(todense(start),todense(stop),num,endpoint,dtype,axis),cluster=cluster)
# def fromdense(ar,dtype=None,cluster=None):
# return TensorTrainArray.fromdense(ar,dtype,cluster)
# def fromdense_slice(ar,dtype,cluster):
# return TensorTrainSlice.fromdense(ar,dtype,cluster)
def todense(ttar):
return ttar.todense()
@implement_function("asfarray","array")
def asfarray(ttar,dtype=None):
if not np.issubdtype(dtype,np.inexact):
dtype=float
return asarray(ttar,dtype=dtype)
@implement_function("asfarray","slice")
def asfslice(ttar,dtype=None):
if not np.issubdtype(dtype,np.inexact):
dtype=float
return asslice(ttar,dtype=dtype)
|
[
"numpy.frombuffer",
"numpy.ones",
"numpy.array",
"numpy.arange",
"numpy.fromiter",
"numpy.fromfunction",
"numpy.issubdtype"
] |
[((3271, 3300), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (3278, 3300), True, 'import numpy as np\n'), ((3633, 3660), 'numpy.ones', 'np.ones', (['ms[0].shape', 'dtype'], {}), '(ms[0].shape, dtype)\n', (3640, 3660), True, 'import numpy as np\n'), ((3691, 3723), 'numpy.ones', 'np.ones', (['ms[0].shape[:-1]', 'dtype'], {}), '(ms[0].shape[:-1], dtype)\n', (3698, 3723), True, 'import numpy as np\n'), ((3745, 3777), 'numpy.ones', 'np.ones', (['ms[-1].shape[1:]', 'dtype'], {}), '(ms[-1].shape[1:], dtype)\n', (3752, 3777), True, 'import numpy as np\n'), ((3819, 3848), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (3826, 3848), True, 'import numpy as np\n'), ((4744, 4773), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (4751, 4773), True, 'import numpy as np\n'), ((5196, 5223), 'numpy.ones', 'np.ones', (['ms[0].shape', 'dtype'], {}), '(ms[0].shape, dtype)\n', (5203, 5223), True, 'import numpy as np\n'), ((5254, 5286), 'numpy.ones', 'np.ones', (['ms[0].shape[:-1]', 'dtype'], {}), '(ms[0].shape[:-1], dtype)\n', (5261, 5286), True, 'import numpy as np\n'), ((5308, 5340), 'numpy.ones', 'np.ones', (['ms[-1].shape[1:]', 'dtype'], {}), '(ms[-1].shape[1:], dtype)\n', (5315, 5340), True, 'import numpy as np\n'), ((5382, 5411), 'numpy.ones', 'np.ones', (['m.shape[1:-1]', 'dtype'], {}), '(m.shape[1:-1], dtype)\n', (5389, 5411), True, 'import numpy as np\n'), ((8067, 8110), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'dtype', 'count', 'offset'], {}), '(buffer, dtype, count, offset)\n', (8080, 8110), True, 'import numpy as np\n'), ((8247, 8278), 'numpy.fromiter', 'np.fromiter', (['iter', 'dtype', 'count'], {}), '(iter, dtype, count)\n', (8258, 8278), True, 'import numpy as np\n'), ((8564, 8619), 'numpy.fromfunction', 'np.fromfunction', (['function', 'shape'], {'dtype': 'dtype'}), '(function, shape, dtype=dtype, **kwargs)\n', (8579, 8619), True, 'import numpy as np\n'), ((8909, 8964), 'numpy.fromfunction', 'np.fromfunction', (['function', 'shape'], {'dtype': 'dtype'}), '(function, shape, dtype=dtype, **kwargs)\n', (8924, 8964), True, 'import numpy as np\n'), ((11232, 11264), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.inexact'], {}), '(dtype, np.inexact)\n', (11245, 11264), True, 'import numpy as np\n'), ((11407, 11439), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.inexact'], {}), '(dtype, np.inexact)\n', (11420, 11439), True, 'import numpy as np\n'), ((4593, 4613), 'numpy.array', 'np.array', (['fill_value'], {}), '(fill_value)\n', (4601, 4613), True, 'import numpy as np\n'), ((5048, 5068), 'numpy.array', 'np.array', (['fill_value'], {}), '(fill_value)\n', (5056, 5068), True, 'import numpy as np\n'), ((9338, 9369), 'numpy.arange', 'np.arange', (['*args[:-1]'], {}), '(*args[:-1], **kwargs)\n', (9347, 9369), True, 'import numpy as np\n'), ((9502, 9528), 'numpy.arange', 'np.arange', (['*args'], {}), '(*args, **kwargs)\n', (9511, 9528), True, 'import numpy as np\n'), ((9569, 9595), 'numpy.arange', 'np.arange', (['*args'], {}), '(*args, **kwargs)\n', (9578, 9595), True, 'import numpy as np\n')]
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from .views import *
router = DefaultRouter()
router.register('image', ImageViewSet)
router.register('file', FileViewSet)
urlpatterns = [
url(r'', include(router.urls)),
url(r'^upload_image/(?P<filename>[^/]+)$', ImageUploadView.as_view()),
url(r'^upload_file/(?P<filename>[^/]+)$', FileUploadView.as_view()),
]
|
[
"django.conf.urls.include",
"rest_framework.routers.DefaultRouter"
] |
[((123, 138), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (136, 138), False, 'from rest_framework.routers import DefaultRouter\n'), ((245, 265), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (252, 265), False, 'from django.conf.urls import url, include\n')]
|